4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <libdrm/drm.h>
118 #include "linux_loop.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
130 #define CLONE_IO 0x80000000 /* Clone io context */
133 /* We can't directly call the host clone syscall, because this will
134 * badly confuse libc (breaking mutexes, for example). So we must
135 * divide clone flags into:
136 * * flag combinations that look like pthread_create()
137 * * flag combinations that look like fork()
138 * * flags we can implement within QEMU itself
139 * * flags we can't support and will return an error for
141 /* For thread creation, all these flags must be present; for
142 * fork, none must be present.
144 #define CLONE_THREAD_FLAGS \
145 (CLONE_VM | CLONE_FS | CLONE_FILES | \
146 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
148 /* These flags are ignored:
149 * CLONE_DETACHED is now ignored by the kernel;
150 * CLONE_IO is just an optimisation hint to the I/O scheduler
152 #define CLONE_IGNORED_FLAGS \
153 (CLONE_DETACHED | CLONE_IO)
155 /* Flags for fork which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_FORK_FLAGS \
157 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
158 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
160 /* Flags for thread creation which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_THREAD_FLAGS \
162 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
163 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
165 #define CLONE_INVALID_FORK_FLAGS \
166 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
168 #define CLONE_INVALID_THREAD_FLAGS \
169 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
170 CLONE_IGNORED_FLAGS))
172 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
173 * have almost all been allocated. We cannot support any of
174 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
175 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
176 * The checks against the invalid thread masks above will catch these.
177 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
180 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
181 * once. This exercises the codepaths for restart.
183 //#define DEBUG_ERESTARTSYS
185 //#include <linux/msdos_fs.h>
186 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
187 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
197 #define _syscall0(type,name) \
198 static type name (void) \
200 return syscall(__NR_##name); \
203 #define _syscall1(type,name,type1,arg1) \
204 static type name (type1 arg1) \
206 return syscall(__NR_##name, arg1); \
209 #define _syscall2(type,name,type1,arg1,type2,arg2) \
210 static type name (type1 arg1,type2 arg2) \
212 return syscall(__NR_##name, arg1, arg2); \
215 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
216 static type name (type1 arg1,type2 arg2,type3 arg3) \
218 return syscall(__NR_##name, arg1, arg2, arg3); \
221 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
224 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
227 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
231 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
235 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
236 type5,arg5,type6,arg6) \
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
240 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
244 #define __NR_sys_uname __NR_uname
245 #define __NR_sys_getcwd1 __NR_getcwd
246 #define __NR_sys_getdents __NR_getdents
247 #define __NR_sys_getdents64 __NR_getdents64
248 #define __NR_sys_getpriority __NR_getpriority
249 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
250 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
251 #define __NR_sys_syslog __NR_syslog
252 #if defined(__NR_futex)
253 # define __NR_sys_futex __NR_futex
255 #if defined(__NR_futex_time64)
256 # define __NR_sys_futex_time64 __NR_futex_time64
258 #define __NR_sys_inotify_init __NR_inotify_init
259 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
260 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
261 #define __NR_sys_statx __NR_statx
263 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
264 #define __NR__llseek __NR_lseek
267 /* Newer kernel ports have llseek() instead of _llseek() */
268 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
269 #define TARGET_NR__llseek TARGET_NR_llseek
272 #define __NR_sys_gettid __NR_gettid
/* Raw gettid(2) wrapper: older glibc (< 2.30) provides no gettid()
 * function, so we must issue the syscall directly. */
273 _syscall0(int, sys_gettid
)
275 /* For the 64-bit guest on 32-bit host case we must emulate
276 * getdents using getdents64, because otherwise the host
277 * might hand us back more dirent records than we can fit
278 * into the guest buffer after structure format conversion.
279 * Otherwise we emulate getdents with getdents if the host has it.
281 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
282 #define EMULATE_GETDENTS_WITH_GETDENTS
285 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
286 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
288 #if (defined(TARGET_NR_getdents) && \
289 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
290 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
291 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
293 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
294 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
295 loff_t
*, res
, uint
, wh
);
297 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
298 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
300 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
301 #ifdef __NR_exit_group
302 _syscall1(int,exit_group
,int,error_code
)
304 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
305 _syscall1(int,set_tid_address
,int *,tidptr
)
307 #if defined(__NR_futex)
308 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
309 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
311 #if defined(__NR_futex_time64)
312 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
313 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
315 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
316 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
317 unsigned long *, user_mask_ptr
);
318 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
319 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
320 unsigned long *, user_mask_ptr
);
321 #define __NR_sys_getcpu __NR_getcpu
322 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
323 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
/* Raw capget(2): use the kernel's __user_cap_* structures directly
 * rather than libcap, so the data layout matches the syscall ABI. */
325 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
326 struct __user_cap_data_struct
*, data
);
/* Raw capset(2): counterpart to capget above, same ABI-layout rationale. */
327 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
328 struct __user_cap_data_struct
*, data
);
329 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
330 _syscall2(int, ioprio_get
, int, which
, int, who
)
332 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
333 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
335 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
336 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
339 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
340 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
341 unsigned long, idx1
, unsigned long, idx2
)
345 * It is assumed that struct statx is architecture independent.
347 #if defined(TARGET_NR_statx) && defined(__NR_statx)
348 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
349 unsigned int, mask
, struct target_statx
*, statxbuf
)
351 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
352 _syscall2(int, membarrier
, int, cmd
, int, flags
)
355 static bitmask_transtbl fcntl_flags_tbl
[] = {
356 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
357 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
358 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
359 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
360 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
361 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
362 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
363 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
364 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
365 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
366 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
367 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
368 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
369 #if defined(O_DIRECT)
370 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
372 #if defined(O_NOATIME)
373 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
375 #if defined(O_CLOEXEC)
376 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
379 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
381 #if defined(O_TMPFILE)
382 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
384 /* Don't terminate the list prematurely on 64-bit host+guest. */
385 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
386 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Raw getcwd(2) (aliased via __NR_sys_getcwd1 above): bypasses the libc
 * wrapper so the guest sees the kernel's return-value semantics. */
391 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
393 #ifdef TARGET_NR_utimensat
394 #if defined(__NR_utimensat)
395 #define __NR_sys_utimensat __NR_utimensat
396 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
397 const struct timespec
*,tsp
,int,flags
)
399 static int sys_utimensat(int dirfd
, const char *pathname
,
400 const struct timespec times
[2], int flags
)
406 #endif /* TARGET_NR_utimensat */
408 #ifdef TARGET_NR_renameat2
409 #if defined(__NR_renameat2)
410 #define __NR_sys_renameat2 __NR_renameat2
411 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
412 const char *, new, unsigned int, flags
)
414 static int sys_renameat2(int oldfd
, const char *old
,
415 int newfd
, const char *new, int flags
)
418 return renameat(oldfd
, old
, newfd
, new);
424 #endif /* TARGET_NR_renameat2 */
426 #ifdef CONFIG_INOTIFY
427 #include <sys/inotify.h>
429 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
430 static int sys_inotify_init(void)
432 return (inotify_init());
435 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
436 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
438 return (inotify_add_watch(fd
, pathname
, mask
));
441 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
442 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
444 return (inotify_rm_watch(fd
, wd
));
447 #ifdef CONFIG_INOTIFY1
448 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
449 static int sys_inotify_init1(int flags
)
451 return (inotify_init1(flags
));
456 /* Userspace can usually survive runtime without inotify */
457 #undef TARGET_NR_inotify_init
458 #undef TARGET_NR_inotify_init1
459 #undef TARGET_NR_inotify_add_watch
460 #undef TARGET_NR_inotify_rm_watch
461 #endif /* CONFIG_INOTIFY */
463 #if defined(TARGET_NR_prlimit64)
464 #ifndef __NR_prlimit64
465 # define __NR_prlimit64 -1
467 #define __NR_sys_prlimit64 __NR_prlimit64
468 /* The glibc rlimit structure may not be that used by the underlying syscall */
469 struct host_rlimit64
{
473 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
474 const struct host_rlimit64
*, new_limit
,
475 struct host_rlimit64
*, old_limit
)
479 #if defined(TARGET_NR_timer_create)
480 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
481 static timer_t g_posix_timers
[32] = { 0, } ;
483 static inline int next_free_host_timer(void)
486 /* FIXME: Does finding the next free slot require a lock? */
487 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
488 if (g_posix_timers
[k
] == 0) {
489 g_posix_timers
[k
] = (timer_t
) 1;
497 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
499 static inline int regpairs_aligned(void *cpu_env
, int num
)
501 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
503 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
504 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
505 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
506 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
507 * of registers which translates to the same as ARM/MIPS, because we start with
509 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
510 #elif defined(TARGET_SH4)
511 /* SH4 doesn't align register pairs, except for p{read,write}64 */
512 static inline int regpairs_aligned(void *cpu_env
, int num
)
515 case TARGET_NR_pread64
:
516 case TARGET_NR_pwrite64
:
523 #elif defined(TARGET_XTENSA)
524 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
526 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
529 #define ERRNO_TABLE_SIZE 1200
531 /* target_to_host_errno_table[] is initialized from
532 * host_to_target_errno_table[] in syscall_init(). */
533 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
537 * This list is the union of errno values overridden in asm-<arch>/errno.h
538 * minus the errnos that are not actually generic to all archs.
540 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
541 [EAGAIN
] = TARGET_EAGAIN
,
542 [EIDRM
] = TARGET_EIDRM
,
543 [ECHRNG
] = TARGET_ECHRNG
,
544 [EL2NSYNC
] = TARGET_EL2NSYNC
,
545 [EL3HLT
] = TARGET_EL3HLT
,
546 [EL3RST
] = TARGET_EL3RST
,
547 [ELNRNG
] = TARGET_ELNRNG
,
548 [EUNATCH
] = TARGET_EUNATCH
,
549 [ENOCSI
] = TARGET_ENOCSI
,
550 [EL2HLT
] = TARGET_EL2HLT
,
551 [EDEADLK
] = TARGET_EDEADLK
,
552 [ENOLCK
] = TARGET_ENOLCK
,
553 [EBADE
] = TARGET_EBADE
,
554 [EBADR
] = TARGET_EBADR
,
555 [EXFULL
] = TARGET_EXFULL
,
556 [ENOANO
] = TARGET_ENOANO
,
557 [EBADRQC
] = TARGET_EBADRQC
,
558 [EBADSLT
] = TARGET_EBADSLT
,
559 [EBFONT
] = TARGET_EBFONT
,
560 [ENOSTR
] = TARGET_ENOSTR
,
561 [ENODATA
] = TARGET_ENODATA
,
562 [ETIME
] = TARGET_ETIME
,
563 [ENOSR
] = TARGET_ENOSR
,
564 [ENONET
] = TARGET_ENONET
,
565 [ENOPKG
] = TARGET_ENOPKG
,
566 [EREMOTE
] = TARGET_EREMOTE
,
567 [ENOLINK
] = TARGET_ENOLINK
,
568 [EADV
] = TARGET_EADV
,
569 [ESRMNT
] = TARGET_ESRMNT
,
570 [ECOMM
] = TARGET_ECOMM
,
571 [EPROTO
] = TARGET_EPROTO
,
572 [EDOTDOT
] = TARGET_EDOTDOT
,
573 [EMULTIHOP
] = TARGET_EMULTIHOP
,
574 [EBADMSG
] = TARGET_EBADMSG
,
575 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
576 [EOVERFLOW
] = TARGET_EOVERFLOW
,
577 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
578 [EBADFD
] = TARGET_EBADFD
,
579 [EREMCHG
] = TARGET_EREMCHG
,
580 [ELIBACC
] = TARGET_ELIBACC
,
581 [ELIBBAD
] = TARGET_ELIBBAD
,
582 [ELIBSCN
] = TARGET_ELIBSCN
,
583 [ELIBMAX
] = TARGET_ELIBMAX
,
584 [ELIBEXEC
] = TARGET_ELIBEXEC
,
585 [EILSEQ
] = TARGET_EILSEQ
,
586 [ENOSYS
] = TARGET_ENOSYS
,
587 [ELOOP
] = TARGET_ELOOP
,
588 [ERESTART
] = TARGET_ERESTART
,
589 [ESTRPIPE
] = TARGET_ESTRPIPE
,
590 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
591 [EUSERS
] = TARGET_EUSERS
,
592 [ENOTSOCK
] = TARGET_ENOTSOCK
,
593 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
594 [EMSGSIZE
] = TARGET_EMSGSIZE
,
595 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
596 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
597 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
598 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
599 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
600 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
601 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
602 [EADDRINUSE
] = TARGET_EADDRINUSE
,
603 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
604 [ENETDOWN
] = TARGET_ENETDOWN
,
605 [ENETUNREACH
] = TARGET_ENETUNREACH
,
606 [ENETRESET
] = TARGET_ENETRESET
,
607 [ECONNABORTED
] = TARGET_ECONNABORTED
,
608 [ECONNRESET
] = TARGET_ECONNRESET
,
609 [ENOBUFS
] = TARGET_ENOBUFS
,
610 [EISCONN
] = TARGET_EISCONN
,
611 [ENOTCONN
] = TARGET_ENOTCONN
,
612 [EUCLEAN
] = TARGET_EUCLEAN
,
613 [ENOTNAM
] = TARGET_ENOTNAM
,
614 [ENAVAIL
] = TARGET_ENAVAIL
,
615 [EISNAM
] = TARGET_EISNAM
,
616 [EREMOTEIO
] = TARGET_EREMOTEIO
,
617 [EDQUOT
] = TARGET_EDQUOT
,
618 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
619 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
620 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
621 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
622 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
623 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
624 [EALREADY
] = TARGET_EALREADY
,
625 [EINPROGRESS
] = TARGET_EINPROGRESS
,
626 [ESTALE
] = TARGET_ESTALE
,
627 [ECANCELED
] = TARGET_ECANCELED
,
628 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
629 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
631 [ENOKEY
] = TARGET_ENOKEY
,
634 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
637 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
640 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
643 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
645 #ifdef ENOTRECOVERABLE
646 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
649 [ENOMSG
] = TARGET_ENOMSG
,
652 [ERFKILL
] = TARGET_ERFKILL
,
655 [EHWPOISON
] = TARGET_EHWPOISON
,
/* Translate a host errno value to the guest's numbering using the
 * host_to_target_errno_table above. Entries with value 0 (no explicit
 * mapping) fall through — presumably to return err unchanged, but the
 * fallback path is outside this view; TODO confirm. */
659 static inline int host_to_target_errno(int err
)
661 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
662 host_to_target_errno_table
[err
]) {
663 return host_to_target_errno_table
[err
];
/* Inverse of host_to_target_errno: map a guest errno to the host's
 * numbering via target_to_host_errno_table (populated in syscall_init()
 * per the comment above). Unmapped entries fall through; the fallback
 * return is outside this view. */
668 static inline int target_to_host_errno(int err
)
670 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
671 target_to_host_errno_table
[err
]) {
672 return target_to_host_errno_table
[err
];
/* Convert a host syscall result into a guest-style return value:
 * on failure, return the negated, guest-numbered errno. (The success
 * branch returning ret unchanged is not visible in this view.) */
677 static inline abi_long
get_errno(abi_long ret
)
680 return -host_to_target_errno(errno
);
/* Return a human-readable string for a guest errno value.
 * Handles QEMU's two pseudo-errnos specially, then bounds-checks and
 * delegates to the host strerror() after errno translation. */
685 const char *target_strerror(int err
)
687 if (err
== TARGET_ERESTARTSYS
) {
/* Internal marker: syscall should be restarted after signal handling. */
688 return "To be restarted";
690 if (err
== TARGET_QEMU_ESIGRETURN
) {
/* Internal marker: sigreturn completed; not a real error. */
691 return "Successful exit from sigreturn";
/* Out-of-range values cannot be translated (the early-return for this
 * case lies outside the visible span). */
694 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
697 return strerror(target_to_host_errno(err
));
700 #define safe_syscall0(type, name) \
701 static type safe_##name(void) \
703 return safe_syscall(__NR_##name); \
706 #define safe_syscall1(type, name, type1, arg1) \
707 static type safe_##name(type1 arg1) \
709 return safe_syscall(__NR_##name, arg1); \
712 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
713 static type safe_##name(type1 arg1, type2 arg2) \
715 return safe_syscall(__NR_##name, arg1, arg2); \
718 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
719 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
721 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
724 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
728 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
731 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
732 type4, arg4, type5, arg5) \
733 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
736 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
739 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
740 type4, arg4, type5, arg5, type6, arg6) \
741 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
742 type5 arg5, type6 arg6) \
744 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
747 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
748 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
749 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
750 int, flags
, mode_t
, mode
)
751 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
752 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
753 struct rusage
*, rusage
)
755 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
756 int, options
, struct rusage
*, rusage
)
757 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
758 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
759 defined(TARGET_NR_pselect6)
760 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
761 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
763 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
764 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
765 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
768 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
769 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
771 #if defined(__NR_futex)
772 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
773 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
775 #if defined(__NR_futex_time64)
776 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
777 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
779 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
780 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
781 safe_syscall2(int, tkill
, int, tid
, int, sig
)
782 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
783 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
784 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
785 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
786 unsigned long, pos_l
, unsigned long, pos_h
)
787 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
788 unsigned long, pos_l
, unsigned long, pos_h
)
789 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
791 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
792 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
793 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
794 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
795 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
796 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
797 safe_syscall2(int, flock
, int, fd
, int, operation
)
798 #ifdef TARGET_NR_rt_sigtimedwait
799 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
800 const struct timespec
*, uts
, size_t, sigsetsize
)
802 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
804 #if defined(TARGET_NR_nanosleep)
805 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
806 struct timespec
*, rem
)
808 #ifdef TARGET_NR_clock_nanosleep
809 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
810 const struct timespec
*, req
, struct timespec
*, rem
)
814 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
817 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
818 void *, ptr
, long, fifth
)
822 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
826 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
827 long, msgtype
, int, flags
)
829 #ifdef __NR_semtimedop
830 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
831 unsigned, nsops
, const struct timespec
*, timeout
)
833 #ifdef TARGET_NR_mq_timedsend
834 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
835 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
837 #ifdef TARGET_NR_mq_timedreceive
838 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
839 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
841 /* We do ioctl like this rather than via safe_syscall3 to preserve the
842 * "third argument might be integer or pointer or not present" behaviour of
845 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
846 /* Similarly for fcntl. Note that callers must always:
847 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
848 * use the flock64 struct rather than unsuffixed flock
849 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
852 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
854 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
857 static inline int host_to_target_sock_type(int host_type
)
861 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
863 target_type
= TARGET_SOCK_DGRAM
;
866 target_type
= TARGET_SOCK_STREAM
;
869 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
873 #if defined(SOCK_CLOEXEC)
874 if (host_type
& SOCK_CLOEXEC
) {
875 target_type
|= TARGET_SOCK_CLOEXEC
;
879 #if defined(SOCK_NONBLOCK)
880 if (host_type
& SOCK_NONBLOCK
) {
881 target_type
|= TARGET_SOCK_NONBLOCK
;
888 static abi_ulong target_brk
;
889 static abi_ulong target_original_brk
;
890 static abi_ulong brk_page
;
/* Record the guest's initial program break. Both the original and the
 * current brk are rounded up to a host page boundary; brk_page tracks
 * the highest host page already reserved for the guest heap. */
892 void target_set_brk(abi_ulong new_brk
)
894 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
895 brk_page
= HOST_PAGE_ALIGN(target_brk
);
898 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
899 #define DEBUGF_BRK(message, args...)
901 /* do_brk() must return target values and target errnos. */
902 abi_long
do_brk(abi_ulong new_brk
)
904 abi_long mapped_addr
;
905 abi_ulong new_alloc_size
;
907 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
910 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
913 if (new_brk
< target_original_brk
) {
914 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
919 /* If the new brk is less than the highest page reserved to the
920 * target heap allocation, set it and we're almost done... */
921 if (new_brk
<= brk_page
) {
922 /* Heap contents are initialized to zero, as for anonymous
924 if (new_brk
> target_brk
) {
925 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
927 target_brk
= new_brk
;
928 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
932 /* We need to allocate more memory after the brk... Note that
933 * we don't use MAP_FIXED because that will map over the top of
934 * any existing mapping (like the one with the host libc or qemu
935 * itself); instead we treat "mapped but at wrong address" as
936 * a failure and unmap again.
938 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
939 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
940 PROT_READ
|PROT_WRITE
,
941 MAP_ANON
|MAP_PRIVATE
, 0, 0));
943 if (mapped_addr
== brk_page
) {
944 /* Heap contents are initialized to zero, as for anonymous
945 * mapped pages. Technically the new pages are already
946 * initialized to zero since they *are* anonymous mapped
947 * pages, however we have to take care with the contents that
948 * come from the remaining part of the previous page: it may
949 * contains garbage data due to a previous heap usage (grown
951 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
953 target_brk
= new_brk
;
954 brk_page
= HOST_PAGE_ALIGN(target_brk
);
955 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
958 } else if (mapped_addr
!= -1) {
959 /* Mapped but at wrong address, meaning there wasn't actually
960 * enough space for this brk.
962 target_munmap(mapped_addr
, new_alloc_size
);
964 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
967 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
970 #if defined(TARGET_ALPHA)
971 /* We (partially) emulate OSF/1 on Alpha, which requires we
972 return a proper errno, not an unchanged brk value. */
973 return -TARGET_ENOMEM
;
975 /* For everything else, return the previous break. */
979 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
980 defined(TARGET_NR_pselect6)
981 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
982 abi_ulong target_fds_addr
,
986 abi_ulong b
, *target_fds
;
988 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
989 if (!(target_fds
= lock_user(VERIFY_READ
,
991 sizeof(abi_ulong
) * nw
,
993 return -TARGET_EFAULT
;
997 for (i
= 0; i
< nw
; i
++) {
998 /* grab the abi_ulong */
999 __get_user(b
, &target_fds
[i
]);
1000 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1001 /* check the bit inside the abi_ulong */
1008 unlock_user(target_fds
, target_fds_addr
, 0);
1013 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1014 abi_ulong target_fds_addr
,
1017 if (target_fds_addr
) {
1018 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1019 return -TARGET_EFAULT
;
1027 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1033 abi_ulong
*target_fds
;
1035 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1036 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1038 sizeof(abi_ulong
) * nw
,
1040 return -TARGET_EFAULT
;
1043 for (i
= 0; i
< nw
; i
++) {
1045 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1046 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1049 __put_user(v
, &target_fds
[i
]);
1052 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1058 #if defined(__alpha__)
1059 #define HOST_HZ 1024
1064 static inline abi_long
host_to_target_clock_t(long ticks
)
1066 #if HOST_HZ == TARGET_HZ
1069 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1073 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1074 const struct rusage
*rusage
)
1076 struct target_rusage
*target_rusage
;
1078 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1079 return -TARGET_EFAULT
;
1080 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1081 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1082 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1083 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1084 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1085 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1086 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1087 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1088 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1089 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1090 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1091 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1092 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1093 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1094 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1095 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1096 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1097 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1098 unlock_user_struct(target_rusage
, target_addr
, 1);
1103 #ifdef TARGET_NR_setrlimit
1104 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1106 abi_ulong target_rlim_swap
;
1109 target_rlim_swap
= tswapal(target_rlim
);
1110 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1111 return RLIM_INFINITY
;
1113 result
= target_rlim_swap
;
1114 if (target_rlim_swap
!= (rlim_t
)result
)
1115 return RLIM_INFINITY
;
1121 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1122 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1124 abi_ulong target_rlim_swap
;
1127 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1128 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1130 target_rlim_swap
= rlim
;
1131 result
= tswapal(target_rlim_swap
);
1137 static inline int target_to_host_resource(int code
)
1140 case TARGET_RLIMIT_AS
:
1142 case TARGET_RLIMIT_CORE
:
1144 case TARGET_RLIMIT_CPU
:
1146 case TARGET_RLIMIT_DATA
:
1148 case TARGET_RLIMIT_FSIZE
:
1149 return RLIMIT_FSIZE
;
1150 case TARGET_RLIMIT_LOCKS
:
1151 return RLIMIT_LOCKS
;
1152 case TARGET_RLIMIT_MEMLOCK
:
1153 return RLIMIT_MEMLOCK
;
1154 case TARGET_RLIMIT_MSGQUEUE
:
1155 return RLIMIT_MSGQUEUE
;
1156 case TARGET_RLIMIT_NICE
:
1158 case TARGET_RLIMIT_NOFILE
:
1159 return RLIMIT_NOFILE
;
1160 case TARGET_RLIMIT_NPROC
:
1161 return RLIMIT_NPROC
;
1162 case TARGET_RLIMIT_RSS
:
1164 case TARGET_RLIMIT_RTPRIO
:
1165 return RLIMIT_RTPRIO
;
1166 case TARGET_RLIMIT_SIGPENDING
:
1167 return RLIMIT_SIGPENDING
;
1168 case TARGET_RLIMIT_STACK
:
1169 return RLIMIT_STACK
;
1175 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1176 abi_ulong target_tv_addr
)
1178 struct target_timeval
*target_tv
;
1180 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1181 return -TARGET_EFAULT
;
1184 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1185 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1187 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1192 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1193 const struct timeval
*tv
)
1195 struct target_timeval
*target_tv
;
1197 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1198 return -TARGET_EFAULT
;
1201 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1202 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1204 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1209 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1210 const struct timeval
*tv
)
1212 struct target__kernel_sock_timeval
*target_tv
;
1214 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1215 return -TARGET_EFAULT
;
1218 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1219 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1221 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1226 #if defined(TARGET_NR_futex) || \
1227 defined(TARGET_NR_rt_sigtimedwait) || \
1228 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1229 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1230 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1231 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1232 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1233 defined(TARGET_NR_timer_settime) || \
1234 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1235 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1236 abi_ulong target_addr
)
1238 struct target_timespec
*target_ts
;
1240 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1241 return -TARGET_EFAULT
;
1243 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1244 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1245 unlock_user_struct(target_ts
, target_addr
, 0);
1250 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1251 defined(TARGET_NR_timer_settime64) || \
1252 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
1253 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1254 abi_ulong target_addr
)
1256 struct target__kernel_timespec
*target_ts
;
1258 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1259 return -TARGET_EFAULT
;
1261 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1262 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1263 unlock_user_struct(target_ts
, target_addr
, 0);
1268 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1269 struct timespec
*host_ts
)
1271 struct target_timespec
*target_ts
;
1273 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1274 return -TARGET_EFAULT
;
1276 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1277 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1278 unlock_user_struct(target_ts
, target_addr
, 1);
1282 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1283 struct timespec
*host_ts
)
1285 struct target__kernel_timespec
*target_ts
;
1287 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1288 return -TARGET_EFAULT
;
1290 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1291 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1292 unlock_user_struct(target_ts
, target_addr
, 1);
1296 #if defined(TARGET_NR_gettimeofday)
1297 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1298 struct timezone
*tz
)
1300 struct target_timezone
*target_tz
;
1302 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1303 return -TARGET_EFAULT
;
1306 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1307 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1309 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1315 #if defined(TARGET_NR_settimeofday)
1316 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1317 abi_ulong target_tz_addr
)
1319 struct target_timezone
*target_tz
;
1321 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1322 return -TARGET_EFAULT
;
1325 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1326 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1328 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1334 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1337 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1338 abi_ulong target_mq_attr_addr
)
1340 struct target_mq_attr
*target_mq_attr
;
1342 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1343 target_mq_attr_addr
, 1))
1344 return -TARGET_EFAULT
;
1346 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1347 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1348 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1349 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1351 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1356 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1357 const struct mq_attr
*attr
)
1359 struct target_mq_attr
*target_mq_attr
;
1361 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1362 target_mq_attr_addr
, 0))
1363 return -TARGET_EFAULT
;
1365 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1366 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1367 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1368 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1370 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1376 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1377 /* do_select() must return target values and target errnos. */
1378 static abi_long
do_select(int n
,
1379 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1380 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1382 fd_set rfds
, wfds
, efds
;
1383 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1385 struct timespec ts
, *ts_ptr
;
1388 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1392 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1396 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1401 if (target_tv_addr
) {
1402 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1403 return -TARGET_EFAULT
;
1404 ts
.tv_sec
= tv
.tv_sec
;
1405 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1411 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1414 if (!is_error(ret
)) {
1415 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1416 return -TARGET_EFAULT
;
1417 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1418 return -TARGET_EFAULT
;
1419 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1420 return -TARGET_EFAULT
;
1422 if (target_tv_addr
) {
1423 tv
.tv_sec
= ts
.tv_sec
;
1424 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1425 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1426 return -TARGET_EFAULT
;
1434 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1435 static abi_long
do_old_select(abi_ulong arg1
)
1437 struct target_sel_arg_struct
*sel
;
1438 abi_ulong inp
, outp
, exp
, tvp
;
1441 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1442 return -TARGET_EFAULT
;
1445 nsel
= tswapal(sel
->n
);
1446 inp
= tswapal(sel
->inp
);
1447 outp
= tswapal(sel
->outp
);
1448 exp
= tswapal(sel
->exp
);
1449 tvp
= tswapal(sel
->tvp
);
1451 unlock_user_struct(sel
, arg1
, 0);
1453 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1458 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1461 return pipe2(host_pipe
, flags
);
1467 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1468 int flags
, int is_pipe2
)
1472 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1475 return get_errno(ret
);
1477 /* Several targets have special calling conventions for the original
1478 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1480 #if defined(TARGET_ALPHA)
1481 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1482 return host_pipe
[0];
1483 #elif defined(TARGET_MIPS)
1484 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1485 return host_pipe
[0];
1486 #elif defined(TARGET_SH4)
1487 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1488 return host_pipe
[0];
1489 #elif defined(TARGET_SPARC)
1490 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1491 return host_pipe
[0];
1495 if (put_user_s32(host_pipe
[0], pipedes
)
1496 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1497 return -TARGET_EFAULT
;
1498 return get_errno(ret
);
1501 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1502 abi_ulong target_addr
,
1505 struct target_ip_mreqn
*target_smreqn
;
1507 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1509 return -TARGET_EFAULT
;
1510 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1511 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1512 if (len
== sizeof(struct target_ip_mreqn
))
1513 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1514 unlock_user(target_smreqn
, target_addr
, 0);
1519 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1520 abi_ulong target_addr
,
1523 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1524 sa_family_t sa_family
;
1525 struct target_sockaddr
*target_saddr
;
1527 if (fd_trans_target_to_host_addr(fd
)) {
1528 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1531 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1533 return -TARGET_EFAULT
;
1535 sa_family
= tswap16(target_saddr
->sa_family
);
1537 /* Oops. The caller might send a incomplete sun_path; sun_path
1538 * must be terminated by \0 (see the manual page), but
1539 * unfortunately it is quite common to specify sockaddr_un
1540 * length as "strlen(x->sun_path)" while it should be
1541 * "strlen(...) + 1". We'll fix that here if needed.
1542 * Linux kernel has a similar feature.
1545 if (sa_family
== AF_UNIX
) {
1546 if (len
< unix_maxlen
&& len
> 0) {
1547 char *cp
= (char*)target_saddr
;
1549 if ( cp
[len
-1] && !cp
[len
] )
1552 if (len
> unix_maxlen
)
1556 memcpy(addr
, target_saddr
, len
);
1557 addr
->sa_family
= sa_family
;
1558 if (sa_family
== AF_NETLINK
) {
1559 struct sockaddr_nl
*nladdr
;
1561 nladdr
= (struct sockaddr_nl
*)addr
;
1562 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1563 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1564 } else if (sa_family
== AF_PACKET
) {
1565 struct target_sockaddr_ll
*lladdr
;
1567 lladdr
= (struct target_sockaddr_ll
*)addr
;
1568 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1569 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1571 unlock_user(target_saddr
, target_addr
, 0);
1576 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1577 struct sockaddr
*addr
,
1580 struct target_sockaddr
*target_saddr
;
1587 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1589 return -TARGET_EFAULT
;
1590 memcpy(target_saddr
, addr
, len
);
1591 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1592 sizeof(target_saddr
->sa_family
)) {
1593 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1595 if (addr
->sa_family
== AF_NETLINK
&&
1596 len
>= sizeof(struct target_sockaddr_nl
)) {
1597 struct target_sockaddr_nl
*target_nl
=
1598 (struct target_sockaddr_nl
*)target_saddr
;
1599 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1600 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1601 } else if (addr
->sa_family
== AF_PACKET
) {
1602 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1603 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1604 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1605 } else if (addr
->sa_family
== AF_INET6
&&
1606 len
>= sizeof(struct target_sockaddr_in6
)) {
1607 struct target_sockaddr_in6
*target_in6
=
1608 (struct target_sockaddr_in6
*)target_saddr
;
1609 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1611 unlock_user(target_saddr
, target_addr
, len
);
1616 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1617 struct target_msghdr
*target_msgh
)
1619 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1620 abi_long msg_controllen
;
1621 abi_ulong target_cmsg_addr
;
1622 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1623 socklen_t space
= 0;
1625 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1626 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1628 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1629 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1630 target_cmsg_start
= target_cmsg
;
1632 return -TARGET_EFAULT
;
1634 while (cmsg
&& target_cmsg
) {
1635 void *data
= CMSG_DATA(cmsg
);
1636 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1638 int len
= tswapal(target_cmsg
->cmsg_len
)
1639 - sizeof(struct target_cmsghdr
);
1641 space
+= CMSG_SPACE(len
);
1642 if (space
> msgh
->msg_controllen
) {
1643 space
-= CMSG_SPACE(len
);
1644 /* This is a QEMU bug, since we allocated the payload
1645 * area ourselves (unlike overflow in host-to-target
1646 * conversion, which is just the guest giving us a buffer
1647 * that's too small). It can't happen for the payload types
1648 * we currently support; if it becomes an issue in future
1649 * we would need to improve our allocation strategy to
1650 * something more intelligent than "twice the size of the
1651 * target buffer we're reading from".
1653 qemu_log_mask(LOG_UNIMP
,
1654 ("Unsupported ancillary data %d/%d: "
1655 "unhandled msg size\n"),
1656 tswap32(target_cmsg
->cmsg_level
),
1657 tswap32(target_cmsg
->cmsg_type
));
1661 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1662 cmsg
->cmsg_level
= SOL_SOCKET
;
1664 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1666 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1667 cmsg
->cmsg_len
= CMSG_LEN(len
);
1669 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1670 int *fd
= (int *)data
;
1671 int *target_fd
= (int *)target_data
;
1672 int i
, numfds
= len
/ sizeof(int);
1674 for (i
= 0; i
< numfds
; i
++) {
1675 __get_user(fd
[i
], target_fd
+ i
);
1677 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1678 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1679 struct ucred
*cred
= (struct ucred
*)data
;
1680 struct target_ucred
*target_cred
=
1681 (struct target_ucred
*)target_data
;
1683 __get_user(cred
->pid
, &target_cred
->pid
);
1684 __get_user(cred
->uid
, &target_cred
->uid
);
1685 __get_user(cred
->gid
, &target_cred
->gid
);
1687 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1688 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1689 memcpy(data
, target_data
, len
);
1692 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1693 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1696 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1698 msgh
->msg_controllen
= space
;
1702 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1703 struct msghdr
*msgh
)
1705 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1706 abi_long msg_controllen
;
1707 abi_ulong target_cmsg_addr
;
1708 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1709 socklen_t space
= 0;
1711 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1712 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1714 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1715 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1716 target_cmsg_start
= target_cmsg
;
1718 return -TARGET_EFAULT
;
1720 while (cmsg
&& target_cmsg
) {
1721 void *data
= CMSG_DATA(cmsg
);
1722 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1724 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1725 int tgt_len
, tgt_space
;
1727 /* We never copy a half-header but may copy half-data;
1728 * this is Linux's behaviour in put_cmsg(). Note that
1729 * truncation here is a guest problem (which we report
1730 * to the guest via the CTRUNC bit), unlike truncation
1731 * in target_to_host_cmsg, which is a QEMU bug.
1733 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1734 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1738 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1739 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1741 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1743 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1745 /* Payload types which need a different size of payload on
1746 * the target must adjust tgt_len here.
1749 switch (cmsg
->cmsg_level
) {
1751 switch (cmsg
->cmsg_type
) {
1753 tgt_len
= sizeof(struct target_timeval
);
1763 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1764 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1765 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1768 /* We must now copy-and-convert len bytes of payload
1769 * into tgt_len bytes of destination space. Bear in mind
1770 * that in both source and destination we may be dealing
1771 * with a truncated value!
1773 switch (cmsg
->cmsg_level
) {
1775 switch (cmsg
->cmsg_type
) {
1778 int *fd
= (int *)data
;
1779 int *target_fd
= (int *)target_data
;
1780 int i
, numfds
= tgt_len
/ sizeof(int);
1782 for (i
= 0; i
< numfds
; i
++) {
1783 __put_user(fd
[i
], target_fd
+ i
);
1789 struct timeval
*tv
= (struct timeval
*)data
;
1790 struct target_timeval
*target_tv
=
1791 (struct target_timeval
*)target_data
;
1793 if (len
!= sizeof(struct timeval
) ||
1794 tgt_len
!= sizeof(struct target_timeval
)) {
1798 /* copy struct timeval to target */
1799 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1800 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1803 case SCM_CREDENTIALS
:
1805 struct ucred
*cred
= (struct ucred
*)data
;
1806 struct target_ucred
*target_cred
=
1807 (struct target_ucred
*)target_data
;
1809 __put_user(cred
->pid
, &target_cred
->pid
);
1810 __put_user(cred
->uid
, &target_cred
->uid
);
1811 __put_user(cred
->gid
, &target_cred
->gid
);
1820 switch (cmsg
->cmsg_type
) {
1823 uint32_t *v
= (uint32_t *)data
;
1824 uint32_t *t_int
= (uint32_t *)target_data
;
1826 if (len
!= sizeof(uint32_t) ||
1827 tgt_len
!= sizeof(uint32_t)) {
1830 __put_user(*v
, t_int
);
1836 struct sock_extended_err ee
;
1837 struct sockaddr_in offender
;
1839 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1840 struct errhdr_t
*target_errh
=
1841 (struct errhdr_t
*)target_data
;
1843 if (len
!= sizeof(struct errhdr_t
) ||
1844 tgt_len
!= sizeof(struct errhdr_t
)) {
1847 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1848 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1849 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1850 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1851 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1852 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1853 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1854 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1855 (void *) &errh
->offender
, sizeof(errh
->offender
));
1864 switch (cmsg
->cmsg_type
) {
1867 uint32_t *v
= (uint32_t *)data
;
1868 uint32_t *t_int
= (uint32_t *)target_data
;
1870 if (len
!= sizeof(uint32_t) ||
1871 tgt_len
!= sizeof(uint32_t)) {
1874 __put_user(*v
, t_int
);
1880 struct sock_extended_err ee
;
1881 struct sockaddr_in6 offender
;
1883 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1884 struct errhdr6_t
*target_errh
=
1885 (struct errhdr6_t
*)target_data
;
1887 if (len
!= sizeof(struct errhdr6_t
) ||
1888 tgt_len
!= sizeof(struct errhdr6_t
)) {
1891 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1892 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1893 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1894 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1895 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1896 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1897 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1898 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1899 (void *) &errh
->offender
, sizeof(errh
->offender
));
1909 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1910 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1911 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1912 if (tgt_len
> len
) {
1913 memset(target_data
+ len
, 0, tgt_len
- len
);
1917 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1918 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1919 if (msg_controllen
< tgt_space
) {
1920 tgt_space
= msg_controllen
;
1922 msg_controllen
-= tgt_space
;
1924 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1925 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1928 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1930 target_msgh
->msg_controllen
= tswapal(space
);
1934 /* do_setsockopt() Must return target values and target errnos. */
1935 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1936 abi_ulong optval_addr
, socklen_t optlen
)
1940 struct ip_mreqn
*ip_mreq
;
1941 struct ip_mreq_source
*ip_mreq_source
;
1945 /* TCP options all take an 'int' value. */
1946 if (optlen
< sizeof(uint32_t))
1947 return -TARGET_EINVAL
;
1949 if (get_user_u32(val
, optval_addr
))
1950 return -TARGET_EFAULT
;
1951 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1958 case IP_ROUTER_ALERT
:
1962 case IP_MTU_DISCOVER
:
1969 case IP_MULTICAST_TTL
:
1970 case IP_MULTICAST_LOOP
:
1972 if (optlen
>= sizeof(uint32_t)) {
1973 if (get_user_u32(val
, optval_addr
))
1974 return -TARGET_EFAULT
;
1975 } else if (optlen
>= 1) {
1976 if (get_user_u8(val
, optval_addr
))
1977 return -TARGET_EFAULT
;
1979 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1981 case IP_ADD_MEMBERSHIP
:
1982 case IP_DROP_MEMBERSHIP
:
1983 if (optlen
< sizeof (struct target_ip_mreq
) ||
1984 optlen
> sizeof (struct target_ip_mreqn
))
1985 return -TARGET_EINVAL
;
1987 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1988 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1989 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1992 case IP_BLOCK_SOURCE
:
1993 case IP_UNBLOCK_SOURCE
:
1994 case IP_ADD_SOURCE_MEMBERSHIP
:
1995 case IP_DROP_SOURCE_MEMBERSHIP
:
1996 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1997 return -TARGET_EINVAL
;
1999 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2000 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2001 unlock_user (ip_mreq_source
, optval_addr
, 0);
2010 case IPV6_MTU_DISCOVER
:
2013 case IPV6_RECVPKTINFO
:
2014 case IPV6_UNICAST_HOPS
:
2015 case IPV6_MULTICAST_HOPS
:
2016 case IPV6_MULTICAST_LOOP
:
2018 case IPV6_RECVHOPLIMIT
:
2019 case IPV6_2292HOPLIMIT
:
2022 case IPV6_2292PKTINFO
:
2023 case IPV6_RECVTCLASS
:
2024 case IPV6_RECVRTHDR
:
2025 case IPV6_2292RTHDR
:
2026 case IPV6_RECVHOPOPTS
:
2027 case IPV6_2292HOPOPTS
:
2028 case IPV6_RECVDSTOPTS
:
2029 case IPV6_2292DSTOPTS
:
2031 #ifdef IPV6_RECVPATHMTU
2032 case IPV6_RECVPATHMTU
:
2034 #ifdef IPV6_TRANSPARENT
2035 case IPV6_TRANSPARENT
:
2037 #ifdef IPV6_FREEBIND
2040 #ifdef IPV6_RECVORIGDSTADDR
2041 case IPV6_RECVORIGDSTADDR
:
2044 if (optlen
< sizeof(uint32_t)) {
2045 return -TARGET_EINVAL
;
2047 if (get_user_u32(val
, optval_addr
)) {
2048 return -TARGET_EFAULT
;
2050 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2051 &val
, sizeof(val
)));
2055 struct in6_pktinfo pki
;
2057 if (optlen
< sizeof(pki
)) {
2058 return -TARGET_EINVAL
;
2061 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2062 return -TARGET_EFAULT
;
2065 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2067 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2068 &pki
, sizeof(pki
)));
2071 case IPV6_ADD_MEMBERSHIP
:
2072 case IPV6_DROP_MEMBERSHIP
:
2074 struct ipv6_mreq ipv6mreq
;
2076 if (optlen
< sizeof(ipv6mreq
)) {
2077 return -TARGET_EINVAL
;
2080 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2081 return -TARGET_EFAULT
;
2084 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2086 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2087 &ipv6mreq
, sizeof(ipv6mreq
)));
2098 struct icmp6_filter icmp6f
;
2100 if (optlen
> sizeof(icmp6f
)) {
2101 optlen
= sizeof(icmp6f
);
2104 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2105 return -TARGET_EFAULT
;
2108 for (val
= 0; val
< 8; val
++) {
2109 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2112 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2124 /* those take an u32 value */
2125 if (optlen
< sizeof(uint32_t)) {
2126 return -TARGET_EINVAL
;
2129 if (get_user_u32(val
, optval_addr
)) {
2130 return -TARGET_EFAULT
;
2132 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2133 &val
, sizeof(val
)));
2140 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2145 char *alg_key
= g_malloc(optlen
);
2148 return -TARGET_ENOMEM
;
2150 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2152 return -TARGET_EFAULT
;
2154 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2159 case ALG_SET_AEAD_AUTHSIZE
:
2161 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2170 case TARGET_SOL_SOCKET
:
2172 case TARGET_SO_RCVTIMEO
:
2176 optname
= SO_RCVTIMEO
;
2179 if (optlen
!= sizeof(struct target_timeval
)) {
2180 return -TARGET_EINVAL
;
2183 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2184 return -TARGET_EFAULT
;
2187 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2191 case TARGET_SO_SNDTIMEO
:
2192 optname
= SO_SNDTIMEO
;
2194 case TARGET_SO_ATTACH_FILTER
:
2196 struct target_sock_fprog
*tfprog
;
2197 struct target_sock_filter
*tfilter
;
2198 struct sock_fprog fprog
;
2199 struct sock_filter
*filter
;
2202 if (optlen
!= sizeof(*tfprog
)) {
2203 return -TARGET_EINVAL
;
2205 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2206 return -TARGET_EFAULT
;
2208 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2209 tswapal(tfprog
->filter
), 0)) {
2210 unlock_user_struct(tfprog
, optval_addr
, 1);
2211 return -TARGET_EFAULT
;
2214 fprog
.len
= tswap16(tfprog
->len
);
2215 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2216 if (filter
== NULL
) {
2217 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2218 unlock_user_struct(tfprog
, optval_addr
, 1);
2219 return -TARGET_ENOMEM
;
2221 for (i
= 0; i
< fprog
.len
; i
++) {
2222 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2223 filter
[i
].jt
= tfilter
[i
].jt
;
2224 filter
[i
].jf
= tfilter
[i
].jf
;
2225 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2227 fprog
.filter
= filter
;
2229 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2230 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2233 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2234 unlock_user_struct(tfprog
, optval_addr
, 1);
2237 case TARGET_SO_BINDTODEVICE
:
2239 char *dev_ifname
, *addr_ifname
;
2241 if (optlen
> IFNAMSIZ
- 1) {
2242 optlen
= IFNAMSIZ
- 1;
2244 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2246 return -TARGET_EFAULT
;
2248 optname
= SO_BINDTODEVICE
;
2249 addr_ifname
= alloca(IFNAMSIZ
);
2250 memcpy(addr_ifname
, dev_ifname
, optlen
);
2251 addr_ifname
[optlen
] = 0;
2252 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2253 addr_ifname
, optlen
));
2254 unlock_user (dev_ifname
, optval_addr
, 0);
2257 case TARGET_SO_LINGER
:
2260 struct target_linger
*tlg
;
2262 if (optlen
!= sizeof(struct target_linger
)) {
2263 return -TARGET_EINVAL
;
2265 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2266 return -TARGET_EFAULT
;
2268 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2269 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2270 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2272 unlock_user_struct(tlg
, optval_addr
, 0);
2275 /* Options with 'int' argument. */
2276 case TARGET_SO_DEBUG
:
2279 case TARGET_SO_REUSEADDR
:
2280 optname
= SO_REUSEADDR
;
2283 case TARGET_SO_REUSEPORT
:
2284 optname
= SO_REUSEPORT
;
2287 case TARGET_SO_TYPE
:
2290 case TARGET_SO_ERROR
:
2293 case TARGET_SO_DONTROUTE
:
2294 optname
= SO_DONTROUTE
;
2296 case TARGET_SO_BROADCAST
:
2297 optname
= SO_BROADCAST
;
2299 case TARGET_SO_SNDBUF
:
2300 optname
= SO_SNDBUF
;
2302 case TARGET_SO_SNDBUFFORCE
:
2303 optname
= SO_SNDBUFFORCE
;
2305 case TARGET_SO_RCVBUF
:
2306 optname
= SO_RCVBUF
;
2308 case TARGET_SO_RCVBUFFORCE
:
2309 optname
= SO_RCVBUFFORCE
;
2311 case TARGET_SO_KEEPALIVE
:
2312 optname
= SO_KEEPALIVE
;
2314 case TARGET_SO_OOBINLINE
:
2315 optname
= SO_OOBINLINE
;
2317 case TARGET_SO_NO_CHECK
:
2318 optname
= SO_NO_CHECK
;
2320 case TARGET_SO_PRIORITY
:
2321 optname
= SO_PRIORITY
;
2324 case TARGET_SO_BSDCOMPAT
:
2325 optname
= SO_BSDCOMPAT
;
2328 case TARGET_SO_PASSCRED
:
2329 optname
= SO_PASSCRED
;
2331 case TARGET_SO_PASSSEC
:
2332 optname
= SO_PASSSEC
;
2334 case TARGET_SO_TIMESTAMP
:
2335 optname
= SO_TIMESTAMP
;
2337 case TARGET_SO_RCVLOWAT
:
2338 optname
= SO_RCVLOWAT
;
2343 if (optlen
< sizeof(uint32_t))
2344 return -TARGET_EINVAL
;
2346 if (get_user_u32(val
, optval_addr
))
2347 return -TARGET_EFAULT
;
2348 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2353 case NETLINK_PKTINFO
:
2354 case NETLINK_ADD_MEMBERSHIP
:
2355 case NETLINK_DROP_MEMBERSHIP
:
2356 case NETLINK_BROADCAST_ERROR
:
2357 case NETLINK_NO_ENOBUFS
:
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2359 case NETLINK_LISTEN_ALL_NSID
:
2360 case NETLINK_CAP_ACK
:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2363 case NETLINK_EXT_ACK
:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2366 case NETLINK_GET_STRICT_CHK
:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2373 if (optlen
< sizeof(uint32_t)) {
2374 return -TARGET_EINVAL
;
2376 if (get_user_u32(val
, optval_addr
)) {
2377 return -TARGET_EFAULT
;
2379 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2382 #endif /* SOL_NETLINK */
2385 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2387 ret
= -TARGET_ENOPROTOOPT
;
2392 /* do_getsockopt() Must return target values and target errnos. */
2393 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2394 abi_ulong optval_addr
, abi_ulong optlen
)
2401 case TARGET_SOL_SOCKET
:
2404 /* These don't just return a single integer */
2405 case TARGET_SO_PEERNAME
:
2407 case TARGET_SO_RCVTIMEO
: {
2411 optname
= SO_RCVTIMEO
;
2414 if (get_user_u32(len
, optlen
)) {
2415 return -TARGET_EFAULT
;
2418 return -TARGET_EINVAL
;
2422 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2427 if (len
> sizeof(struct target_timeval
)) {
2428 len
= sizeof(struct target_timeval
);
2430 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2431 return -TARGET_EFAULT
;
2433 if (put_user_u32(len
, optlen
)) {
2434 return -TARGET_EFAULT
;
2438 case TARGET_SO_SNDTIMEO
:
2439 optname
= SO_SNDTIMEO
;
2441 case TARGET_SO_PEERCRED
: {
2444 struct target_ucred
*tcr
;
2446 if (get_user_u32(len
, optlen
)) {
2447 return -TARGET_EFAULT
;
2450 return -TARGET_EINVAL
;
2454 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2462 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2463 return -TARGET_EFAULT
;
2465 __put_user(cr
.pid
, &tcr
->pid
);
2466 __put_user(cr
.uid
, &tcr
->uid
);
2467 __put_user(cr
.gid
, &tcr
->gid
);
2468 unlock_user_struct(tcr
, optval_addr
, 1);
2469 if (put_user_u32(len
, optlen
)) {
2470 return -TARGET_EFAULT
;
2474 case TARGET_SO_PEERSEC
: {
2477 if (get_user_u32(len
, optlen
)) {
2478 return -TARGET_EFAULT
;
2481 return -TARGET_EINVAL
;
2483 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2485 return -TARGET_EFAULT
;
2488 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2490 if (put_user_u32(lv
, optlen
)) {
2491 ret
= -TARGET_EFAULT
;
2493 unlock_user(name
, optval_addr
, lv
);
2496 case TARGET_SO_LINGER
:
2500 struct target_linger
*tlg
;
2502 if (get_user_u32(len
, optlen
)) {
2503 return -TARGET_EFAULT
;
2506 return -TARGET_EINVAL
;
2510 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2518 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2519 return -TARGET_EFAULT
;
2521 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2522 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2523 unlock_user_struct(tlg
, optval_addr
, 1);
2524 if (put_user_u32(len
, optlen
)) {
2525 return -TARGET_EFAULT
;
2529 /* Options with 'int' argument. */
2530 case TARGET_SO_DEBUG
:
2533 case TARGET_SO_REUSEADDR
:
2534 optname
= SO_REUSEADDR
;
2537 case TARGET_SO_REUSEPORT
:
2538 optname
= SO_REUSEPORT
;
2541 case TARGET_SO_TYPE
:
2544 case TARGET_SO_ERROR
:
2547 case TARGET_SO_DONTROUTE
:
2548 optname
= SO_DONTROUTE
;
2550 case TARGET_SO_BROADCAST
:
2551 optname
= SO_BROADCAST
;
2553 case TARGET_SO_SNDBUF
:
2554 optname
= SO_SNDBUF
;
2556 case TARGET_SO_RCVBUF
:
2557 optname
= SO_RCVBUF
;
2559 case TARGET_SO_KEEPALIVE
:
2560 optname
= SO_KEEPALIVE
;
2562 case TARGET_SO_OOBINLINE
:
2563 optname
= SO_OOBINLINE
;
2565 case TARGET_SO_NO_CHECK
:
2566 optname
= SO_NO_CHECK
;
2568 case TARGET_SO_PRIORITY
:
2569 optname
= SO_PRIORITY
;
2572 case TARGET_SO_BSDCOMPAT
:
2573 optname
= SO_BSDCOMPAT
;
2576 case TARGET_SO_PASSCRED
:
2577 optname
= SO_PASSCRED
;
2579 case TARGET_SO_TIMESTAMP
:
2580 optname
= SO_TIMESTAMP
;
2582 case TARGET_SO_RCVLOWAT
:
2583 optname
= SO_RCVLOWAT
;
2585 case TARGET_SO_ACCEPTCONN
:
2586 optname
= SO_ACCEPTCONN
;
2593 /* TCP options all take an 'int' value. */
2595 if (get_user_u32(len
, optlen
))
2596 return -TARGET_EFAULT
;
2598 return -TARGET_EINVAL
;
2600 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2603 if (optname
== SO_TYPE
) {
2604 val
= host_to_target_sock_type(val
);
2609 if (put_user_u32(val
, optval_addr
))
2610 return -TARGET_EFAULT
;
2612 if (put_user_u8(val
, optval_addr
))
2613 return -TARGET_EFAULT
;
2615 if (put_user_u32(len
, optlen
))
2616 return -TARGET_EFAULT
;
2623 case IP_ROUTER_ALERT
:
2627 case IP_MTU_DISCOVER
:
2633 case IP_MULTICAST_TTL
:
2634 case IP_MULTICAST_LOOP
:
2635 if (get_user_u32(len
, optlen
))
2636 return -TARGET_EFAULT
;
2638 return -TARGET_EINVAL
;
2640 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2643 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2645 if (put_user_u32(len
, optlen
)
2646 || put_user_u8(val
, optval_addr
))
2647 return -TARGET_EFAULT
;
2649 if (len
> sizeof(int))
2651 if (put_user_u32(len
, optlen
)
2652 || put_user_u32(val
, optval_addr
))
2653 return -TARGET_EFAULT
;
2657 ret
= -TARGET_ENOPROTOOPT
;
2663 case IPV6_MTU_DISCOVER
:
2666 case IPV6_RECVPKTINFO
:
2667 case IPV6_UNICAST_HOPS
:
2668 case IPV6_MULTICAST_HOPS
:
2669 case IPV6_MULTICAST_LOOP
:
2671 case IPV6_RECVHOPLIMIT
:
2672 case IPV6_2292HOPLIMIT
:
2675 case IPV6_2292PKTINFO
:
2676 case IPV6_RECVTCLASS
:
2677 case IPV6_RECVRTHDR
:
2678 case IPV6_2292RTHDR
:
2679 case IPV6_RECVHOPOPTS
:
2680 case IPV6_2292HOPOPTS
:
2681 case IPV6_RECVDSTOPTS
:
2682 case IPV6_2292DSTOPTS
:
2684 #ifdef IPV6_RECVPATHMTU
2685 case IPV6_RECVPATHMTU
:
2687 #ifdef IPV6_TRANSPARENT
2688 case IPV6_TRANSPARENT
:
2690 #ifdef IPV6_FREEBIND
2693 #ifdef IPV6_RECVORIGDSTADDR
2694 case IPV6_RECVORIGDSTADDR
:
2696 if (get_user_u32(len
, optlen
))
2697 return -TARGET_EFAULT
;
2699 return -TARGET_EINVAL
;
2701 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2704 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2706 if (put_user_u32(len
, optlen
)
2707 || put_user_u8(val
, optval_addr
))
2708 return -TARGET_EFAULT
;
2710 if (len
> sizeof(int))
2712 if (put_user_u32(len
, optlen
)
2713 || put_user_u32(val
, optval_addr
))
2714 return -TARGET_EFAULT
;
2718 ret
= -TARGET_ENOPROTOOPT
;
2725 case NETLINK_PKTINFO
:
2726 case NETLINK_BROADCAST_ERROR
:
2727 case NETLINK_NO_ENOBUFS
:
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2729 case NETLINK_LISTEN_ALL_NSID
:
2730 case NETLINK_CAP_ACK
:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2733 case NETLINK_EXT_ACK
:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2736 case NETLINK_GET_STRICT_CHK
:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2738 if (get_user_u32(len
, optlen
)) {
2739 return -TARGET_EFAULT
;
2741 if (len
!= sizeof(val
)) {
2742 return -TARGET_EINVAL
;
2745 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2749 if (put_user_u32(lv
, optlen
)
2750 || put_user_u32(val
, optval_addr
)) {
2751 return -TARGET_EFAULT
;
2754 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2755 case NETLINK_LIST_MEMBERSHIPS
:
2759 if (get_user_u32(len
, optlen
)) {
2760 return -TARGET_EFAULT
;
2763 return -TARGET_EINVAL
;
2765 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2767 return -TARGET_EFAULT
;
2770 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2772 unlock_user(results
, optval_addr
, 0);
2775 /* swap host endianess to target endianess. */
2776 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2777 results
[i
] = tswap32(results
[i
]);
2779 if (put_user_u32(lv
, optlen
)) {
2780 return -TARGET_EFAULT
;
2782 unlock_user(results
, optval_addr
, 0);
2785 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2790 #endif /* SOL_NETLINK */
2793 qemu_log_mask(LOG_UNIMP
,
2794 "getsockopt level=%d optname=%d not yet supported\n",
2796 ret
= -TARGET_EOPNOTSUPP
;
2802 /* Convert target low/high pair representing file offset into the host
2803 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2804 * as the kernel doesn't handle them either.
2806 static void target_to_host_low_high(abi_ulong tlow
,
2808 unsigned long *hlow
,
2809 unsigned long *hhigh
)
2811 uint64_t off
= tlow
|
2812 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2813 TARGET_LONG_BITS
/ 2;
2816 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2819 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2820 abi_ulong count
, int copy
)
2822 struct target_iovec
*target_vec
;
2824 abi_ulong total_len
, max_len
;
2827 bool bad_address
= false;
2833 if (count
> IOV_MAX
) {
2838 vec
= g_try_new0(struct iovec
, count
);
2844 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2845 count
* sizeof(struct target_iovec
), 1);
2846 if (target_vec
== NULL
) {
2851 /* ??? If host page size > target page size, this will result in a
2852 value larger than what we can actually support. */
2853 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2856 for (i
= 0; i
< count
; i
++) {
2857 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2858 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2863 } else if (len
== 0) {
2864 /* Zero length pointer is ignored. */
2865 vec
[i
].iov_base
= 0;
2867 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2868 /* If the first buffer pointer is bad, this is a fault. But
2869 * subsequent bad buffers will result in a partial write; this
2870 * is realized by filling the vector with null pointers and
2872 if (!vec
[i
].iov_base
) {
2883 if (len
> max_len
- total_len
) {
2884 len
= max_len
- total_len
;
2887 vec
[i
].iov_len
= len
;
2891 unlock_user(target_vec
, target_addr
, 0);
2896 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2897 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2900 unlock_user(target_vec
, target_addr
, 0);
2907 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2908 abi_ulong count
, int copy
)
2910 struct target_iovec
*target_vec
;
2913 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2914 count
* sizeof(struct target_iovec
), 1);
2916 for (i
= 0; i
< count
; i
++) {
2917 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2918 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2922 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2924 unlock_user(target_vec
, target_addr
, 0);
2930 static inline int target_to_host_sock_type(int *type
)
2933 int target_type
= *type
;
2935 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2936 case TARGET_SOCK_DGRAM
:
2937 host_type
= SOCK_DGRAM
;
2939 case TARGET_SOCK_STREAM
:
2940 host_type
= SOCK_STREAM
;
2943 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2946 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2947 #if defined(SOCK_CLOEXEC)
2948 host_type
|= SOCK_CLOEXEC
;
2950 return -TARGET_EINVAL
;
2953 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2954 #if defined(SOCK_NONBLOCK)
2955 host_type
|= SOCK_NONBLOCK
;
2956 #elif !defined(O_NONBLOCK)
2957 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2979 /* do_socket() Must return target values and target errnos. */
2980 static abi_long
do_socket(int domain
, int type
, int protocol
)
2982 int target_type
= type
;
2985 ret
= target_to_host_sock_type(&type
);
2990 if (domain
== PF_NETLINK
&& !(
2991 #ifdef CONFIG_RTNETLINK
2992 protocol
== NETLINK_ROUTE
||
2994 protocol
== NETLINK_KOBJECT_UEVENT
||
2995 protocol
== NETLINK_AUDIT
)) {
2996 return -TARGET_EPROTONOSUPPORT
;
2999 if (domain
== AF_PACKET
||
3000 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3001 protocol
= tswap16(protocol
);
3004 ret
= get_errno(socket(domain
, type
, protocol
));
3006 ret
= sock_flags_fixup(ret
, target_type
);
3007 if (type
== SOCK_PACKET
) {
3008 /* Manage an obsolete case :
3009 * if socket type is SOCK_PACKET, bind by name
3011 fd_trans_register(ret
, &target_packet_trans
);
3012 } else if (domain
== PF_NETLINK
) {
3014 #ifdef CONFIG_RTNETLINK
3016 fd_trans_register(ret
, &target_netlink_route_trans
);
3019 case NETLINK_KOBJECT_UEVENT
:
3020 /* nothing to do: messages are strings */
3023 fd_trans_register(ret
, &target_netlink_audit_trans
);
3026 g_assert_not_reached();
3033 /* do_bind() Must return target values and target errnos. */
3034 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3040 if ((int)addrlen
< 0) {
3041 return -TARGET_EINVAL
;
3044 addr
= alloca(addrlen
+1);
3046 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3050 return get_errno(bind(sockfd
, addr
, addrlen
));
3053 /* do_connect() Must return target values and target errnos. */
3054 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3060 if ((int)addrlen
< 0) {
3061 return -TARGET_EINVAL
;
3064 addr
= alloca(addrlen
+1);
3066 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3070 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3073 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3074 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3075 int flags
, int send
)
3081 abi_ulong target_vec
;
3083 if (msgp
->msg_name
) {
3084 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3085 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3086 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3087 tswapal(msgp
->msg_name
),
3089 if (ret
== -TARGET_EFAULT
) {
3090 /* For connected sockets msg_name and msg_namelen must
3091 * be ignored, so returning EFAULT immediately is wrong.
3092 * Instead, pass a bad msg_name to the host kernel, and
3093 * let it decide whether to return EFAULT or not.
3095 msg
.msg_name
= (void *)-1;
3100 msg
.msg_name
= NULL
;
3101 msg
.msg_namelen
= 0;
3103 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3104 msg
.msg_control
= alloca(msg
.msg_controllen
);
3105 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3107 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3109 count
= tswapal(msgp
->msg_iovlen
);
3110 target_vec
= tswapal(msgp
->msg_iov
);
3112 if (count
> IOV_MAX
) {
3113 /* sendrcvmsg returns a different errno for this condition than
3114 * readv/writev, so we must catch it here before lock_iovec() does.
3116 ret
= -TARGET_EMSGSIZE
;
3120 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3121 target_vec
, count
, send
);
3123 ret
= -host_to_target_errno(errno
);
3126 msg
.msg_iovlen
= count
;
3130 if (fd_trans_target_to_host_data(fd
)) {
3133 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3134 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3135 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3136 msg
.msg_iov
->iov_len
);
3138 msg
.msg_iov
->iov_base
= host_msg
;
3139 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3143 ret
= target_to_host_cmsg(&msg
, msgp
);
3145 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3149 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3150 if (!is_error(ret
)) {
3152 if (fd_trans_host_to_target_data(fd
)) {
3153 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3154 MIN(msg
.msg_iov
->iov_len
, len
));
3156 ret
= host_to_target_cmsg(msgp
, &msg
);
3158 if (!is_error(ret
)) {
3159 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3160 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3161 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3162 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3163 msg
.msg_name
, msg
.msg_namelen
);
3175 unlock_iovec(vec
, target_vec
, count
, !send
);
3180 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3181 int flags
, int send
)
3184 struct target_msghdr
*msgp
;
3186 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3190 return -TARGET_EFAULT
;
3192 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3193 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3197 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3198 * so it might not have this *mmsg-specific flag either.
3200 #ifndef MSG_WAITFORONE
3201 #define MSG_WAITFORONE 0x10000
3204 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3205 unsigned int vlen
, unsigned int flags
,
3208 struct target_mmsghdr
*mmsgp
;
3212 if (vlen
> UIO_MAXIOV
) {
3216 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3218 return -TARGET_EFAULT
;
3221 for (i
= 0; i
< vlen
; i
++) {
3222 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3223 if (is_error(ret
)) {
3226 mmsgp
[i
].msg_len
= tswap32(ret
);
3227 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3228 if (flags
& MSG_WAITFORONE
) {
3229 flags
|= MSG_DONTWAIT
;
3233 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3235 /* Return number of datagrams sent if we sent any at all;
3236 * otherwise return the error.
3244 /* do_accept4() Must return target values and target errnos. */
3245 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3246 abi_ulong target_addrlen_addr
, int flags
)
3248 socklen_t addrlen
, ret_addrlen
;
3253 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3255 if (target_addr
== 0) {
3256 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3259 /* linux returns EINVAL if addrlen pointer is invalid */
3260 if (get_user_u32(addrlen
, target_addrlen_addr
))
3261 return -TARGET_EINVAL
;
3263 if ((int)addrlen
< 0) {
3264 return -TARGET_EINVAL
;
3267 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3268 return -TARGET_EINVAL
;
3270 addr
= alloca(addrlen
);
3272 ret_addrlen
= addrlen
;
3273 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3274 if (!is_error(ret
)) {
3275 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3276 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3277 ret
= -TARGET_EFAULT
;
3283 /* do_getpeername() Must return target values and target errnos. */
3284 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3285 abi_ulong target_addrlen_addr
)
3287 socklen_t addrlen
, ret_addrlen
;
3291 if (get_user_u32(addrlen
, target_addrlen_addr
))
3292 return -TARGET_EFAULT
;
3294 if ((int)addrlen
< 0) {
3295 return -TARGET_EINVAL
;
3298 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3299 return -TARGET_EFAULT
;
3301 addr
= alloca(addrlen
);
3303 ret_addrlen
= addrlen
;
3304 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3305 if (!is_error(ret
)) {
3306 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3307 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3308 ret
= -TARGET_EFAULT
;
3314 /* do_getsockname() Must return target values and target errnos. */
3315 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3316 abi_ulong target_addrlen_addr
)
3318 socklen_t addrlen
, ret_addrlen
;
3322 if (get_user_u32(addrlen
, target_addrlen_addr
))
3323 return -TARGET_EFAULT
;
3325 if ((int)addrlen
< 0) {
3326 return -TARGET_EINVAL
;
3329 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3330 return -TARGET_EFAULT
;
3332 addr
= alloca(addrlen
);
3334 ret_addrlen
= addrlen
;
3335 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3336 if (!is_error(ret
)) {
3337 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3338 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3339 ret
= -TARGET_EFAULT
;
3345 /* do_socketpair() Must return target values and target errnos. */
3346 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3347 abi_ulong target_tab_addr
)
3352 target_to_host_sock_type(&type
);
3354 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3355 if (!is_error(ret
)) {
3356 if (put_user_s32(tab
[0], target_tab_addr
)
3357 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3358 ret
= -TARGET_EFAULT
;
3363 /* do_sendto() Must return target values and target errnos. */
3364 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3365 abi_ulong target_addr
, socklen_t addrlen
)
3369 void *copy_msg
= NULL
;
3372 if ((int)addrlen
< 0) {
3373 return -TARGET_EINVAL
;
3376 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3378 return -TARGET_EFAULT
;
3379 if (fd_trans_target_to_host_data(fd
)) {
3380 copy_msg
= host_msg
;
3381 host_msg
= g_malloc(len
);
3382 memcpy(host_msg
, copy_msg
, len
);
3383 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3389 addr
= alloca(addrlen
+1);
3390 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3394 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3396 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3401 host_msg
= copy_msg
;
3403 unlock_user(host_msg
, msg
, 0);
3407 /* do_recvfrom() Must return target values and target errnos. */
3408 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3409 abi_ulong target_addr
,
3410 abi_ulong target_addrlen
)
3412 socklen_t addrlen
, ret_addrlen
;
3417 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3419 return -TARGET_EFAULT
;
3421 if (get_user_u32(addrlen
, target_addrlen
)) {
3422 ret
= -TARGET_EFAULT
;
3425 if ((int)addrlen
< 0) {
3426 ret
= -TARGET_EINVAL
;
3429 addr
= alloca(addrlen
);
3430 ret_addrlen
= addrlen
;
3431 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3432 addr
, &ret_addrlen
));
3434 addr
= NULL
; /* To keep compiler quiet. */
3435 addrlen
= 0; /* To keep compiler quiet. */
3436 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3438 if (!is_error(ret
)) {
3439 if (fd_trans_host_to_target_data(fd
)) {
3441 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3442 if (is_error(trans
)) {
3448 host_to_target_sockaddr(target_addr
, addr
,
3449 MIN(addrlen
, ret_addrlen
));
3450 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3451 ret
= -TARGET_EFAULT
;
3455 unlock_user(host_msg
, msg
, len
);
3458 unlock_user(host_msg
, msg
, 0);
3463 #ifdef TARGET_NR_socketcall
3464 /* do_socketcall() must return target values and target errnos. */
3465 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3467 static const unsigned nargs
[] = { /* number of arguments per operation */
3468 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3469 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3470 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3471 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3472 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3473 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3474 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3475 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3476 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3477 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3478 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3479 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3480 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3481 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3482 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3483 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3484 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3485 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3486 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3487 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3489 abi_long a
[6]; /* max 6 args */
3492 /* check the range of the first argument num */
3493 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3494 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3495 return -TARGET_EINVAL
;
3497 /* ensure we have space for args */
3498 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3499 return -TARGET_EINVAL
;
3501 /* collect the arguments in a[] according to nargs[] */
3502 for (i
= 0; i
< nargs
[num
]; ++i
) {
3503 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3504 return -TARGET_EFAULT
;
3507 /* now when we have the args, invoke the appropriate underlying function */
3509 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3510 return do_socket(a
[0], a
[1], a
[2]);
3511 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3512 return do_bind(a
[0], a
[1], a
[2]);
3513 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3514 return do_connect(a
[0], a
[1], a
[2]);
3515 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3516 return get_errno(listen(a
[0], a
[1]));
3517 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3518 return do_accept4(a
[0], a
[1], a
[2], 0);
3519 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3520 return do_getsockname(a
[0], a
[1], a
[2]);
3521 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3522 return do_getpeername(a
[0], a
[1], a
[2]);
3523 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3524 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3525 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3526 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3527 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3528 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3529 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3530 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3531 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3532 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3533 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3534 return get_errno(shutdown(a
[0], a
[1]));
3535 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3536 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3537 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3538 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3539 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3540 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3541 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3542 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3543 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3544 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3545 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3546 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3547 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3548 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3550 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3551 return -TARGET_EINVAL
;
3556 #define N_SHM_REGIONS 32
3558 static struct shm_region
{
3562 } shm_regions
[N_SHM_REGIONS
];
3564 #ifndef TARGET_SEMID64_DS
3565 /* asm-generic version of this struct */
3566 struct target_semid64_ds
3568 struct target_ipc_perm sem_perm
;
3569 abi_ulong sem_otime
;
3570 #if TARGET_ABI_BITS == 32
3571 abi_ulong __unused1
;
3573 abi_ulong sem_ctime
;
3574 #if TARGET_ABI_BITS == 32
3575 abi_ulong __unused2
;
3577 abi_ulong sem_nsems
;
3578 abi_ulong __unused3
;
3579 abi_ulong __unused4
;
3583 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3584 abi_ulong target_addr
)
3586 struct target_ipc_perm
*target_ip
;
3587 struct target_semid64_ds
*target_sd
;
3589 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3590 return -TARGET_EFAULT
;
3591 target_ip
= &(target_sd
->sem_perm
);
3592 host_ip
->__key
= tswap32(target_ip
->__key
);
3593 host_ip
->uid
= tswap32(target_ip
->uid
);
3594 host_ip
->gid
= tswap32(target_ip
->gid
);
3595 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3596 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3597 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3598 host_ip
->mode
= tswap32(target_ip
->mode
);
3600 host_ip
->mode
= tswap16(target_ip
->mode
);
3602 #if defined(TARGET_PPC)
3603 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3605 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3607 unlock_user_struct(target_sd
, target_addr
, 0);
3611 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3612 struct ipc_perm
*host_ip
)
3614 struct target_ipc_perm
*target_ip
;
3615 struct target_semid64_ds
*target_sd
;
3617 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3618 return -TARGET_EFAULT
;
3619 target_ip
= &(target_sd
->sem_perm
);
3620 target_ip
->__key
= tswap32(host_ip
->__key
);
3621 target_ip
->uid
= tswap32(host_ip
->uid
);
3622 target_ip
->gid
= tswap32(host_ip
->gid
);
3623 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3624 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3625 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3626 target_ip
->mode
= tswap32(host_ip
->mode
);
3628 target_ip
->mode
= tswap16(host_ip
->mode
);
3630 #if defined(TARGET_PPC)
3631 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3633 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3635 unlock_user_struct(target_sd
, target_addr
, 1);
3639 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3640 abi_ulong target_addr
)
3642 struct target_semid64_ds
*target_sd
;
3644 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3645 return -TARGET_EFAULT
;
3646 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3647 return -TARGET_EFAULT
;
3648 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3649 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3650 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3651 unlock_user_struct(target_sd
, target_addr
, 0);
3655 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3656 struct semid_ds
*host_sd
)
3658 struct target_semid64_ds
*target_sd
;
3660 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3661 return -TARGET_EFAULT
;
3662 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3663 return -TARGET_EFAULT
;
3664 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3665 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3666 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3667 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (all plain ints, as on the host).
 * Field list matches the ten values copied by host_to_target_seminfo(). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3684 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3685 struct seminfo
*host_seminfo
)
3687 struct target_seminfo
*target_seminfo
;
3688 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3689 return -TARGET_EFAULT
;
3690 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3691 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3692 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3693 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3694 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3695 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3696 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3697 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3698 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3699 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3700 unlock_user_struct(target_seminfo
, target_addr
, 1);
3706 struct semid_ds
*buf
;
3707 unsigned short *array
;
3708 struct seminfo
*__buf
;
3711 union target_semun
{
3718 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3719 abi_ulong target_addr
)
3722 unsigned short *array
;
3724 struct semid_ds semid_ds
;
3727 semun
.buf
= &semid_ds
;
3729 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3731 return get_errno(ret
);
3733 nsems
= semid_ds
.sem_nsems
;
3735 *host_array
= g_try_new(unsigned short, nsems
);
3737 return -TARGET_ENOMEM
;
3739 array
= lock_user(VERIFY_READ
, target_addr
,
3740 nsems
*sizeof(unsigned short), 1);
3742 g_free(*host_array
);
3743 return -TARGET_EFAULT
;
3746 for(i
=0; i
<nsems
; i
++) {
3747 __get_user((*host_array
)[i
], &array
[i
]);
3749 unlock_user(array
, target_addr
, 0);
3754 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3755 unsigned short **host_array
)
3758 unsigned short *array
;
3760 struct semid_ds semid_ds
;
3763 semun
.buf
= &semid_ds
;
3765 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3767 return get_errno(ret
);
3769 nsems
= semid_ds
.sem_nsems
;
3771 array
= lock_user(VERIFY_WRITE
, target_addr
,
3772 nsems
*sizeof(unsigned short), 0);
3774 return -TARGET_EFAULT
;
3776 for(i
=0; i
<nsems
; i
++) {
3777 __put_user((*host_array
)[i
], &array
[i
]);
3779 g_free(*host_array
);
3780 unlock_user(array
, target_addr
, 1);
3785 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3786 abi_ulong target_arg
)
3788 union target_semun target_su
= { .buf
= target_arg
};
3790 struct semid_ds dsarg
;
3791 unsigned short *array
= NULL
;
3792 struct seminfo seminfo
;
3793 abi_long ret
= -TARGET_EINVAL
;
3800 /* In 64 bit cross-endian situations, we will erroneously pick up
3801 * the wrong half of the union for the "val" element. To rectify
3802 * this, the entire 8-byte structure is byteswapped, followed by
3803 * a swap of the 4 byte val field. In other cases, the data is
3804 * already in proper host byte order. */
3805 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3806 target_su
.buf
= tswapal(target_su
.buf
);
3807 arg
.val
= tswap32(target_su
.val
);
3809 arg
.val
= target_su
.val
;
3811 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3815 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3819 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3820 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3827 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3831 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3832 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3838 arg
.__buf
= &seminfo
;
3839 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3840 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3848 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation (signed delta) */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
3861 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3862 abi_ulong target_addr
,
3865 struct target_sembuf
*target_sembuf
;
3868 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3869 nsops
*sizeof(struct target_sembuf
), 1);
3871 return -TARGET_EFAULT
;
3873 for(i
=0; i
<nsops
; i
++) {
3874 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3875 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3876 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3879 unlock_user(target_sembuf
, target_addr
, 0);
3884 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3885 defined(TARGET_NR_semtimedop)
3888 * This macro is required to handle the s390 variants, which passes the
3889 * arguments in a different order than default.
3892 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3893 (__nsops), (__timeout), (__sops)
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896 (__nsops), 0, (__sops), (__timeout)
3899 static inline abi_long
do_semtimedop(int semid
,
3904 struct sembuf sops
[nsops
];
3905 struct timespec ts
, *pts
= NULL
;
3910 if (target_to_host_timespec(pts
, timeout
)) {
3911 return -TARGET_EFAULT
;
3915 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3916 return -TARGET_EFAULT
;
3918 ret
= -TARGET_ENOSYS
;
3919 #ifdef __NR_semtimedop
3920 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
3923 if (ret
== -TARGET_ENOSYS
) {
3924 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
3925 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
3932 struct target_msqid_ds
3934 struct target_ipc_perm msg_perm
;
3935 abi_ulong msg_stime
;
3936 #if TARGET_ABI_BITS == 32
3937 abi_ulong __unused1
;
3939 abi_ulong msg_rtime
;
3940 #if TARGET_ABI_BITS == 32
3941 abi_ulong __unused2
;
3943 abi_ulong msg_ctime
;
3944 #if TARGET_ABI_BITS == 32
3945 abi_ulong __unused3
;
3947 abi_ulong __msg_cbytes
;
3949 abi_ulong msg_qbytes
;
3950 abi_ulong msg_lspid
;
3951 abi_ulong msg_lrpid
;
3952 abi_ulong __unused4
;
3953 abi_ulong __unused5
;
3956 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3957 abi_ulong target_addr
)
3959 struct target_msqid_ds
*target_md
;
3961 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3962 return -TARGET_EFAULT
;
3963 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3964 return -TARGET_EFAULT
;
3965 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3966 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3967 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3968 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3969 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3970 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3971 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3972 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3973 unlock_user_struct(target_md
, target_addr
, 0);
3977 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3978 struct msqid_ds
*host_md
)
3980 struct target_msqid_ds
*target_md
;
3982 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3983 return -TARGET_EFAULT
;
3984 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3985 return -TARGET_EFAULT
;
3986 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3987 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3988 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3989 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3990 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3991 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3992 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3993 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3994 unlock_user_struct(target_md
, target_addr
, 1);
3998 struct target_msginfo
{
4006 unsigned short int msgseg
;
4009 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4010 struct msginfo
*host_msginfo
)
4012 struct target_msginfo
*target_msginfo
;
4013 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4014 return -TARGET_EFAULT
;
4015 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4016 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4017 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4018 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4019 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4020 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4021 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4022 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4023 unlock_user_struct(target_msginfo
, target_addr
, 1);
4027 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4029 struct msqid_ds dsarg
;
4030 struct msginfo msginfo
;
4031 abi_long ret
= -TARGET_EINVAL
;
4039 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4040 return -TARGET_EFAULT
;
4041 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4042 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4043 return -TARGET_EFAULT
;
4046 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4050 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4051 if (host_to_target_msginfo(ptr
, &msginfo
))
4052 return -TARGET_EFAULT
;
4059 struct target_msgbuf
{
4064 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4065 ssize_t msgsz
, int msgflg
)
4067 struct target_msgbuf
*target_mb
;
4068 struct msgbuf
*host_mb
;
4072 return -TARGET_EINVAL
;
4075 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4076 return -TARGET_EFAULT
;
4077 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4079 unlock_user_struct(target_mb
, msgp
, 0);
4080 return -TARGET_ENOMEM
;
4082 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4083 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4084 ret
= -TARGET_ENOSYS
;
4086 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4089 if (ret
== -TARGET_ENOSYS
) {
4091 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4094 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4100 unlock_user_struct(target_mb
, msgp
, 0);
4106 #if defined(__sparc__)
4107 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4108 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4109 #elif defined(__s390x__)
4110 /* The s390 sys_ipc variant has only five parameters. */
4111 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4112 ((long int[]){(long int)__msgp, __msgtyp})
4114 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4115 ((long int[]){(long int)__msgp, __msgtyp}), 0
4119 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4120 ssize_t msgsz
, abi_long msgtyp
,
4123 struct target_msgbuf
*target_mb
;
4125 struct msgbuf
*host_mb
;
4129 return -TARGET_EINVAL
;
4132 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4133 return -TARGET_EFAULT
;
4135 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4137 ret
= -TARGET_ENOMEM
;
4140 ret
= -TARGET_ENOSYS
;
4142 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4145 if (ret
== -TARGET_ENOSYS
) {
4146 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4147 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4152 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4153 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4154 if (!target_mtext
) {
4155 ret
= -TARGET_EFAULT
;
4158 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4159 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4162 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4166 unlock_user_struct(target_mb
, msgp
, 1);
4171 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4172 abi_ulong target_addr
)
4174 struct target_shmid_ds
*target_sd
;
4176 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4177 return -TARGET_EFAULT
;
4178 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4179 return -TARGET_EFAULT
;
4180 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4181 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4182 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4183 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4184 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4185 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4186 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4187 unlock_user_struct(target_sd
, target_addr
, 0);
4191 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4192 struct shmid_ds
*host_sd
)
4194 struct target_shmid_ds
*target_sd
;
4196 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4197 return -TARGET_EFAULT
;
4198 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4199 return -TARGET_EFAULT
;
4200 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4201 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4202 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4203 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4204 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4205 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4206 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4207 unlock_user_struct(target_sd
, target_addr
, 1);
4211 struct target_shminfo
{
4219 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4220 struct shminfo
*host_shminfo
)
4222 struct target_shminfo
*target_shminfo
;
4223 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4224 return -TARGET_EFAULT
;
4225 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4226 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4227 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4228 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4229 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4230 unlock_user_struct(target_shminfo
, target_addr
, 1);
4234 struct target_shm_info
{
4239 abi_ulong swap_attempts
;
4240 abi_ulong swap_successes
;
4243 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4244 struct shm_info
*host_shm_info
)
4246 struct target_shm_info
*target_shm_info
;
4247 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4248 return -TARGET_EFAULT
;
4249 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4250 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4251 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4252 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4253 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4254 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4255 unlock_user_struct(target_shm_info
, target_addr
, 1);
4259 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4261 struct shmid_ds dsarg
;
4262 struct shminfo shminfo
;
4263 struct shm_info shm_info
;
4264 abi_long ret
= -TARGET_EINVAL
;
4272 if (target_to_host_shmid_ds(&dsarg
, buf
))
4273 return -TARGET_EFAULT
;
4274 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4275 if (host_to_target_shmid_ds(buf
, &dsarg
))
4276 return -TARGET_EFAULT
;
4279 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4280 if (host_to_target_shminfo(buf
, &shminfo
))
4281 return -TARGET_EFAULT
;
4284 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4285 if (host_to_target_shm_info(buf
, &shm_info
))
4286 return -TARGET_EFAULT
;
4291 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4298 #ifndef TARGET_FORCE_SHMLBA
4299 /* For most architectures, SHMLBA is the same as the page size;
4300 * some architectures have larger values, in which case they should
4301 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4302 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4303 * and defining its own value for SHMLBA.
4305 * The kernel also permits SHMLBA to be set by the architecture to a
4306 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4307 * this means that addresses are rounded to the large size if
4308 * SHM_RND is set but addresses not aligned to that size are not rejected
4309 * as long as they are at least page-aligned. Since the only architecture
4310 * which uses this is ia64 this code doesn't provide for that oddity.
4312 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4314 return TARGET_PAGE_SIZE
;
4318 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4319 int shmid
, abi_ulong shmaddr
, int shmflg
)
4323 struct shmid_ds shm_info
;
4327 /* find out the length of the shared memory segment */
4328 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4329 if (is_error(ret
)) {
4330 /* can't get length, bail out */
4334 shmlba
= target_shmlba(cpu_env
);
4336 if (shmaddr
& (shmlba
- 1)) {
4337 if (shmflg
& SHM_RND
) {
4338 shmaddr
&= ~(shmlba
- 1);
4340 return -TARGET_EINVAL
;
4343 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4344 return -TARGET_EINVAL
;
4350 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4352 abi_ulong mmap_start
;
4354 /* In order to use the host shmat, we need to honor host SHMLBA. */
4355 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4357 if (mmap_start
== -1) {
4359 host_raddr
= (void *)-1;
4361 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4364 if (host_raddr
== (void *)-1) {
4366 return get_errno((long)host_raddr
);
4368 raddr
=h2g((unsigned long)host_raddr
);
4370 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4371 PAGE_VALID
| PAGE_READ
|
4372 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4374 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4375 if (!shm_regions
[i
].in_use
) {
4376 shm_regions
[i
].in_use
= true;
4377 shm_regions
[i
].start
= raddr
;
4378 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4388 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4395 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4396 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4397 shm_regions
[i
].in_use
= false;
4398 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4402 rv
= get_errno(shmdt(g2h(shmaddr
)));
4409 #ifdef TARGET_NR_ipc
4410 /* ??? This only works with linear mappings. */
4411 /* do_ipc() must return target values and target errnos. */
4412 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4413 unsigned int call
, abi_long first
,
4414 abi_long second
, abi_long third
,
4415 abi_long ptr
, abi_long fifth
)
4420 version
= call
>> 16;
4425 ret
= do_semtimedop(first
, ptr
, second
, 0);
4427 case IPCOP_semtimedop
:
4429 * The s390 sys_ipc variant has only five parameters instead of six
4430 * (as for default variant) and the only difference is the handling of
4431 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4432 * to a struct timespec where the generic variant uses fifth parameter.
4434 #if defined(TARGET_S390X)
4435 ret
= do_semtimedop(first
, ptr
, second
, third
);
4437 ret
= do_semtimedop(first
, ptr
, second
, fifth
);
4442 ret
= get_errno(semget(first
, second
, third
));
4445 case IPCOP_semctl
: {
4446 /* The semun argument to semctl is passed by value, so dereference the
4449 get_user_ual(atptr
, ptr
);
4450 ret
= do_semctl(first
, second
, third
, atptr
);
4455 ret
= get_errno(msgget(first
, second
));
4459 ret
= do_msgsnd(first
, ptr
, second
, third
);
4463 ret
= do_msgctl(first
, second
, ptr
);
4470 struct target_ipc_kludge
{
4475 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4476 ret
= -TARGET_EFAULT
;
4480 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4482 unlock_user_struct(tmp
, ptr
, 0);
4486 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4495 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4496 if (is_error(raddr
))
4497 return get_errno(raddr
);
4498 if (put_user_ual(raddr
, third
))
4499 return -TARGET_EFAULT
;
4503 ret
= -TARGET_EINVAL
;
4508 ret
= do_shmdt(ptr
);
4512 /* IPC_* flag values are the same on all linux platforms */
4513 ret
= get_errno(shmget(first
, second
, third
));
4516 /* IPC_* and SHM_* command values are the same on all linux platforms */
4518 ret
= do_shmctl(first
, second
, ptr
);
4521 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4523 ret
= -TARGET_ENOSYS
;
4530 /* kernel structure types definitions */
4532 #define STRUCT(name, ...) STRUCT_ ## name,
4533 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4535 #include "syscall_types.h"
4539 #undef STRUCT_SPECIAL
4541 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4542 #define STRUCT_SPECIAL(name)
4543 #include "syscall_types.h"
4545 #undef STRUCT_SPECIAL
4547 #define MAX_STRUCT_SIZE 4096
4549 #ifdef CONFIG_FIEMAP
4550 /* So fiemap access checks don't overflow on 32 bit systems.
4551 * This is very slightly smaller than the limit imposed by
4552 * the underlying kernel.
4554 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4555 / sizeof(struct fiemap_extent))
4557 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4558 int fd
, int cmd
, abi_long arg
)
4560 /* The parameter for this ioctl is a struct fiemap followed
4561 * by an array of struct fiemap_extent whose size is set
4562 * in fiemap->fm_extent_count. The array is filled in by the
4565 int target_size_in
, target_size_out
;
4567 const argtype
*arg_type
= ie
->arg_type
;
4568 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4571 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4575 assert(arg_type
[0] == TYPE_PTR
);
4576 assert(ie
->access
== IOC_RW
);
4578 target_size_in
= thunk_type_size(arg_type
, 0);
4579 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4581 return -TARGET_EFAULT
;
4583 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4584 unlock_user(argptr
, arg
, 0);
4585 fm
= (struct fiemap
*)buf_temp
;
4586 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4587 return -TARGET_EINVAL
;
4590 outbufsz
= sizeof (*fm
) +
4591 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4593 if (outbufsz
> MAX_STRUCT_SIZE
) {
4594 /* We can't fit all the extents into the fixed size buffer.
4595 * Allocate one that is large enough and use it instead.
4597 fm
= g_try_malloc(outbufsz
);
4599 return -TARGET_ENOMEM
;
4601 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4604 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4605 if (!is_error(ret
)) {
4606 target_size_out
= target_size_in
;
4607 /* An extent_count of 0 means we were only counting the extents
4608 * so there are no structs to copy
4610 if (fm
->fm_extent_count
!= 0) {
4611 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4613 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4615 ret
= -TARGET_EFAULT
;
4617 /* Convert the struct fiemap */
4618 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4619 if (fm
->fm_extent_count
!= 0) {
4620 p
= argptr
+ target_size_in
;
4621 /* ...and then all the struct fiemap_extents */
4622 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4623 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4628 unlock_user(argptr
, arg
, target_size_out
);
4638 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4639 int fd
, int cmd
, abi_long arg
)
4641 const argtype
*arg_type
= ie
->arg_type
;
4645 struct ifconf
*host_ifconf
;
4647 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4648 int target_ifreq_size
;
4653 abi_long target_ifc_buf
;
4657 assert(arg_type
[0] == TYPE_PTR
);
4658 assert(ie
->access
== IOC_RW
);
4661 target_size
= thunk_type_size(arg_type
, 0);
4663 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4665 return -TARGET_EFAULT
;
4666 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4667 unlock_user(argptr
, arg
, 0);
4669 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4670 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4671 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4673 if (target_ifc_buf
!= 0) {
4674 target_ifc_len
= host_ifconf
->ifc_len
;
4675 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4676 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4678 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4679 if (outbufsz
> MAX_STRUCT_SIZE
) {
4681 * We can't fit all the extents into the fixed size buffer.
4682 * Allocate one that is large enough and use it instead.
4684 host_ifconf
= malloc(outbufsz
);
4686 return -TARGET_ENOMEM
;
4688 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4691 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4693 host_ifconf
->ifc_len
= host_ifc_len
;
4695 host_ifc_buf
= NULL
;
4697 host_ifconf
->ifc_buf
= host_ifc_buf
;
4699 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4700 if (!is_error(ret
)) {
4701 /* convert host ifc_len to target ifc_len */
4703 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4704 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4705 host_ifconf
->ifc_len
= target_ifc_len
;
4707 /* restore target ifc_buf */
4709 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4711 /* copy struct ifconf to target user */
4713 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4715 return -TARGET_EFAULT
;
4716 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4717 unlock_user(argptr
, arg
, target_size
);
4719 if (target_ifc_buf
!= 0) {
4720 /* copy ifreq[] to target user */
4721 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4722 for (i
= 0; i
< nb_ifreq
; i
++) {
4723 thunk_convert(argptr
+ i
* target_ifreq_size
,
4724 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4725 ifreq_arg_type
, THUNK_TARGET
);
4727 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4738 #if defined(CONFIG_USBFS)
4739 #if HOST_LONG_BITS > 64
4740 #error USBDEVFS thunks do not support >64 bit hosts yet.
4743 uint64_t target_urb_adr
;
4744 uint64_t target_buf_adr
;
4745 char *target_buf_ptr
;
4746 struct usbdevfs_urb host_urb
;
4749 static GHashTable
*usbdevfs_urb_hashtable(void)
4751 static GHashTable
*urb_hashtable
;
4753 if (!urb_hashtable
) {
4754 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4756 return urb_hashtable
;
4759 static void urb_hashtable_insert(struct live_urb
*urb
)
4761 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4762 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4765 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4767 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4768 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4771 static void urb_hashtable_remove(struct live_urb
*urb
)
4773 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4774 g_hash_table_remove(urb_hashtable
, urb
);
4778 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4779 int fd
, int cmd
, abi_long arg
)
4781 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4782 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4783 struct live_urb
*lurb
;
4787 uintptr_t target_urb_adr
;
4790 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4792 memset(buf_temp
, 0, sizeof(uint64_t));
4793 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4794 if (is_error(ret
)) {
4798 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4799 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4800 if (!lurb
->target_urb_adr
) {
4801 return -TARGET_EFAULT
;
4803 urb_hashtable_remove(lurb
);
4804 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4805 lurb
->host_urb
.buffer_length
);
4806 lurb
->target_buf_ptr
= NULL
;
4808 /* restore the guest buffer pointer */
4809 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4811 /* update the guest urb struct */
4812 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4815 return -TARGET_EFAULT
;
4817 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4818 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4820 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4821 /* write back the urb handle */
4822 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4825 return -TARGET_EFAULT
;
4828 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4829 target_urb_adr
= lurb
->target_urb_adr
;
4830 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4831 unlock_user(argptr
, arg
, target_size
);
4838 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4839 uint8_t *buf_temp
__attribute__((unused
)),
4840 int fd
, int cmd
, abi_long arg
)
4842 struct live_urb
*lurb
;
4844 /* map target address back to host URB with metadata. */
4845 lurb
= urb_hashtable_lookup(arg
);
4847 return -TARGET_EFAULT
;
4849 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4853 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4854 int fd
, int cmd
, abi_long arg
)
4856 const argtype
*arg_type
= ie
->arg_type
;
4861 struct live_urb
*lurb
;
4864 * each submitted URB needs to map to a unique ID for the
4865 * kernel, and that unique ID needs to be a pointer to
4866 * host memory. hence, we need to malloc for each URB.
4867 * isochronous transfers have a variable length struct.
4870 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4872 /* construct host copy of urb and metadata */
4873 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4875 return -TARGET_ENOMEM
;
4878 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4881 return -TARGET_EFAULT
;
4883 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4884 unlock_user(argptr
, arg
, 0);
4886 lurb
->target_urb_adr
= arg
;
4887 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4889 /* buffer space used depends on endpoint type so lock the entire buffer */
4890 /* control type urbs should check the buffer contents for true direction */
4891 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4892 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4893 lurb
->host_urb
.buffer_length
, 1);
4894 if (lurb
->target_buf_ptr
== NULL
) {
4896 return -TARGET_EFAULT
;
4899 /* update buffer pointer in host copy */
4900 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4902 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4903 if (is_error(ret
)) {
4904 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4907 urb_hashtable_insert(lurb
);
4912 #endif /* CONFIG_USBFS */
4914 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4915 int cmd
, abi_long arg
)
4918 struct dm_ioctl
*host_dm
;
4919 abi_long guest_data
;
4920 uint32_t guest_data_size
;
4922 const argtype
*arg_type
= ie
->arg_type
;
4924 void *big_buf
= NULL
;
4928 target_size
= thunk_type_size(arg_type
, 0);
4929 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4931 ret
= -TARGET_EFAULT
;
4934 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4935 unlock_user(argptr
, arg
, 0);
4937 /* buf_temp is too small, so fetch things into a bigger buffer */
4938 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4939 memcpy(big_buf
, buf_temp
, target_size
);
4943 guest_data
= arg
+ host_dm
->data_start
;
4944 if ((guest_data
- arg
) < 0) {
4945 ret
= -TARGET_EINVAL
;
4948 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4949 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4951 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4953 ret
= -TARGET_EFAULT
;
4957 switch (ie
->host_cmd
) {
4959 case DM_LIST_DEVICES
:
4962 case DM_DEV_SUSPEND
:
4965 case DM_TABLE_STATUS
:
4966 case DM_TABLE_CLEAR
:
4968 case DM_LIST_VERSIONS
:
4972 case DM_DEV_SET_GEOMETRY
:
4973 /* data contains only strings */
4974 memcpy(host_data
, argptr
, guest_data_size
);
4977 memcpy(host_data
, argptr
, guest_data_size
);
4978 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4982 void *gspec
= argptr
;
4983 void *cur_data
= host_data
;
4984 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4985 int spec_size
= thunk_type_size(arg_type
, 0);
4988 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4989 struct dm_target_spec
*spec
= cur_data
;
4993 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4994 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4996 spec
->next
= sizeof(*spec
) + slen
;
4997 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4999 cur_data
+= spec
->next
;
5004 ret
= -TARGET_EINVAL
;
5005 unlock_user(argptr
, guest_data
, 0);
5008 unlock_user(argptr
, guest_data
, 0);
5010 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5011 if (!is_error(ret
)) {
5012 guest_data
= arg
+ host_dm
->data_start
;
5013 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5014 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5015 switch (ie
->host_cmd
) {
5020 case DM_DEV_SUSPEND
:
5023 case DM_TABLE_CLEAR
:
5025 case DM_DEV_SET_GEOMETRY
:
5026 /* no return data */
5028 case DM_LIST_DEVICES
:
5030 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5031 uint32_t remaining_data
= guest_data_size
;
5032 void *cur_data
= argptr
;
5033 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5034 int nl_size
= 12; /* can't use thunk_size due to alignment */
5037 uint32_t next
= nl
->next
;
5039 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5041 if (remaining_data
< nl
->next
) {
5042 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5045 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5046 strcpy(cur_data
+ nl_size
, nl
->name
);
5047 cur_data
+= nl
->next
;
5048 remaining_data
-= nl
->next
;
5052 nl
= (void*)nl
+ next
;
5057 case DM_TABLE_STATUS
:
5059 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5060 void *cur_data
= argptr
;
5061 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5062 int spec_size
= thunk_type_size(arg_type
, 0);
5065 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5066 uint32_t next
= spec
->next
;
5067 int slen
= strlen((char*)&spec
[1]) + 1;
5068 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5069 if (guest_data_size
< spec
->next
) {
5070 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5073 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5074 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5075 cur_data
= argptr
+ spec
->next
;
5076 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5082 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5083 int count
= *(uint32_t*)hdata
;
5084 uint64_t *hdev
= hdata
+ 8;
5085 uint64_t *gdev
= argptr
+ 8;
5088 *(uint32_t*)argptr
= tswap32(count
);
5089 for (i
= 0; i
< count
; i
++) {
5090 *gdev
= tswap64(*hdev
);
5096 case DM_LIST_VERSIONS
:
5098 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5099 uint32_t remaining_data
= guest_data_size
;
5100 void *cur_data
= argptr
;
5101 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5102 int vers_size
= thunk_type_size(arg_type
, 0);
5105 uint32_t next
= vers
->next
;
5107 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5109 if (remaining_data
< vers
->next
) {
5110 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5113 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5114 strcpy(cur_data
+ vers_size
, vers
->name
);
5115 cur_data
+= vers
->next
;
5116 remaining_data
-= vers
->next
;
5120 vers
= (void*)vers
+ next
;
5125 unlock_user(argptr
, guest_data
, 0);
5126 ret
= -TARGET_EINVAL
;
5129 unlock_user(argptr
, guest_data
, guest_data_size
);
5131 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5133 ret
= -TARGET_EFAULT
;
5136 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5137 unlock_user(argptr
, arg
, target_size
);
5144 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5145 int cmd
, abi_long arg
)
5149 const argtype
*arg_type
= ie
->arg_type
;
5150 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5153 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5154 struct blkpg_partition host_part
;
5156 /* Read and convert blkpg */
5158 target_size
= thunk_type_size(arg_type
, 0);
5159 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5161 ret
= -TARGET_EFAULT
;
5164 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5165 unlock_user(argptr
, arg
, 0);
5167 switch (host_blkpg
->op
) {
5168 case BLKPG_ADD_PARTITION
:
5169 case BLKPG_DEL_PARTITION
:
5170 /* payload is struct blkpg_partition */
5173 /* Unknown opcode */
5174 ret
= -TARGET_EINVAL
;
5178 /* Read and convert blkpg->data */
5179 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5180 target_size
= thunk_type_size(part_arg_type
, 0);
5181 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5183 ret
= -TARGET_EFAULT
;
5186 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5187 unlock_user(argptr
, arg
, 0);
5189 /* Swizzle the data pointer to our local copy and call! */
5190 host_blkpg
->data
= &host_part
;
5191 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5197 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5198 int fd
, int cmd
, abi_long arg
)
5200 const argtype
*arg_type
= ie
->arg_type
;
5201 const StructEntry
*se
;
5202 const argtype
*field_types
;
5203 const int *dst_offsets
, *src_offsets
;
5206 abi_ulong
*target_rt_dev_ptr
= NULL
;
5207 unsigned long *host_rt_dev_ptr
= NULL
;
5211 assert(ie
->access
== IOC_W
);
5212 assert(*arg_type
== TYPE_PTR
);
5214 assert(*arg_type
== TYPE_STRUCT
);
5215 target_size
= thunk_type_size(arg_type
, 0);
5216 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5218 return -TARGET_EFAULT
;
5221 assert(*arg_type
== (int)STRUCT_rtentry
);
5222 se
= struct_entries
+ *arg_type
++;
5223 assert(se
->convert
[0] == NULL
);
5224 /* convert struct here to be able to catch rt_dev string */
5225 field_types
= se
->field_types
;
5226 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5227 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5228 for (i
= 0; i
< se
->nb_fields
; i
++) {
5229 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5230 assert(*field_types
== TYPE_PTRVOID
);
5231 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5232 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5233 if (*target_rt_dev_ptr
!= 0) {
5234 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5235 tswapal(*target_rt_dev_ptr
));
5236 if (!*host_rt_dev_ptr
) {
5237 unlock_user(argptr
, arg
, 0);
5238 return -TARGET_EFAULT
;
5241 *host_rt_dev_ptr
= 0;
5246 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5247 argptr
+ src_offsets
[i
],
5248 field_types
, THUNK_HOST
);
5250 unlock_user(argptr
, arg
, 0);
5252 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5254 assert(host_rt_dev_ptr
!= NULL
);
5255 assert(target_rt_dev_ptr
!= NULL
);
5256 if (*host_rt_dev_ptr
!= 0) {
5257 unlock_user((void *)*host_rt_dev_ptr
,
5258 *target_rt_dev_ptr
, 0);
5263 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5264 int fd
, int cmd
, abi_long arg
)
5266 int sig
= target_to_host_signal(arg
);
5267 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5270 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5271 int fd
, int cmd
, abi_long arg
)
5276 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5277 if (is_error(ret
)) {
5281 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5282 if (copy_to_user_timeval(arg
, &tv
)) {
5283 return -TARGET_EFAULT
;
5286 if (copy_to_user_timeval64(arg
, &tv
)) {
5287 return -TARGET_EFAULT
;
5294 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5295 int fd
, int cmd
, abi_long arg
)
5300 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5301 if (is_error(ret
)) {
5305 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5306 if (host_to_target_timespec(arg
, &ts
)) {
5307 return -TARGET_EFAULT
;
5310 if (host_to_target_timespec64(arg
, &ts
)) {
5311 return -TARGET_EFAULT
;
5319 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5320 int fd
, int cmd
, abi_long arg
)
5322 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5323 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5329 static void unlock_drm_version(struct drm_version
*host_ver
,
5330 struct target_drm_version
*target_ver
,
5333 unlock_user(host_ver
->name
, target_ver
->name
,
5334 copy
? host_ver
->name_len
: 0);
5335 unlock_user(host_ver
->date
, target_ver
->date
,
5336 copy
? host_ver
->date_len
: 0);
5337 unlock_user(host_ver
->desc
, target_ver
->desc
,
5338 copy
? host_ver
->desc_len
: 0);
5341 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5342 struct target_drm_version
*target_ver
)
5344 memset(host_ver
, 0, sizeof(*host_ver
));
5346 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5347 if (host_ver
->name_len
) {
5348 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5349 target_ver
->name_len
, 0);
5350 if (!host_ver
->name
) {
5355 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5356 if (host_ver
->date_len
) {
5357 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5358 target_ver
->date_len
, 0);
5359 if (!host_ver
->date
) {
5364 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5365 if (host_ver
->desc_len
) {
5366 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5367 target_ver
->desc_len
, 0);
5368 if (!host_ver
->desc
) {
5375 unlock_drm_version(host_ver
, target_ver
, false);
5379 static inline void host_to_target_drmversion(
5380 struct target_drm_version
*target_ver
,
5381 struct drm_version
*host_ver
)
5383 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5384 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5385 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5386 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5387 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5388 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5389 unlock_drm_version(host_ver
, target_ver
, true);
5392 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5393 int fd
, int cmd
, abi_long arg
)
5395 struct drm_version
*ver
;
5396 struct target_drm_version
*target_ver
;
5399 switch (ie
->host_cmd
) {
5400 case DRM_IOCTL_VERSION
:
5401 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5402 return -TARGET_EFAULT
;
5404 ver
= (struct drm_version
*)buf_temp
;
5405 ret
= target_to_host_drmversion(ver
, target_ver
);
5406 if (!is_error(ret
)) {
5407 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5408 if (is_error(ret
)) {
5409 unlock_drm_version(ver
, target_ver
, false);
5411 host_to_target_drmversion(target_ver
, ver
);
5414 unlock_user_struct(target_ver
, arg
, 0);
5417 return -TARGET_ENOSYS
;
5422 IOCTLEntry ioctl_entries
[] = {
5423 #define IOCTL(cmd, access, ...) \
5424 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5425 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5426 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5427 #define IOCTL_IGNORE(cmd) \
5428 { TARGET_ ## cmd, 0, #cmd },
5433 /* ??? Implement proper locking for ioctls. */
5434 /* do_ioctl() Must return target values and target errnos. */
5435 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5437 const IOCTLEntry
*ie
;
5438 const argtype
*arg_type
;
5440 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5446 if (ie
->target_cmd
== 0) {
5448 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5449 return -TARGET_ENOSYS
;
5451 if (ie
->target_cmd
== cmd
)
5455 arg_type
= ie
->arg_type
;
5457 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5458 } else if (!ie
->host_cmd
) {
5459 /* Some architectures define BSD ioctls in their headers
5460 that are not implemented in Linux. */
5461 return -TARGET_ENOSYS
;
5464 switch(arg_type
[0]) {
5467 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5473 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5477 target_size
= thunk_type_size(arg_type
, 0);
5478 switch(ie
->access
) {
5480 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5481 if (!is_error(ret
)) {
5482 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5484 return -TARGET_EFAULT
;
5485 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5486 unlock_user(argptr
, arg
, target_size
);
5490 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5492 return -TARGET_EFAULT
;
5493 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5494 unlock_user(argptr
, arg
, 0);
5495 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5499 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5501 return -TARGET_EFAULT
;
5502 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5503 unlock_user(argptr
, arg
, 0);
5504 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5505 if (!is_error(ret
)) {
5506 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5508 return -TARGET_EFAULT
;
5509 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5510 unlock_user(argptr
, arg
, target_size
);
5516 qemu_log_mask(LOG_UNIMP
,
5517 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5518 (long)cmd
, arg_type
[0]);
5519 ret
= -TARGET_ENOSYS
;
5525 static const bitmask_transtbl iflag_tbl
[] = {
5526 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5527 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5528 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5529 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5530 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5531 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5532 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5533 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5534 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5535 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5536 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5537 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5538 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5539 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5543 static const bitmask_transtbl oflag_tbl
[] = {
5544 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5545 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5546 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5547 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5548 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5549 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5550 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5551 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5552 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5553 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5554 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5555 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5556 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5557 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5558 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5559 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5560 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5561 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5562 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5563 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5564 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5565 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5566 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5567 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5571 static const bitmask_transtbl cflag_tbl
[] = {
5572 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5573 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5574 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5575 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5576 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5577 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5578 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5579 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5580 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5581 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5582 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5583 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5584 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5585 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5586 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5587 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5588 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5589 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5590 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5591 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5592 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5593 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5594 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5595 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5596 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5597 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5598 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5599 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5600 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5601 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5602 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5606 static const bitmask_transtbl lflag_tbl
[] = {
5607 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5608 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5609 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5610 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5611 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5612 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5613 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5614 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5615 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5616 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5617 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5618 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5619 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5620 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5621 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5625 static void target_to_host_termios (void *dst
, const void *src
)
5627 struct host_termios
*host
= dst
;
5628 const struct target_termios
*target
= src
;
5631 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5633 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5635 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5637 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5638 host
->c_line
= target
->c_line
;
5640 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5641 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5642 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5643 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5644 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5645 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5646 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5647 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5648 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5649 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5650 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5651 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5652 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5653 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5654 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5655 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5656 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5657 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5660 static void host_to_target_termios (void *dst
, const void *src
)
5662 struct target_termios
*target
= dst
;
5663 const struct host_termios
*host
= src
;
5666 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5668 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5670 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5672 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5673 target
->c_line
= host
->c_line
;
5675 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5676 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5677 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5678 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5679 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5680 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5681 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5682 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5683 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5684 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5685 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5686 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5687 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5688 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5689 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5690 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5691 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5692 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5695 static const StructEntry struct_termios_def
= {
5696 .convert
= { host_to_target_termios
, target_to_host_termios
},
5697 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5698 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5701 static bitmask_transtbl mmap_flags_tbl
[] = {
5702 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5703 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5704 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5705 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5706 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5707 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5708 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5709 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5710 MAP_DENYWRITE
, MAP_DENYWRITE
},
5711 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5712 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5713 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5714 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5715 MAP_NORESERVE
, MAP_NORESERVE
},
5716 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5717 /* MAP_STACK had been ignored by the kernel for quite some time.
5718 Recognize it for the target insofar as we do not want to pass
5719 it through to the host. */
5720 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5725 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5726 * TARGET_I386 is defined if TARGET_X86_64 is defined
5728 #if defined(TARGET_I386)
5730 /* NOTE: there is really one LDT for all the threads */
5731 static uint8_t *ldt_table
;
5733 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5740 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5741 if (size
> bytecount
)
5743 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5745 return -TARGET_EFAULT
;
5746 /* ??? Should this be byteswapped? */
5747 memcpy(p
, ldt_table
, size
);
5748 unlock_user(p
, ptr
, size
);
5752 /* XXX: add locking support */
5753 static abi_long
write_ldt(CPUX86State
*env
,
5754 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5756 struct target_modify_ldt_ldt_s ldt_info
;
5757 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5758 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5759 int seg_not_present
, useable
, lm
;
5760 uint32_t *lp
, entry_1
, entry_2
;
5762 if (bytecount
!= sizeof(ldt_info
))
5763 return -TARGET_EINVAL
;
5764 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5765 return -TARGET_EFAULT
;
5766 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5767 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5768 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5769 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5770 unlock_user_struct(target_ldt_info
, ptr
, 0);
5772 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5773 return -TARGET_EINVAL
;
5774 seg_32bit
= ldt_info
.flags
& 1;
5775 contents
= (ldt_info
.flags
>> 1) & 3;
5776 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5777 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5778 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5779 useable
= (ldt_info
.flags
>> 6) & 1;
5783 lm
= (ldt_info
.flags
>> 7) & 1;
5785 if (contents
== 3) {
5787 return -TARGET_EINVAL
;
5788 if (seg_not_present
== 0)
5789 return -TARGET_EINVAL
;
5791 /* allocate the LDT */
5793 env
->ldt
.base
= target_mmap(0,
5794 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5795 PROT_READ
|PROT_WRITE
,
5796 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5797 if (env
->ldt
.base
== -1)
5798 return -TARGET_ENOMEM
;
5799 memset(g2h(env
->ldt
.base
), 0,
5800 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5801 env
->ldt
.limit
= 0xffff;
5802 ldt_table
= g2h(env
->ldt
.base
);
5805 /* NOTE: same code as Linux kernel */
5806 /* Allow LDTs to be cleared by the user. */
5807 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5810 read_exec_only
== 1 &&
5812 limit_in_pages
== 0 &&
5813 seg_not_present
== 1 &&
5821 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5822 (ldt_info
.limit
& 0x0ffff);
5823 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5824 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5825 (ldt_info
.limit
& 0xf0000) |
5826 ((read_exec_only
^ 1) << 9) |
5828 ((seg_not_present
^ 1) << 15) |
5830 (limit_in_pages
<< 23) |
5834 entry_2
|= (useable
<< 20);
5836 /* Install the new entry ... */
5838 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5839 lp
[0] = tswap32(entry_1
);
5840 lp
[1] = tswap32(entry_2
);
5844 /* specific and weird i386 syscalls */
5845 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5846 unsigned long bytecount
)
5852 ret
= read_ldt(ptr
, bytecount
);
5855 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5858 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5861 ret
= -TARGET_ENOSYS
;
5867 #if defined(TARGET_ABI32)
5868 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5870 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5871 struct target_modify_ldt_ldt_s ldt_info
;
5872 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5873 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5874 int seg_not_present
, useable
, lm
;
5875 uint32_t *lp
, entry_1
, entry_2
;
5878 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5879 if (!target_ldt_info
)
5880 return -TARGET_EFAULT
;
5881 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5882 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5883 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5884 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5885 if (ldt_info
.entry_number
== -1) {
5886 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5887 if (gdt_table
[i
] == 0) {
5888 ldt_info
.entry_number
= i
;
5889 target_ldt_info
->entry_number
= tswap32(i
);
5894 unlock_user_struct(target_ldt_info
, ptr
, 1);
5896 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5897 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5898 return -TARGET_EINVAL
;
5899 seg_32bit
= ldt_info
.flags
& 1;
5900 contents
= (ldt_info
.flags
>> 1) & 3;
5901 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5902 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5903 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5904 useable
= (ldt_info
.flags
>> 6) & 1;
5908 lm
= (ldt_info
.flags
>> 7) & 1;
5911 if (contents
== 3) {
5912 if (seg_not_present
== 0)
5913 return -TARGET_EINVAL
;
5916 /* NOTE: same code as Linux kernel */
5917 /* Allow LDTs to be cleared by the user. */
5918 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5919 if ((contents
== 0 &&
5920 read_exec_only
== 1 &&
5922 limit_in_pages
== 0 &&
5923 seg_not_present
== 1 &&
5931 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5932 (ldt_info
.limit
& 0x0ffff);
5933 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5934 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5935 (ldt_info
.limit
& 0xf0000) |
5936 ((read_exec_only
^ 1) << 9) |
5938 ((seg_not_present
^ 1) << 15) |
5940 (limit_in_pages
<< 23) |
5945 /* Install the new entry ... */
5947 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5948 lp
[0] = tswap32(entry_1
);
5949 lp
[1] = tswap32(entry_2
);
5953 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5955 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5956 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5957 uint32_t base_addr
, limit
, flags
;
5958 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5959 int seg_not_present
, useable
, lm
;
5960 uint32_t *lp
, entry_1
, entry_2
;
5962 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5963 if (!target_ldt_info
)
5964 return -TARGET_EFAULT
;
5965 idx
= tswap32(target_ldt_info
->entry_number
);
5966 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5967 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5968 unlock_user_struct(target_ldt_info
, ptr
, 1);
5969 return -TARGET_EINVAL
;
5971 lp
= (uint32_t *)(gdt_table
+ idx
);
5972 entry_1
= tswap32(lp
[0]);
5973 entry_2
= tswap32(lp
[1]);
5975 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5976 contents
= (entry_2
>> 10) & 3;
5977 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5978 seg_32bit
= (entry_2
>> 22) & 1;
5979 limit_in_pages
= (entry_2
>> 23) & 1;
5980 useable
= (entry_2
>> 20) & 1;
5984 lm
= (entry_2
>> 21) & 1;
5986 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5987 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5988 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5989 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5990 base_addr
= (entry_1
>> 16) |
5991 (entry_2
& 0xff000000) |
5992 ((entry_2
& 0xff) << 16);
5993 target_ldt_info
->base_addr
= tswapal(base_addr
);
5994 target_ldt_info
->limit
= tswap32(limit
);
5995 target_ldt_info
->flags
= tswap32(flags
);
5996 unlock_user_struct(target_ldt_info
, ptr
, 1);
6000 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6002 return -TARGET_ENOSYS
;
6005 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6012 case TARGET_ARCH_SET_GS
:
6013 case TARGET_ARCH_SET_FS
:
6014 if (code
== TARGET_ARCH_SET_GS
)
6018 cpu_x86_load_seg(env
, idx
, 0);
6019 env
->segs
[idx
].base
= addr
;
6021 case TARGET_ARCH_GET_GS
:
6022 case TARGET_ARCH_GET_FS
:
6023 if (code
== TARGET_ARCH_GET_GS
)
6027 val
= env
->segs
[idx
].base
;
6028 if (put_user(val
, addr
, abi_ulong
))
6029 ret
= -TARGET_EFAULT
;
6032 ret
= -TARGET_EINVAL
;
6037 #endif /* defined(TARGET_ABI32) */
6039 #endif /* defined(TARGET_I386) */
6041 #define NEW_STACK_SIZE 0x40000
6044 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6047 pthread_mutex_t mutex
;
6048 pthread_cond_t cond
;
6051 abi_ulong child_tidptr
;
6052 abi_ulong parent_tidptr
;
6056 static void *clone_func(void *arg
)
6058 new_thread_info
*info
= arg
;
6063 rcu_register_thread();
6064 tcg_register_thread();
6068 ts
= (TaskState
*)cpu
->opaque
;
6069 info
->tid
= sys_gettid();
6071 if (info
->child_tidptr
)
6072 put_user_u32(info
->tid
, info
->child_tidptr
);
6073 if (info
->parent_tidptr
)
6074 put_user_u32(info
->tid
, info
->parent_tidptr
);
6075 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6076 /* Enable signals. */
6077 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6078 /* Signal to the parent that we're ready. */
6079 pthread_mutex_lock(&info
->mutex
);
6080 pthread_cond_broadcast(&info
->cond
);
6081 pthread_mutex_unlock(&info
->mutex
);
6082 /* Wait until the parent has finished initializing the tls state. */
6083 pthread_mutex_lock(&clone_lock
);
6084 pthread_mutex_unlock(&clone_lock
);
6090 /* do_fork() Must return host values and target errnos (unlike most
6091 do_*() functions). */
6092 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6093 abi_ulong parent_tidptr
, target_ulong newtls
,
6094 abi_ulong child_tidptr
)
6096 CPUState
*cpu
= env_cpu(env
);
6100 CPUArchState
*new_env
;
6103 flags
&= ~CLONE_IGNORED_FLAGS
;
6105 /* Emulate vfork() with fork() */
6106 if (flags
& CLONE_VFORK
)
6107 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6109 if (flags
& CLONE_VM
) {
6110 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6111 new_thread_info info
;
6112 pthread_attr_t attr
;
6114 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6115 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6116 return -TARGET_EINVAL
;
6119 ts
= g_new0(TaskState
, 1);
6120 init_task_state(ts
);
6122 /* Grab a mutex so that thread setup appears atomic. */
6123 pthread_mutex_lock(&clone_lock
);
6125 /* we create a new CPU instance. */
6126 new_env
= cpu_copy(env
);
6127 /* Init regs that differ from the parent. */
6128 cpu_clone_regs_child(new_env
, newsp
, flags
);
6129 cpu_clone_regs_parent(env
, flags
);
6130 new_cpu
= env_cpu(new_env
);
6131 new_cpu
->opaque
= ts
;
6132 ts
->bprm
= parent_ts
->bprm
;
6133 ts
->info
= parent_ts
->info
;
6134 ts
->signal_mask
= parent_ts
->signal_mask
;
6136 if (flags
& CLONE_CHILD_CLEARTID
) {
6137 ts
->child_tidptr
= child_tidptr
;
6140 if (flags
& CLONE_SETTLS
) {
6141 cpu_set_tls (new_env
, newtls
);
6144 memset(&info
, 0, sizeof(info
));
6145 pthread_mutex_init(&info
.mutex
, NULL
);
6146 pthread_mutex_lock(&info
.mutex
);
6147 pthread_cond_init(&info
.cond
, NULL
);
6149 if (flags
& CLONE_CHILD_SETTID
) {
6150 info
.child_tidptr
= child_tidptr
;
6152 if (flags
& CLONE_PARENT_SETTID
) {
6153 info
.parent_tidptr
= parent_tidptr
;
6156 ret
= pthread_attr_init(&attr
);
6157 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6158 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6159 /* It is not safe to deliver signals until the child has finished
6160 initializing, so temporarily block all signals. */
6161 sigfillset(&sigmask
);
6162 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6163 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6165 /* If this is our first additional thread, we need to ensure we
6166 * generate code for parallel execution and flush old translations.
6168 if (!parallel_cpus
) {
6169 parallel_cpus
= true;
6173 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6174 /* TODO: Free new CPU state if thread creation failed. */
6176 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6177 pthread_attr_destroy(&attr
);
6179 /* Wait for the child to initialize. */
6180 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6185 pthread_mutex_unlock(&info
.mutex
);
6186 pthread_cond_destroy(&info
.cond
);
6187 pthread_mutex_destroy(&info
.mutex
);
6188 pthread_mutex_unlock(&clone_lock
);
6190 /* if no CLONE_VM, we consider it is a fork */
6191 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6192 return -TARGET_EINVAL
;
6195 /* We can't support custom termination signals */
6196 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6197 return -TARGET_EINVAL
;
6200 if (block_signals()) {
6201 return -TARGET_ERESTARTSYS
;
6207 /* Child Process. */
6208 cpu_clone_regs_child(env
, newsp
, flags
);
6210 /* There is a race condition here. The parent process could
6211 theoretically read the TID in the child process before the child
6212 tid is set. This would require using either ptrace
6213 (not implemented) or having *_tidptr to point at a shared memory
6214 mapping. We can't repeat the spinlock hack used above because
6215 the child process gets its own copy of the lock. */
6216 if (flags
& CLONE_CHILD_SETTID
)
6217 put_user_u32(sys_gettid(), child_tidptr
);
6218 if (flags
& CLONE_PARENT_SETTID
)
6219 put_user_u32(sys_gettid(), parent_tidptr
);
6220 ts
= (TaskState
*)cpu
->opaque
;
6221 if (flags
& CLONE_SETTLS
)
6222 cpu_set_tls (env
, newtls
);
6223 if (flags
& CLONE_CHILD_CLEARTID
)
6224 ts
->child_tidptr
= child_tidptr
;
6226 cpu_clone_regs_parent(env
, flags
);
6233 /* warning : doesn't handle linux specific flags... */
6234 static int target_to_host_fcntl_cmd(int cmd
)
6239 case TARGET_F_DUPFD
:
6240 case TARGET_F_GETFD
:
6241 case TARGET_F_SETFD
:
6242 case TARGET_F_GETFL
:
6243 case TARGET_F_SETFL
:
6244 case TARGET_F_OFD_GETLK
:
6245 case TARGET_F_OFD_SETLK
:
6246 case TARGET_F_OFD_SETLKW
:
6249 case TARGET_F_GETLK
:
6252 case TARGET_F_SETLK
:
6255 case TARGET_F_SETLKW
:
6258 case TARGET_F_GETOWN
:
6261 case TARGET_F_SETOWN
:
6264 case TARGET_F_GETSIG
:
6267 case TARGET_F_SETSIG
:
6270 #if TARGET_ABI_BITS == 32
6271 case TARGET_F_GETLK64
:
6274 case TARGET_F_SETLK64
:
6277 case TARGET_F_SETLKW64
:
6281 case TARGET_F_SETLEASE
:
6284 case TARGET_F_GETLEASE
:
6287 #ifdef F_DUPFD_CLOEXEC
6288 case TARGET_F_DUPFD_CLOEXEC
:
6289 ret
= F_DUPFD_CLOEXEC
;
6292 case TARGET_F_NOTIFY
:
6296 case TARGET_F_GETOWN_EX
:
6301 case TARGET_F_SETOWN_EX
:
6306 case TARGET_F_SETPIPE_SZ
:
6309 case TARGET_F_GETPIPE_SZ
:
6314 ret
= -TARGET_EINVAL
;
6318 #if defined(__powerpc64__)
6319 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6320 * is not supported by kernel. The glibc fcntl call actually adjusts
6321 * them to 5, 6 and 7 before making the syscall(). Since we make the
6322 * syscall directly, adjust to what is supported by the kernel.
6324 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6325 ret
-= F_GETLK64
- 5;
6332 #define FLOCK_TRANSTBL \
6334 TRANSTBL_CONVERT(F_RDLCK); \
6335 TRANSTBL_CONVERT(F_WRLCK); \
6336 TRANSTBL_CONVERT(F_UNLCK); \
6337 TRANSTBL_CONVERT(F_EXLCK); \
6338 TRANSTBL_CONVERT(F_SHLCK); \
6341 static int target_to_host_flock(int type
)
6343 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6345 #undef TRANSTBL_CONVERT
6346 return -TARGET_EINVAL
;
6349 static int host_to_target_flock(int type
)
6351 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6353 #undef TRANSTBL_CONVERT
6354 /* if we don't know how to convert the value coming
6355 * from the host we copy to the target field as-is
6360 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6361 abi_ulong target_flock_addr
)
6363 struct target_flock
*target_fl
;
6366 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6367 return -TARGET_EFAULT
;
6370 __get_user(l_type
, &target_fl
->l_type
);
6371 l_type
= target_to_host_flock(l_type
);
6375 fl
->l_type
= l_type
;
6376 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6377 __get_user(fl
->l_start
, &target_fl
->l_start
);
6378 __get_user(fl
->l_len
, &target_fl
->l_len
);
6379 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6380 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6384 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6385 const struct flock64
*fl
)
6387 struct target_flock
*target_fl
;
6390 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6391 return -TARGET_EFAULT
;
6394 l_type
= host_to_target_flock(fl
->l_type
);
6395 __put_user(l_type
, &target_fl
->l_type
);
6396 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6397 __put_user(fl
->l_start
, &target_fl
->l_start
);
6398 __put_user(fl
->l_len
, &target_fl
->l_len
);
6399 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6400 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6404 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6405 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6407 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6408 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6409 abi_ulong target_flock_addr
)
6411 struct target_oabi_flock64
*target_fl
;
6414 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6415 return -TARGET_EFAULT
;
6418 __get_user(l_type
, &target_fl
->l_type
);
6419 l_type
= target_to_host_flock(l_type
);
6423 fl
->l_type
= l_type
;
6424 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6425 __get_user(fl
->l_start
, &target_fl
->l_start
);
6426 __get_user(fl
->l_len
, &target_fl
->l_len
);
6427 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6428 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6432 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6433 const struct flock64
*fl
)
6435 struct target_oabi_flock64
*target_fl
;
6438 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6439 return -TARGET_EFAULT
;
6442 l_type
= host_to_target_flock(fl
->l_type
);
6443 __put_user(l_type
, &target_fl
->l_type
);
6444 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6445 __put_user(fl
->l_start
, &target_fl
->l_start
);
6446 __put_user(fl
->l_len
, &target_fl
->l_len
);
6447 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6448 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6453 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6454 abi_ulong target_flock_addr
)
6456 struct target_flock64
*target_fl
;
6459 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6460 return -TARGET_EFAULT
;
6463 __get_user(l_type
, &target_fl
->l_type
);
6464 l_type
= target_to_host_flock(l_type
);
6468 fl
->l_type
= l_type
;
6469 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6470 __get_user(fl
->l_start
, &target_fl
->l_start
);
6471 __get_user(fl
->l_len
, &target_fl
->l_len
);
6472 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6473 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6477 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6478 const struct flock64
*fl
)
6480 struct target_flock64
*target_fl
;
6483 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6484 return -TARGET_EFAULT
;
6487 l_type
= host_to_target_flock(fl
->l_type
);
6488 __put_user(l_type
, &target_fl
->l_type
);
6489 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6490 __put_user(fl
->l_start
, &target_fl
->l_start
);
6491 __put_user(fl
->l_len
, &target_fl
->l_len
);
6492 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6493 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6497 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6499 struct flock64 fl64
;
6501 struct f_owner_ex fox
;
6502 struct target_f_owner_ex
*target_fox
;
6505 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6507 if (host_cmd
== -TARGET_EINVAL
)
6511 case TARGET_F_GETLK
:
6512 ret
= copy_from_user_flock(&fl64
, arg
);
6516 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6518 ret
= copy_to_user_flock(arg
, &fl64
);
6522 case TARGET_F_SETLK
:
6523 case TARGET_F_SETLKW
:
6524 ret
= copy_from_user_flock(&fl64
, arg
);
6528 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6531 case TARGET_F_GETLK64
:
6532 case TARGET_F_OFD_GETLK
:
6533 ret
= copy_from_user_flock64(&fl64
, arg
);
6537 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6539 ret
= copy_to_user_flock64(arg
, &fl64
);
6542 case TARGET_F_SETLK64
:
6543 case TARGET_F_SETLKW64
:
6544 case TARGET_F_OFD_SETLK
:
6545 case TARGET_F_OFD_SETLKW
:
6546 ret
= copy_from_user_flock64(&fl64
, arg
);
6550 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6553 case TARGET_F_GETFL
:
6554 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6556 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6560 case TARGET_F_SETFL
:
6561 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6562 target_to_host_bitmask(arg
,
6567 case TARGET_F_GETOWN_EX
:
6568 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6570 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6571 return -TARGET_EFAULT
;
6572 target_fox
->type
= tswap32(fox
.type
);
6573 target_fox
->pid
= tswap32(fox
.pid
);
6574 unlock_user_struct(target_fox
, arg
, 1);
6580 case TARGET_F_SETOWN_EX
:
6581 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6582 return -TARGET_EFAULT
;
6583 fox
.type
= tswap32(target_fox
->type
);
6584 fox
.pid
= tswap32(target_fox
->pid
);
6585 unlock_user_struct(target_fox
, arg
, 0);
6586 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6590 case TARGET_F_SETOWN
:
6591 case TARGET_F_GETOWN
:
6592 case TARGET_F_SETSIG
:
6593 case TARGET_F_GETSIG
:
6594 case TARGET_F_SETLEASE
:
6595 case TARGET_F_GETLEASE
:
6596 case TARGET_F_SETPIPE_SZ
:
6597 case TARGET_F_GETPIPE_SZ
:
6598 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6602 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* Clamp a 32-bit uid into the legacy 16-bit range (overflow -> 65534). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
/* Clamp a 32-bit gid into the legacy 16-bit range (overflow -> 65534). */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
/* Widen a 16-bit gid, preserving the -1 "no change" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
/* Byte-swap an id at the width used by the 16-bit uid ABI. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6674 /* We must do direct syscalls for setting UID/GID, because we want to
6675 * implement the Linux system call semantics of "change only for this thread",
6676 * not the libc/POSIX semantics of "change for all threads in process".
6677 * (See http://ewontfix.com/17/ for more details.)
6678 * We use the 32-bit version of the syscalls if present; if it is not
6679 * then either the host architecture supports 32-bit UIDs natively with
6680 * the standard syscall, or the 16-bit UID is the best we can do.
6682 #ifdef __NR_setuid32
6683 #define __NR_sys_setuid __NR_setuid32
6685 #define __NR_sys_setuid __NR_setuid
6687 #ifdef __NR_setgid32
6688 #define __NR_sys_setgid __NR_setgid32
6690 #define __NR_sys_setgid __NR_setgid
6692 #ifdef __NR_setresuid32
6693 #define __NR_sys_setresuid __NR_setresuid32
6695 #define __NR_sys_setresuid __NR_setresuid
6697 #ifdef __NR_setresgid32
6698 #define __NR_sys_setresgid __NR_setresgid32
6700 #define __NR_sys_setresgid __NR_setresgid
6703 _syscall1(int, sys_setuid
, uid_t
, uid
)
6704 _syscall1(int, sys_setgid
, gid_t
, gid
)
6705 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6706 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6708 void syscall_init(void)
6711 const argtype
*arg_type
;
6715 thunk_init(STRUCT_MAX
);
6717 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6718 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6719 #include "syscall_types.h"
6721 #undef STRUCT_SPECIAL
6723 /* Build target_to_host_errno_table[] table from
6724 * host_to_target_errno_table[]. */
6725 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6726 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6729 /* we patch the ioctl size if necessary. We rely on the fact that
6730 no ioctl has all the bits at '1' in the size field */
6732 while (ie
->target_cmd
!= 0) {
6733 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6734 TARGET_IOC_SIZEMASK
) {
6735 arg_type
= ie
->arg_type
;
6736 if (arg_type
[0] != TYPE_PTR
) {
6737 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6742 size
= thunk_type_size(arg_type
, 0);
6743 ie
->target_cmd
= (ie
->target_cmd
&
6744 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6745 (size
<< TARGET_IOC_SIZESHIFT
);
6748 /* automatic consistency check if same arch */
6749 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6750 (defined(__x86_64__) && defined(TARGET_X86_64))
6751 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6752 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6753 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6760 #ifdef TARGET_NR_truncate64
6761 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6766 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
6770 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6774 #ifdef TARGET_NR_ftruncate64
6775 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6780 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
6784 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6788 #if defined(TARGET_NR_timer_settime) || \
6789 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6790 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
6791 abi_ulong target_addr
)
6793 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
6794 offsetof(struct target_itimerspec
,
6796 target_to_host_timespec(&host_its
->it_value
, target_addr
+
6797 offsetof(struct target_itimerspec
,
6799 return -TARGET_EFAULT
;
6806 #if defined(TARGET_NR_timer_settime64) || \
6807 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6808 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
6809 abi_ulong target_addr
)
6811 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
6812 offsetof(struct target__kernel_itimerspec
,
6814 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
6815 offsetof(struct target__kernel_itimerspec
,
6817 return -TARGET_EFAULT
;
6824 #if ((defined(TARGET_NR_timerfd_gettime) || \
6825 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6826 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6827 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6828 struct itimerspec
*host_its
)
6830 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
6832 &host_its
->it_interval
) ||
6833 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
6835 &host_its
->it_value
)) {
6836 return -TARGET_EFAULT
;
6842 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6843 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6844 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6845 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
6846 struct itimerspec
*host_its
)
6848 if (host_to_target_timespec64(target_addr
+
6849 offsetof(struct target__kernel_itimerspec
,
6851 &host_its
->it_interval
) ||
6852 host_to_target_timespec64(target_addr
+
6853 offsetof(struct target__kernel_itimerspec
,
6855 &host_its
->it_value
)) {
6856 return -TARGET_EFAULT
;
6862 #if defined(TARGET_NR_adjtimex) || \
6863 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6864 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6865 abi_long target_addr
)
6867 struct target_timex
*target_tx
;
6869 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6870 return -TARGET_EFAULT
;
6873 __get_user(host_tx
->modes
, &target_tx
->modes
);
6874 __get_user(host_tx
->offset
, &target_tx
->offset
);
6875 __get_user(host_tx
->freq
, &target_tx
->freq
);
6876 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6877 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6878 __get_user(host_tx
->status
, &target_tx
->status
);
6879 __get_user(host_tx
->constant
, &target_tx
->constant
);
6880 __get_user(host_tx
->precision
, &target_tx
->precision
);
6881 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6882 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6883 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6884 __get_user(host_tx
->tick
, &target_tx
->tick
);
6885 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6886 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6887 __get_user(host_tx
->shift
, &target_tx
->shift
);
6888 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6889 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6890 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6891 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6892 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6893 __get_user(host_tx
->tai
, &target_tx
->tai
);
6895 unlock_user_struct(target_tx
, target_addr
, 0);
6899 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6900 struct timex
*host_tx
)
6902 struct target_timex
*target_tx
;
6904 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6905 return -TARGET_EFAULT
;
6908 __put_user(host_tx
->modes
, &target_tx
->modes
);
6909 __put_user(host_tx
->offset
, &target_tx
->offset
);
6910 __put_user(host_tx
->freq
, &target_tx
->freq
);
6911 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6912 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6913 __put_user(host_tx
->status
, &target_tx
->status
);
6914 __put_user(host_tx
->constant
, &target_tx
->constant
);
6915 __put_user(host_tx
->precision
, &target_tx
->precision
);
6916 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6917 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6918 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6919 __put_user(host_tx
->tick
, &target_tx
->tick
);
6920 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6921 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6922 __put_user(host_tx
->shift
, &target_tx
->shift
);
6923 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6924 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6925 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6926 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6927 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6928 __put_user(host_tx
->tai
, &target_tx
->tai
);
6930 unlock_user_struct(target_tx
, target_addr
, 1);
6935 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6936 abi_ulong target_addr
)
6938 struct target_sigevent
*target_sevp
;
6940 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6941 return -TARGET_EFAULT
;
6944 /* This union is awkward on 64 bit systems because it has a 32 bit
6945 * integer and a pointer in it; we follow the conversion approach
6946 * used for handling sigval types in signal.c so the guest should get
6947 * the correct value back even if we did a 64 bit byteswap and it's
6948 * using the 32 bit integer.
6950 host_sevp
->sigev_value
.sival_ptr
=
6951 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6952 host_sevp
->sigev_signo
=
6953 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6954 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6955 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6957 unlock_user_struct(target_sevp
, target_addr
, 1);
6961 #if defined(TARGET_NR_mlockall)
6962 static inline int target_to_host_mlockall_arg(int arg
)
6966 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6967 result
|= MCL_CURRENT
;
6969 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6970 result
|= MCL_FUTURE
;
6976 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6977 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6978 defined(TARGET_NR_newfstatat))
6979 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6980 abi_ulong target_addr
,
6981 struct stat
*host_st
)
6983 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6984 if (((CPUARMState
*)cpu_env
)->eabi
) {
6985 struct target_eabi_stat64
*target_st
;
6987 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6988 return -TARGET_EFAULT
;
6989 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6990 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6991 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6992 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6993 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6995 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6996 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6997 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6998 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6999 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7000 __put_user(host_st
->st_size
, &target_st
->st_size
);
7001 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7002 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7003 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7004 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7005 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7006 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7007 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7008 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7009 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7011 unlock_user_struct(target_st
, target_addr
, 1);
7015 #if defined(TARGET_HAS_STRUCT_STAT64)
7016 struct target_stat64
*target_st
;
7018 struct target_stat
*target_st
;
7021 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7022 return -TARGET_EFAULT
;
7023 memset(target_st
, 0, sizeof(*target_st
));
7024 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7025 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7026 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7027 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7029 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7030 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7031 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7032 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7033 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7034 /* XXX: better use of kernel struct */
7035 __put_user(host_st
->st_size
, &target_st
->st_size
);
7036 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7037 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7038 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7039 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7040 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7041 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7042 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7043 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7044 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7046 unlock_user_struct(target_st
, target_addr
, 1);
7053 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7054 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7055 abi_ulong target_addr
)
7057 struct target_statx
*target_stx
;
7059 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7060 return -TARGET_EFAULT
;
7062 memset(target_stx
, 0, sizeof(*target_stx
));
7064 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7065 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7066 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7067 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7068 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7069 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7070 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7071 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7072 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7073 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7074 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7075 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7076 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7077 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7078 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7079 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7080 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7081 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7082 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7083 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7084 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7085 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7086 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7088 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Issue the host futex syscall, picking the time64 variant when the
 * host is 32-bit but its struct timespec carries a 64-bit tv_sec.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7119 static int do_safe_futex(int *uaddr
, int op
, int val
,
7120 const struct timespec
*timeout
, int *uaddr2
,
7123 #if HOST_LONG_BITS == 64
7124 #if defined(__NR_futex)
7125 /* always a 64-bit time_t, it doesn't define _time64 version */
7126 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7128 #else /* HOST_LONG_BITS == 64 */
7129 #if defined(__NR_futex_time64)
7130 if (sizeof(timeout
->tv_sec
) == 8) {
7131 /* _time64 function on 32bit arch */
7132 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7136 #if defined(__NR_futex)
7137 /* old function on 32bit arch */
7138 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7140 #endif /* HOST_LONG_BITS == 64 */
7141 return -TARGET_ENOSYS
;
7144 /* ??? Using host futex calls even when target atomic operations
7145 are not really atomic probably breaks things. However implementing
7146 futexes locally would make futexes shared between multiple processes
7147 tricky. However they're probably useless because guest atomic
7148 operations won't work either. */
7149 #if defined(TARGET_NR_futex)
7150 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7151 target_ulong uaddr2
, int val3
)
7153 struct timespec ts
, *pts
;
7156 /* ??? We assume FUTEX_* constants are the same on both host
7158 #ifdef FUTEX_CMD_MASK
7159 base_op
= op
& FUTEX_CMD_MASK
;
7165 case FUTEX_WAIT_BITSET
:
7168 target_to_host_timespec(pts
, timeout
);
7172 return do_safe_futex(g2h(uaddr
), op
, tswap32(val
), pts
, NULL
, val3
);
7174 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7176 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7178 case FUTEX_CMP_REQUEUE
:
7180 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7181 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7182 But the prototype takes a `struct timespec *'; insert casts
7183 to satisfy the compiler. We do not need to tswap TIMEOUT
7184 since it's not compared to guest memory. */
7185 pts
= (struct timespec
*)(uintptr_t) timeout
;
7186 return do_safe_futex(g2h(uaddr
), op
, val
, pts
, g2h(uaddr2
),
7187 (base_op
== FUTEX_CMP_REQUEUE
7191 return -TARGET_ENOSYS
;
7196 #if defined(TARGET_NR_futex_time64)
7197 static int do_futex_time64(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7198 target_ulong uaddr2
, int val3
)
7200 struct timespec ts
, *pts
;
7203 /* ??? We assume FUTEX_* constants are the same on both host
7205 #ifdef FUTEX_CMD_MASK
7206 base_op
= op
& FUTEX_CMD_MASK
;
7212 case FUTEX_WAIT_BITSET
:
7215 target_to_host_timespec64(pts
, timeout
);
7219 return do_safe_futex(g2h(uaddr
), op
, tswap32(val
), pts
, NULL
, val3
);
7221 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7223 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7225 case FUTEX_CMP_REQUEUE
:
7227 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7228 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7229 But the prototype takes a `struct timespec *'; insert casts
7230 to satisfy the compiler. We do not need to tswap TIMEOUT
7231 since it's not compared to guest memory. */
7232 pts
= (struct timespec
*)(uintptr_t) timeout
;
7233 return do_safe_futex(g2h(uaddr
), op
, val
, pts
, g2h(uaddr2
),
7234 (base_op
== FUTEX_CMP_REQUEUE
7238 return -TARGET_ENOSYS
;
7243 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7244 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7245 abi_long handle
, abi_long mount_id
,
7248 struct file_handle
*target_fh
;
7249 struct file_handle
*fh
;
7253 unsigned int size
, total_size
;
7255 if (get_user_s32(size
, handle
)) {
7256 return -TARGET_EFAULT
;
7259 name
= lock_user_string(pathname
);
7261 return -TARGET_EFAULT
;
7264 total_size
= sizeof(struct file_handle
) + size
;
7265 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7267 unlock_user(name
, pathname
, 0);
7268 return -TARGET_EFAULT
;
7271 fh
= g_malloc0(total_size
);
7272 fh
->handle_bytes
= size
;
7274 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7275 unlock_user(name
, pathname
, 0);
7277 /* man name_to_handle_at(2):
7278 * Other than the use of the handle_bytes field, the caller should treat
7279 * the file_handle structure as an opaque data type
7282 memcpy(target_fh
, fh
, total_size
);
7283 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7284 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7286 unlock_user(target_fh
, handle
, total_size
);
7288 if (put_user_s32(mid
, mount_id
)) {
7289 return -TARGET_EFAULT
;
7297 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7298 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7301 struct file_handle
*target_fh
;
7302 struct file_handle
*fh
;
7303 unsigned int size
, total_size
;
7306 if (get_user_s32(size
, handle
)) {
7307 return -TARGET_EFAULT
;
7310 total_size
= sizeof(struct file_handle
) + size
;
7311 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7313 return -TARGET_EFAULT
;
7316 fh
= g_memdup(target_fh
, total_size
);
7317 fh
->handle_bytes
= size
;
7318 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7320 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7321 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7325 unlock_user(target_fh
, handle
, total_size
);
7331 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7333 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7336 target_sigset_t
*target_mask
;
7340 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7341 return -TARGET_EINVAL
;
7343 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7344 return -TARGET_EFAULT
;
7347 target_to_host_sigset(&host_mask
, target_mask
);
7349 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7351 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7353 fd_trans_register(ret
, &target_signalfd_trans
);
7356 unlock_user_struct(target_mask
, mask
, 0);
7362 /* Map host to target signal numbers for the wait family of syscalls.
7363 Assume all other status bits are the same. */
7364 int host_to_target_waitstatus(int status
)
7366 if (WIFSIGNALED(status
)) {
7367 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
7369 if (WIFSTOPPED(status
)) {
7370 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
7376 static int open_self_cmdline(void *cpu_env
, int fd
)
7378 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7379 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7382 for (i
= 0; i
< bprm
->argc
; i
++) {
7383 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7385 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7393 static int open_self_maps(void *cpu_env
, int fd
)
7395 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7396 TaskState
*ts
= cpu
->opaque
;
7397 GSList
*map_info
= read_self_maps();
7401 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7402 MapInfo
*e
= (MapInfo
*) s
->data
;
7404 if (h2g_valid(e
->start
)) {
7405 unsigned long min
= e
->start
;
7406 unsigned long max
= e
->end
;
7407 int flags
= page_get_flags(h2g(min
));
7410 max
= h2g_valid(max
- 1) ?
7411 max
: (uintptr_t) g2h(GUEST_ADDR_MAX
) + 1;
7413 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7417 if (h2g(min
) == ts
->info
->stack_limit
) {
7423 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7424 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7425 h2g(min
), h2g(max
- 1) + 1,
7426 e
->is_read
? 'r' : '-',
7427 e
->is_write
? 'w' : '-',
7428 e
->is_exec
? 'x' : '-',
7429 e
->is_priv
? 'p' : '-',
7430 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7432 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7439 free_self_maps(map_info
);
7441 #ifdef TARGET_VSYSCALL_PAGE
7443 * We only support execution from the vsyscall page.
7444 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7446 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7447 " --xp 00000000 00:00 0",
7448 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7449 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7455 static int open_self_stat(void *cpu_env
, int fd
)
7457 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7458 TaskState
*ts
= cpu
->opaque
;
7459 g_autoptr(GString
) buf
= g_string_new(NULL
);
7462 for (i
= 0; i
< 44; i
++) {
7465 g_string_printf(buf
, FMT_pid
" ", getpid());
7466 } else if (i
== 1) {
7468 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7469 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7470 g_string_printf(buf
, "(%.15s) ", bin
);
7471 } else if (i
== 27) {
7473 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7475 /* for the rest, there is MasterCard */
7476 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7479 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7487 static int open_self_auxv(void *cpu_env
, int fd
)
7489 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7490 TaskState
*ts
= cpu
->opaque
;
7491 abi_ulong auxv
= ts
->info
->saved_auxv
;
7492 abi_ulong len
= ts
->info
->auxv_len
;
7496 * Auxiliary vector is stored in target process stack.
7497 * read in whole auxv vector and copy it to file
7499 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7503 r
= write(fd
, ptr
, len
);
7510 lseek(fd
, 0, SEEK_SET
);
7511 unlock_user(ptr
, auxv
, len
);
7517 static int is_proc_myself(const char *filename
, const char *entry
)
7519 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7520 filename
+= strlen("/proc/");
7521 if (!strncmp(filename
, "self/", strlen("self/"))) {
7522 filename
+= strlen("self/");
7523 } else if (*filename
>= '1' && *filename
<= '9') {
7525 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7526 if (!strncmp(filename
, myself
, strlen(myself
))) {
7527 filename
+= strlen(myself
);
7534 if (!strcmp(filename
, entry
)) {
7541 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7542 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7543 static int is_proc(const char *filename
, const char *entry
)
7545 return strcmp(filename
, entry
) == 0;
7549 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7550 static int open_net_route(void *cpu_env
, int fd
)
7557 fp
= fopen("/proc/net/route", "r");
7564 read
= getline(&line
, &len
, fp
);
7565 dprintf(fd
, "%s", line
);
7569 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7571 uint32_t dest
, gw
, mask
;
7572 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7575 fields
= sscanf(line
,
7576 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7577 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7578 &mask
, &mtu
, &window
, &irtt
);
7582 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7583 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7584 metric
, tswap32(mask
), mtu
, window
, irtt
);
7594 #if defined(TARGET_SPARC)
7595 static int open_cpuinfo(void *cpu_env
, int fd
)
7597 dprintf(fd
, "type\t\t: sun4u\n");
7602 #if defined(TARGET_HPPA)
7603 static int open_cpuinfo(void *cpu_env
, int fd
)
7605 dprintf(fd
, "cpu family\t: PA-RISC 1.1e\n");
7606 dprintf(fd
, "cpu\t\t: PA7300LC (PCX-L2)\n");
7607 dprintf(fd
, "capabilities\t: os32\n");
7608 dprintf(fd
, "model\t\t: 9000/778/B160L\n");
7609 dprintf(fd
, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7614 #if defined(TARGET_M68K)
7615 static int open_hardware(void *cpu_env
, int fd
)
7617 dprintf(fd
, "Model:\t\tqemu-m68k\n");
7622 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7625 const char *filename
;
7626 int (*fill
)(void *cpu_env
, int fd
);
7627 int (*cmp
)(const char *s1
, const char *s2
);
7629 const struct fake_open
*fake_open
;
7630 static const struct fake_open fakes
[] = {
7631 { "maps", open_self_maps
, is_proc_myself
},
7632 { "stat", open_self_stat
, is_proc_myself
},
7633 { "auxv", open_self_auxv
, is_proc_myself
},
7634 { "cmdline", open_self_cmdline
, is_proc_myself
},
7635 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7636 { "/proc/net/route", open_net_route
, is_proc
},
7638 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7639 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
7641 #if defined(TARGET_M68K)
7642 { "/proc/hardware", open_hardware
, is_proc
},
7644 { NULL
, NULL
, NULL
}
7647 if (is_proc_myself(pathname
, "exe")) {
7648 int execfd
= qemu_getauxval(AT_EXECFD
);
7649 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7652 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7653 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7658 if (fake_open
->filename
) {
7660 char filename
[PATH_MAX
];
7663 /* create temporary file to map stat to */
7664 tmpdir
= getenv("TMPDIR");
7667 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7668 fd
= mkstemp(filename
);
7674 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7680 lseek(fd
, 0, SEEK_SET
);
7685 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7688 #define TIMER_MAGIC 0x0caf0000
7689 #define TIMER_MAGIC_MASK 0xffff0000
7691 /* Convert QEMU provided timer ID back to internal 16bit index format */
7692 static target_timer_t
get_timer_id(abi_long arg
)
7694 target_timer_t timerid
= arg
;
7696 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7697 return -TARGET_EINVAL
;
7702 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7703 return -TARGET_EINVAL
;
7709 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7711 abi_ulong target_addr
,
7714 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7715 unsigned host_bits
= sizeof(*host_mask
) * 8;
7716 abi_ulong
*target_mask
;
7719 assert(host_size
>= target_size
);
7721 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7723 return -TARGET_EFAULT
;
7725 memset(host_mask
, 0, host_size
);
7727 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7728 unsigned bit
= i
* target_bits
;
7731 __get_user(val
, &target_mask
[i
]);
7732 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7733 if (val
& (1UL << j
)) {
7734 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7739 unlock_user(target_mask
, target_addr
, 0);
7743 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7745 abi_ulong target_addr
,
7748 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7749 unsigned host_bits
= sizeof(*host_mask
) * 8;
7750 abi_ulong
*target_mask
;
7753 assert(host_size
>= target_size
);
7755 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7757 return -TARGET_EFAULT
;
7760 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7761 unsigned bit
= i
* target_bits
;
7764 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7765 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7769 __put_user(val
, &target_mask
[i
]);
7772 unlock_user(target_mask
, target_addr
, target_size
);
7776 /* This is an internal helper for do_syscall so that it is easier
7777 * to have a single return point, so that actions, such as logging
7778 * of syscall results, can be performed.
7779 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7781 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7782 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7783 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7786 CPUState
*cpu
= env_cpu(cpu_env
);
7788 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7789 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7790 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7791 || defined(TARGET_NR_statx)
7794 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7795 || defined(TARGET_NR_fstatfs)
7801 case TARGET_NR_exit
:
7802 /* In old applications this may be used to implement _exit(2).
7803 However in threaded applictions it is used for thread termination,
7804 and _exit_group is used for application termination.
7805 Do thread termination if we have more then one thread. */
7807 if (block_signals()) {
7808 return -TARGET_ERESTARTSYS
;
7811 pthread_mutex_lock(&clone_lock
);
7813 if (CPU_NEXT(first_cpu
)) {
7814 TaskState
*ts
= cpu
->opaque
;
7816 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
7817 object_unref(OBJECT(cpu
));
7819 * At this point the CPU should be unrealized and removed
7820 * from cpu lists. We can clean-up the rest of the thread
7821 * data without the lock held.
7824 pthread_mutex_unlock(&clone_lock
);
7826 if (ts
->child_tidptr
) {
7827 put_user_u32(0, ts
->child_tidptr
);
7828 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7833 rcu_unregister_thread();
7837 pthread_mutex_unlock(&clone_lock
);
7838 preexit_cleanup(cpu_env
, arg1
);
7840 return 0; /* avoid warning */
7841 case TARGET_NR_read
:
7842 if (arg2
== 0 && arg3
== 0) {
7843 return get_errno(safe_read(arg1
, 0, 0));
7845 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7846 return -TARGET_EFAULT
;
7847 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7849 fd_trans_host_to_target_data(arg1
)) {
7850 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7852 unlock_user(p
, arg2
, ret
);
7855 case TARGET_NR_write
:
7856 if (arg2
== 0 && arg3
== 0) {
7857 return get_errno(safe_write(arg1
, 0, 0));
7859 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7860 return -TARGET_EFAULT
;
7861 if (fd_trans_target_to_host_data(arg1
)) {
7862 void *copy
= g_malloc(arg3
);
7863 memcpy(copy
, p
, arg3
);
7864 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7866 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7870 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7872 unlock_user(p
, arg2
, 0);
7875 #ifdef TARGET_NR_open
7876 case TARGET_NR_open
:
7877 if (!(p
= lock_user_string(arg1
)))
7878 return -TARGET_EFAULT
;
7879 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7880 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7882 fd_trans_unregister(ret
);
7883 unlock_user(p
, arg1
, 0);
7886 case TARGET_NR_openat
:
7887 if (!(p
= lock_user_string(arg2
)))
7888 return -TARGET_EFAULT
;
7889 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7890 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7892 fd_trans_unregister(ret
);
7893 unlock_user(p
, arg2
, 0);
7895 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7896 case TARGET_NR_name_to_handle_at
:
7897 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7900 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7901 case TARGET_NR_open_by_handle_at
:
7902 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7903 fd_trans_unregister(ret
);
7906 case TARGET_NR_close
:
7907 fd_trans_unregister(arg1
);
7908 return get_errno(close(arg1
));
7911 return do_brk(arg1
);
7912 #ifdef TARGET_NR_fork
7913 case TARGET_NR_fork
:
7914 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7916 #ifdef TARGET_NR_waitpid
7917 case TARGET_NR_waitpid
:
7920 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7921 if (!is_error(ret
) && arg2
&& ret
7922 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7923 return -TARGET_EFAULT
;
7927 #ifdef TARGET_NR_waitid
7928 case TARGET_NR_waitid
:
7932 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7933 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7934 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7935 return -TARGET_EFAULT
;
7936 host_to_target_siginfo(p
, &info
);
7937 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7942 #ifdef TARGET_NR_creat /* not on alpha */
7943 case TARGET_NR_creat
:
7944 if (!(p
= lock_user_string(arg1
)))
7945 return -TARGET_EFAULT
;
7946 ret
= get_errno(creat(p
, arg2
));
7947 fd_trans_unregister(ret
);
7948 unlock_user(p
, arg1
, 0);
7951 #ifdef TARGET_NR_link
7952 case TARGET_NR_link
:
7955 p
= lock_user_string(arg1
);
7956 p2
= lock_user_string(arg2
);
7958 ret
= -TARGET_EFAULT
;
7960 ret
= get_errno(link(p
, p2
));
7961 unlock_user(p2
, arg2
, 0);
7962 unlock_user(p
, arg1
, 0);
7966 #if defined(TARGET_NR_linkat)
7967 case TARGET_NR_linkat
:
7971 return -TARGET_EFAULT
;
7972 p
= lock_user_string(arg2
);
7973 p2
= lock_user_string(arg4
);
7975 ret
= -TARGET_EFAULT
;
7977 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7978 unlock_user(p
, arg2
, 0);
7979 unlock_user(p2
, arg4
, 0);
7983 #ifdef TARGET_NR_unlink
7984 case TARGET_NR_unlink
:
7985 if (!(p
= lock_user_string(arg1
)))
7986 return -TARGET_EFAULT
;
7987 ret
= get_errno(unlink(p
));
7988 unlock_user(p
, arg1
, 0);
7991 #if defined(TARGET_NR_unlinkat)
7992 case TARGET_NR_unlinkat
:
7993 if (!(p
= lock_user_string(arg2
)))
7994 return -TARGET_EFAULT
;
7995 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7996 unlock_user(p
, arg2
, 0);
7999 case TARGET_NR_execve
:
8001 char **argp
, **envp
;
8004 abi_ulong guest_argp
;
8005 abi_ulong guest_envp
;
8012 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8013 if (get_user_ual(addr
, gp
))
8014 return -TARGET_EFAULT
;
8021 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8022 if (get_user_ual(addr
, gp
))
8023 return -TARGET_EFAULT
;
8029 argp
= g_new0(char *, argc
+ 1);
8030 envp
= g_new0(char *, envc
+ 1);
8032 for (gp
= guest_argp
, q
= argp
; gp
;
8033 gp
+= sizeof(abi_ulong
), q
++) {
8034 if (get_user_ual(addr
, gp
))
8038 if (!(*q
= lock_user_string(addr
)))
8040 total_size
+= strlen(*q
) + 1;
8044 for (gp
= guest_envp
, q
= envp
; gp
;
8045 gp
+= sizeof(abi_ulong
), q
++) {
8046 if (get_user_ual(addr
, gp
))
8050 if (!(*q
= lock_user_string(addr
)))
8052 total_size
+= strlen(*q
) + 1;
8056 if (!(p
= lock_user_string(arg1
)))
8058 /* Although execve() is not an interruptible syscall it is
8059 * a special case where we must use the safe_syscall wrapper:
8060 * if we allow a signal to happen before we make the host
8061 * syscall then we will 'lose' it, because at the point of
8062 * execve the process leaves QEMU's control. So we use the
8063 * safe syscall wrapper to ensure that we either take the
8064 * signal as a guest signal, or else it does not happen
8065 * before the execve completes and makes it the other
8066 * program's problem.
8068 ret
= get_errno(safe_execve(p
, argp
, envp
));
8069 unlock_user(p
, arg1
, 0);
8074 ret
= -TARGET_EFAULT
;
8077 for (gp
= guest_argp
, q
= argp
; *q
;
8078 gp
+= sizeof(abi_ulong
), q
++) {
8079 if (get_user_ual(addr
, gp
)
8082 unlock_user(*q
, addr
, 0);
8084 for (gp
= guest_envp
, q
= envp
; *q
;
8085 gp
+= sizeof(abi_ulong
), q
++) {
8086 if (get_user_ual(addr
, gp
)
8089 unlock_user(*q
, addr
, 0);
8096 case TARGET_NR_chdir
:
8097 if (!(p
= lock_user_string(arg1
)))
8098 return -TARGET_EFAULT
;
8099 ret
= get_errno(chdir(p
));
8100 unlock_user(p
, arg1
, 0);
8102 #ifdef TARGET_NR_time
8103 case TARGET_NR_time
:
8106 ret
= get_errno(time(&host_time
));
8109 && put_user_sal(host_time
, arg1
))
8110 return -TARGET_EFAULT
;
8114 #ifdef TARGET_NR_mknod
8115 case TARGET_NR_mknod
:
8116 if (!(p
= lock_user_string(arg1
)))
8117 return -TARGET_EFAULT
;
8118 ret
= get_errno(mknod(p
, arg2
, arg3
));
8119 unlock_user(p
, arg1
, 0);
8122 #if defined(TARGET_NR_mknodat)
8123 case TARGET_NR_mknodat
:
8124 if (!(p
= lock_user_string(arg2
)))
8125 return -TARGET_EFAULT
;
8126 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8127 unlock_user(p
, arg2
, 0);
8130 #ifdef TARGET_NR_chmod
8131 case TARGET_NR_chmod
:
8132 if (!(p
= lock_user_string(arg1
)))
8133 return -TARGET_EFAULT
;
8134 ret
= get_errno(chmod(p
, arg2
));
8135 unlock_user(p
, arg1
, 0);
8138 #ifdef TARGET_NR_lseek
8139 case TARGET_NR_lseek
:
8140 return get_errno(lseek(arg1
, arg2
, arg3
));
8142 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8143 /* Alpha specific */
8144 case TARGET_NR_getxpid
:
8145 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8146 return get_errno(getpid());
8148 #ifdef TARGET_NR_getpid
8149 case TARGET_NR_getpid
:
8150 return get_errno(getpid());
8152 case TARGET_NR_mount
:
8154 /* need to look at the data field */
8158 p
= lock_user_string(arg1
);
8160 return -TARGET_EFAULT
;
8166 p2
= lock_user_string(arg2
);
8169 unlock_user(p
, arg1
, 0);
8171 return -TARGET_EFAULT
;
8175 p3
= lock_user_string(arg3
);
8178 unlock_user(p
, arg1
, 0);
8180 unlock_user(p2
, arg2
, 0);
8181 return -TARGET_EFAULT
;
8187 /* FIXME - arg5 should be locked, but it isn't clear how to
8188 * do that since it's not guaranteed to be a NULL-terminated
8192 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8194 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8196 ret
= get_errno(ret
);
8199 unlock_user(p
, arg1
, 0);
8201 unlock_user(p2
, arg2
, 0);
8203 unlock_user(p3
, arg3
, 0);
8207 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8208 #if defined(TARGET_NR_umount)
8209 case TARGET_NR_umount
:
8211 #if defined(TARGET_NR_oldumount)
8212 case TARGET_NR_oldumount
:
8214 if (!(p
= lock_user_string(arg1
)))
8215 return -TARGET_EFAULT
;
8216 ret
= get_errno(umount(p
));
8217 unlock_user(p
, arg1
, 0);
8220 #ifdef TARGET_NR_stime /* not on alpha */
8221 case TARGET_NR_stime
:
8225 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8226 return -TARGET_EFAULT
;
8228 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8231 #ifdef TARGET_NR_alarm /* not on alpha */
8232 case TARGET_NR_alarm
:
8235 #ifdef TARGET_NR_pause /* not on alpha */
8236 case TARGET_NR_pause
:
8237 if (!block_signals()) {
8238 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8240 return -TARGET_EINTR
;
8242 #ifdef TARGET_NR_utime
8243 case TARGET_NR_utime
:
8245 struct utimbuf tbuf
, *host_tbuf
;
8246 struct target_utimbuf
*target_tbuf
;
8248 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8249 return -TARGET_EFAULT
;
8250 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8251 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8252 unlock_user_struct(target_tbuf
, arg2
, 0);
8257 if (!(p
= lock_user_string(arg1
)))
8258 return -TARGET_EFAULT
;
8259 ret
= get_errno(utime(p
, host_tbuf
));
8260 unlock_user(p
, arg1
, 0);
8264 #ifdef TARGET_NR_utimes
8265 case TARGET_NR_utimes
:
8267 struct timeval
*tvp
, tv
[2];
8269 if (copy_from_user_timeval(&tv
[0], arg2
)
8270 || copy_from_user_timeval(&tv
[1],
8271 arg2
+ sizeof(struct target_timeval
)))
8272 return -TARGET_EFAULT
;
8277 if (!(p
= lock_user_string(arg1
)))
8278 return -TARGET_EFAULT
;
8279 ret
= get_errno(utimes(p
, tvp
));
8280 unlock_user(p
, arg1
, 0);
8284 #if defined(TARGET_NR_futimesat)
8285 case TARGET_NR_futimesat
:
8287 struct timeval
*tvp
, tv
[2];
8289 if (copy_from_user_timeval(&tv
[0], arg3
)
8290 || copy_from_user_timeval(&tv
[1],
8291 arg3
+ sizeof(struct target_timeval
)))
8292 return -TARGET_EFAULT
;
8297 if (!(p
= lock_user_string(arg2
))) {
8298 return -TARGET_EFAULT
;
8300 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8301 unlock_user(p
, arg2
, 0);
8305 #ifdef TARGET_NR_access
8306 case TARGET_NR_access
:
8307 if (!(p
= lock_user_string(arg1
))) {
8308 return -TARGET_EFAULT
;
8310 ret
= get_errno(access(path(p
), arg2
));
8311 unlock_user(p
, arg1
, 0);
8314 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8315 case TARGET_NR_faccessat
:
8316 if (!(p
= lock_user_string(arg2
))) {
8317 return -TARGET_EFAULT
;
8319 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8320 unlock_user(p
, arg2
, 0);
8323 #ifdef TARGET_NR_nice /* not on alpha */
8324 case TARGET_NR_nice
:
8325 return get_errno(nice(arg1
));
8327 case TARGET_NR_sync
:
8330 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8331 case TARGET_NR_syncfs
:
8332 return get_errno(syncfs(arg1
));
8334 case TARGET_NR_kill
:
8335 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8336 #ifdef TARGET_NR_rename
8337 case TARGET_NR_rename
:
8340 p
= lock_user_string(arg1
);
8341 p2
= lock_user_string(arg2
);
8343 ret
= -TARGET_EFAULT
;
8345 ret
= get_errno(rename(p
, p2
));
8346 unlock_user(p2
, arg2
, 0);
8347 unlock_user(p
, arg1
, 0);
8351 #if defined(TARGET_NR_renameat)
8352 case TARGET_NR_renameat
:
8355 p
= lock_user_string(arg2
);
8356 p2
= lock_user_string(arg4
);
8358 ret
= -TARGET_EFAULT
;
8360 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8361 unlock_user(p2
, arg4
, 0);
8362 unlock_user(p
, arg2
, 0);
8366 #if defined(TARGET_NR_renameat2)
8367 case TARGET_NR_renameat2
:
8370 p
= lock_user_string(arg2
);
8371 p2
= lock_user_string(arg4
);
8373 ret
= -TARGET_EFAULT
;
8375 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8377 unlock_user(p2
, arg4
, 0);
8378 unlock_user(p
, arg2
, 0);
8382 #ifdef TARGET_NR_mkdir
8383 case TARGET_NR_mkdir
:
8384 if (!(p
= lock_user_string(arg1
)))
8385 return -TARGET_EFAULT
;
8386 ret
= get_errno(mkdir(p
, arg2
));
8387 unlock_user(p
, arg1
, 0);
8390 #if defined(TARGET_NR_mkdirat)
8391 case TARGET_NR_mkdirat
:
8392 if (!(p
= lock_user_string(arg2
)))
8393 return -TARGET_EFAULT
;
8394 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8395 unlock_user(p
, arg2
, 0);
8398 #ifdef TARGET_NR_rmdir
8399 case TARGET_NR_rmdir
:
8400 if (!(p
= lock_user_string(arg1
)))
8401 return -TARGET_EFAULT
;
8402 ret
= get_errno(rmdir(p
));
8403 unlock_user(p
, arg1
, 0);
8407 ret
= get_errno(dup(arg1
));
8409 fd_trans_dup(arg1
, ret
);
8412 #ifdef TARGET_NR_pipe
8413 case TARGET_NR_pipe
:
8414 return do_pipe(cpu_env
, arg1
, 0, 0);
8416 #ifdef TARGET_NR_pipe2
8417 case TARGET_NR_pipe2
:
8418 return do_pipe(cpu_env
, arg1
,
8419 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8421 case TARGET_NR_times
:
8423 struct target_tms
*tmsp
;
8425 ret
= get_errno(times(&tms
));
8427 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8429 return -TARGET_EFAULT
;
8430 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8431 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8432 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8433 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8436 ret
= host_to_target_clock_t(ret
);
8439 case TARGET_NR_acct
:
8441 ret
= get_errno(acct(NULL
));
8443 if (!(p
= lock_user_string(arg1
))) {
8444 return -TARGET_EFAULT
;
8446 ret
= get_errno(acct(path(p
)));
8447 unlock_user(p
, arg1
, 0);
8450 #ifdef TARGET_NR_umount2
8451 case TARGET_NR_umount2
:
8452 if (!(p
= lock_user_string(arg1
)))
8453 return -TARGET_EFAULT
;
8454 ret
= get_errno(umount2(p
, arg2
));
8455 unlock_user(p
, arg1
, 0);
8458 case TARGET_NR_ioctl
:
8459 return do_ioctl(arg1
, arg2
, arg3
);
8460 #ifdef TARGET_NR_fcntl
8461 case TARGET_NR_fcntl
:
8462 return do_fcntl(arg1
, arg2
, arg3
);
8464 case TARGET_NR_setpgid
:
8465 return get_errno(setpgid(arg1
, arg2
));
8466 case TARGET_NR_umask
:
8467 return get_errno(umask(arg1
));
8468 case TARGET_NR_chroot
:
8469 if (!(p
= lock_user_string(arg1
)))
8470 return -TARGET_EFAULT
;
8471 ret
= get_errno(chroot(p
));
8472 unlock_user(p
, arg1
, 0);
8474 #ifdef TARGET_NR_dup2
8475 case TARGET_NR_dup2
:
8476 ret
= get_errno(dup2(arg1
, arg2
));
8478 fd_trans_dup(arg1
, arg2
);
8482 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8483 case TARGET_NR_dup3
:
8487 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8490 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8491 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8493 fd_trans_dup(arg1
, arg2
);
8498 #ifdef TARGET_NR_getppid /* not on alpha */
8499 case TARGET_NR_getppid
:
8500 return get_errno(getppid());
8502 #ifdef TARGET_NR_getpgrp
8503 case TARGET_NR_getpgrp
:
8504 return get_errno(getpgrp());
8506 case TARGET_NR_setsid
:
8507 return get_errno(setsid());
8508 #ifdef TARGET_NR_sigaction
8509 case TARGET_NR_sigaction
:
8511 #if defined(TARGET_ALPHA)
8512 struct target_sigaction act
, oact
, *pact
= 0;
8513 struct target_old_sigaction
*old_act
;
8515 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8516 return -TARGET_EFAULT
;
8517 act
._sa_handler
= old_act
->_sa_handler
;
8518 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8519 act
.sa_flags
= old_act
->sa_flags
;
8520 act
.sa_restorer
= 0;
8521 unlock_user_struct(old_act
, arg2
, 0);
8524 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8525 if (!is_error(ret
) && arg3
) {
8526 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8527 return -TARGET_EFAULT
;
8528 old_act
->_sa_handler
= oact
._sa_handler
;
8529 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8530 old_act
->sa_flags
= oact
.sa_flags
;
8531 unlock_user_struct(old_act
, arg3
, 1);
8533 #elif defined(TARGET_MIPS)
8534 struct target_sigaction act
, oact
, *pact
, *old_act
;
8537 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8538 return -TARGET_EFAULT
;
8539 act
._sa_handler
= old_act
->_sa_handler
;
8540 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8541 act
.sa_flags
= old_act
->sa_flags
;
8542 unlock_user_struct(old_act
, arg2
, 0);
8548 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8550 if (!is_error(ret
) && arg3
) {
8551 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8552 return -TARGET_EFAULT
;
8553 old_act
->_sa_handler
= oact
._sa_handler
;
8554 old_act
->sa_flags
= oact
.sa_flags
;
8555 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8556 old_act
->sa_mask
.sig
[1] = 0;
8557 old_act
->sa_mask
.sig
[2] = 0;
8558 old_act
->sa_mask
.sig
[3] = 0;
8559 unlock_user_struct(old_act
, arg3
, 1);
8562 struct target_old_sigaction
*old_act
;
8563 struct target_sigaction act
, oact
, *pact
;
8565 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8566 return -TARGET_EFAULT
;
8567 act
._sa_handler
= old_act
->_sa_handler
;
8568 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8569 act
.sa_flags
= old_act
->sa_flags
;
8570 act
.sa_restorer
= old_act
->sa_restorer
;
8571 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8572 act
.ka_restorer
= 0;
8574 unlock_user_struct(old_act
, arg2
, 0);
8579 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8580 if (!is_error(ret
) && arg3
) {
8581 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8582 return -TARGET_EFAULT
;
8583 old_act
->_sa_handler
= oact
._sa_handler
;
8584 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8585 old_act
->sa_flags
= oact
.sa_flags
;
8586 old_act
->sa_restorer
= oact
.sa_restorer
;
8587 unlock_user_struct(old_act
, arg3
, 1);
8593 case TARGET_NR_rt_sigaction
:
8595 #if defined(TARGET_ALPHA)
8596 /* For Alpha and SPARC this is a 5 argument syscall, with
8597 * a 'restorer' parameter which must be copied into the
8598 * sa_restorer field of the sigaction struct.
8599 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8600 * and arg5 is the sigsetsize.
8601 * Alpha also has a separate rt_sigaction struct that it uses
8602 * here; SPARC uses the usual sigaction struct.
8604 struct target_rt_sigaction
*rt_act
;
8605 struct target_sigaction act
, oact
, *pact
= 0;
8607 if (arg4
!= sizeof(target_sigset_t
)) {
8608 return -TARGET_EINVAL
;
8611 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8612 return -TARGET_EFAULT
;
8613 act
._sa_handler
= rt_act
->_sa_handler
;
8614 act
.sa_mask
= rt_act
->sa_mask
;
8615 act
.sa_flags
= rt_act
->sa_flags
;
8616 act
.sa_restorer
= arg5
;
8617 unlock_user_struct(rt_act
, arg2
, 0);
8620 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8621 if (!is_error(ret
) && arg3
) {
8622 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8623 return -TARGET_EFAULT
;
8624 rt_act
->_sa_handler
= oact
._sa_handler
;
8625 rt_act
->sa_mask
= oact
.sa_mask
;
8626 rt_act
->sa_flags
= oact
.sa_flags
;
8627 unlock_user_struct(rt_act
, arg3
, 1);
8631 target_ulong restorer
= arg4
;
8632 target_ulong sigsetsize
= arg5
;
8634 target_ulong sigsetsize
= arg4
;
8636 struct target_sigaction
*act
;
8637 struct target_sigaction
*oact
;
8639 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8640 return -TARGET_EINVAL
;
8643 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8644 return -TARGET_EFAULT
;
8646 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8647 act
->ka_restorer
= restorer
;
8653 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8654 ret
= -TARGET_EFAULT
;
8655 goto rt_sigaction_fail
;
8659 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8662 unlock_user_struct(act
, arg2
, 0);
8664 unlock_user_struct(oact
, arg3
, 1);
8668 #ifdef TARGET_NR_sgetmask /* not on alpha */
8669 case TARGET_NR_sgetmask
:
8672 abi_ulong target_set
;
8673 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8675 host_to_target_old_sigset(&target_set
, &cur_set
);
8681 #ifdef TARGET_NR_ssetmask /* not on alpha */
8682 case TARGET_NR_ssetmask
:
8685 abi_ulong target_set
= arg1
;
8686 target_to_host_old_sigset(&set
, &target_set
);
8687 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8689 host_to_target_old_sigset(&target_set
, &oset
);
8695 #ifdef TARGET_NR_sigprocmask
8696 case TARGET_NR_sigprocmask
:
8698 #if defined(TARGET_ALPHA)
8699 sigset_t set
, oldset
;
8704 case TARGET_SIG_BLOCK
:
8707 case TARGET_SIG_UNBLOCK
:
8710 case TARGET_SIG_SETMASK
:
8714 return -TARGET_EINVAL
;
8717 target_to_host_old_sigset(&set
, &mask
);
8719 ret
= do_sigprocmask(how
, &set
, &oldset
);
8720 if (!is_error(ret
)) {
8721 host_to_target_old_sigset(&mask
, &oldset
);
8723 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8726 sigset_t set
, oldset
, *set_ptr
;
8731 case TARGET_SIG_BLOCK
:
8734 case TARGET_SIG_UNBLOCK
:
8737 case TARGET_SIG_SETMASK
:
8741 return -TARGET_EINVAL
;
8743 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8744 return -TARGET_EFAULT
;
8745 target_to_host_old_sigset(&set
, p
);
8746 unlock_user(p
, arg2
, 0);
8752 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8753 if (!is_error(ret
) && arg3
) {
8754 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8755 return -TARGET_EFAULT
;
8756 host_to_target_old_sigset(p
, &oldset
);
8757 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8763 case TARGET_NR_rt_sigprocmask
:
8766 sigset_t set
, oldset
, *set_ptr
;
8768 if (arg4
!= sizeof(target_sigset_t
)) {
8769 return -TARGET_EINVAL
;
8774 case TARGET_SIG_BLOCK
:
8777 case TARGET_SIG_UNBLOCK
:
8780 case TARGET_SIG_SETMASK
:
8784 return -TARGET_EINVAL
;
8786 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8787 return -TARGET_EFAULT
;
8788 target_to_host_sigset(&set
, p
);
8789 unlock_user(p
, arg2
, 0);
8795 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8796 if (!is_error(ret
) && arg3
) {
8797 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8798 return -TARGET_EFAULT
;
8799 host_to_target_sigset(p
, &oldset
);
8800 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8804 #ifdef TARGET_NR_sigpending
8805 case TARGET_NR_sigpending
:
8808 ret
= get_errno(sigpending(&set
));
8809 if (!is_error(ret
)) {
8810 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8811 return -TARGET_EFAULT
;
8812 host_to_target_old_sigset(p
, &set
);
8813 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8818 case TARGET_NR_rt_sigpending
:
8822 /* Yes, this check is >, not != like most. We follow the kernel's
8823 * logic and it does it like this because it implements
8824 * NR_sigpending through the same code path, and in that case
8825 * the old_sigset_t is smaller in size.
8827 if (arg2
> sizeof(target_sigset_t
)) {
8828 return -TARGET_EINVAL
;
8831 ret
= get_errno(sigpending(&set
));
8832 if (!is_error(ret
)) {
8833 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8834 return -TARGET_EFAULT
;
8835 host_to_target_sigset(p
, &set
);
8836 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8840 #ifdef TARGET_NR_sigsuspend
8841 case TARGET_NR_sigsuspend
:
8843 TaskState
*ts
= cpu
->opaque
;
8844 #if defined(TARGET_ALPHA)
8845 abi_ulong mask
= arg1
;
8846 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8848 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8849 return -TARGET_EFAULT
;
8850 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8851 unlock_user(p
, arg1
, 0);
8853 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8855 if (ret
!= -TARGET_ERESTARTSYS
) {
8856 ts
->in_sigsuspend
= 1;
8861 case TARGET_NR_rt_sigsuspend
:
8863 TaskState
*ts
= cpu
->opaque
;
8865 if (arg2
!= sizeof(target_sigset_t
)) {
8866 return -TARGET_EINVAL
;
8868 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8869 return -TARGET_EFAULT
;
8870 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8871 unlock_user(p
, arg1
, 0);
8872 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8874 if (ret
!= -TARGET_ERESTARTSYS
) {
8875 ts
->in_sigsuspend
= 1;
8879 #ifdef TARGET_NR_rt_sigtimedwait
8880 case TARGET_NR_rt_sigtimedwait
:
8883 struct timespec uts
, *puts
;
8886 if (arg4
!= sizeof(target_sigset_t
)) {
8887 return -TARGET_EINVAL
;
8890 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8891 return -TARGET_EFAULT
;
8892 target_to_host_sigset(&set
, p
);
8893 unlock_user(p
, arg1
, 0);
8896 if (target_to_host_timespec(puts
, arg3
)) {
8897 return -TARGET_EFAULT
;
8902 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8904 if (!is_error(ret
)) {
8906 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8909 return -TARGET_EFAULT
;
8911 host_to_target_siginfo(p
, &uinfo
);
8912 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8914 ret
= host_to_target_signal(ret
);
8919 case TARGET_NR_rt_sigqueueinfo
:
8923 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8925 return -TARGET_EFAULT
;
8927 target_to_host_siginfo(&uinfo
, p
);
8928 unlock_user(p
, arg3
, 0);
8929 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8932 case TARGET_NR_rt_tgsigqueueinfo
:
8936 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8938 return -TARGET_EFAULT
;
8940 target_to_host_siginfo(&uinfo
, p
);
8941 unlock_user(p
, arg4
, 0);
8942 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8945 #ifdef TARGET_NR_sigreturn
8946 case TARGET_NR_sigreturn
:
8947 if (block_signals()) {
8948 return -TARGET_ERESTARTSYS
;
8950 return do_sigreturn(cpu_env
);
8952 case TARGET_NR_rt_sigreturn
:
8953 if (block_signals()) {
8954 return -TARGET_ERESTARTSYS
;
8956 return do_rt_sigreturn(cpu_env
);
8957 case TARGET_NR_sethostname
:
8958 if (!(p
= lock_user_string(arg1
)))
8959 return -TARGET_EFAULT
;
8960 ret
= get_errno(sethostname(p
, arg2
));
8961 unlock_user(p
, arg1
, 0);
8963 #ifdef TARGET_NR_setrlimit
8964 case TARGET_NR_setrlimit
:
8966 int resource
= target_to_host_resource(arg1
);
8967 struct target_rlimit
*target_rlim
;
8969 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8970 return -TARGET_EFAULT
;
8971 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8972 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8973 unlock_user_struct(target_rlim
, arg2
, 0);
8975 * If we just passed through resource limit settings for memory then
8976 * they would also apply to QEMU's own allocations, and QEMU will
8977 * crash or hang or die if its allocations fail. Ideally we would
8978 * track the guest allocations in QEMU and apply the limits ourselves.
8979 * For now, just tell the guest the call succeeded but don't actually
8982 if (resource
!= RLIMIT_AS
&&
8983 resource
!= RLIMIT_DATA
&&
8984 resource
!= RLIMIT_STACK
) {
8985 return get_errno(setrlimit(resource
, &rlim
));
8991 #ifdef TARGET_NR_getrlimit
8992 case TARGET_NR_getrlimit
:
8994 int resource
= target_to_host_resource(arg1
);
8995 struct target_rlimit
*target_rlim
;
8998 ret
= get_errno(getrlimit(resource
, &rlim
));
8999 if (!is_error(ret
)) {
9000 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9001 return -TARGET_EFAULT
;
9002 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9003 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9004 unlock_user_struct(target_rlim
, arg2
, 1);
9009 case TARGET_NR_getrusage
:
9011 struct rusage rusage
;
9012 ret
= get_errno(getrusage(arg1
, &rusage
));
9013 if (!is_error(ret
)) {
9014 ret
= host_to_target_rusage(arg2
, &rusage
);
9018 #if defined(TARGET_NR_gettimeofday)
9019 case TARGET_NR_gettimeofday
:
9024 ret
= get_errno(gettimeofday(&tv
, &tz
));
9025 if (!is_error(ret
)) {
9026 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9027 return -TARGET_EFAULT
;
9029 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9030 return -TARGET_EFAULT
;
9036 #if defined(TARGET_NR_settimeofday)
9037 case TARGET_NR_settimeofday
:
9039 struct timeval tv
, *ptv
= NULL
;
9040 struct timezone tz
, *ptz
= NULL
;
9043 if (copy_from_user_timeval(&tv
, arg1
)) {
9044 return -TARGET_EFAULT
;
9050 if (copy_from_user_timezone(&tz
, arg2
)) {
9051 return -TARGET_EFAULT
;
9056 return get_errno(settimeofday(ptv
, ptz
));
9059 #if defined(TARGET_NR_select)
9060 case TARGET_NR_select
:
9061 #if defined(TARGET_WANT_NI_OLD_SELECT)
9062 /* some architectures used to have old_select here
9063 * but now ENOSYS it.
9065 ret
= -TARGET_ENOSYS
;
9066 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9067 ret
= do_old_select(arg1
);
9069 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9073 #ifdef TARGET_NR_pselect6
9074 case TARGET_NR_pselect6
:
9076 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9077 fd_set rfds
, wfds
, efds
;
9078 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9079 struct timespec ts
, *ts_ptr
;
9082 * The 6th arg is actually two args smashed together,
9083 * so we cannot use the C library.
9091 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9092 target_sigset_t
*target_sigset
;
9100 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9104 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9108 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9114 * This takes a timespec, and not a timeval, so we cannot
9115 * use the do_select() helper ...
9118 if (target_to_host_timespec(&ts
, ts_addr
)) {
9119 return -TARGET_EFAULT
;
9126 /* Extract the two packed args for the sigset */
9129 sig
.size
= SIGSET_T_SIZE
;
9131 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9133 return -TARGET_EFAULT
;
9135 arg_sigset
= tswapal(arg7
[0]);
9136 arg_sigsize
= tswapal(arg7
[1]);
9137 unlock_user(arg7
, arg6
, 0);
9141 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9142 /* Like the kernel, we enforce correct size sigsets */
9143 return -TARGET_EINVAL
;
9145 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9146 sizeof(*target_sigset
), 1);
9147 if (!target_sigset
) {
9148 return -TARGET_EFAULT
;
9150 target_to_host_sigset(&set
, target_sigset
);
9151 unlock_user(target_sigset
, arg_sigset
, 0);
9159 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9162 if (!is_error(ret
)) {
9163 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9164 return -TARGET_EFAULT
;
9165 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9166 return -TARGET_EFAULT
;
9167 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9168 return -TARGET_EFAULT
;
9170 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9171 return -TARGET_EFAULT
;
9176 #ifdef TARGET_NR_symlink
9177 case TARGET_NR_symlink
:
9180 p
= lock_user_string(arg1
);
9181 p2
= lock_user_string(arg2
);
9183 ret
= -TARGET_EFAULT
;
9185 ret
= get_errno(symlink(p
, p2
));
9186 unlock_user(p2
, arg2
, 0);
9187 unlock_user(p
, arg1
, 0);
9191 #if defined(TARGET_NR_symlinkat)
9192 case TARGET_NR_symlinkat
:
9195 p
= lock_user_string(arg1
);
9196 p2
= lock_user_string(arg3
);
9198 ret
= -TARGET_EFAULT
;
9200 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9201 unlock_user(p2
, arg3
, 0);
9202 unlock_user(p
, arg1
, 0);
9206 #ifdef TARGET_NR_readlink
9207 case TARGET_NR_readlink
:
9210 p
= lock_user_string(arg1
);
9211 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9213 ret
= -TARGET_EFAULT
;
9215 /* Short circuit this for the magic exe check. */
9216 ret
= -TARGET_EINVAL
;
9217 } else if (is_proc_myself((const char *)p
, "exe")) {
9218 char real
[PATH_MAX
], *temp
;
9219 temp
= realpath(exec_path
, real
);
9220 /* Return value is # of bytes that we wrote to the buffer. */
9222 ret
= get_errno(-1);
9224 /* Don't worry about sign mismatch as earlier mapping
9225 * logic would have thrown a bad address error. */
9226 ret
= MIN(strlen(real
), arg3
);
9227 /* We cannot NUL terminate the string. */
9228 memcpy(p2
, real
, ret
);
9231 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9233 unlock_user(p2
, arg2
, ret
);
9234 unlock_user(p
, arg1
, 0);
9238 #if defined(TARGET_NR_readlinkat)
9239 case TARGET_NR_readlinkat
:
9242 p
= lock_user_string(arg2
);
9243 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9245 ret
= -TARGET_EFAULT
;
9246 } else if (is_proc_myself((const char *)p
, "exe")) {
9247 char real
[PATH_MAX
], *temp
;
9248 temp
= realpath(exec_path
, real
);
9249 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9250 snprintf((char *)p2
, arg4
, "%s", real
);
9252 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9254 unlock_user(p2
, arg3
, ret
);
9255 unlock_user(p
, arg2
, 0);
9259 #ifdef TARGET_NR_swapon
9260 case TARGET_NR_swapon
:
9261 if (!(p
= lock_user_string(arg1
)))
9262 return -TARGET_EFAULT
;
9263 ret
= get_errno(swapon(p
, arg2
));
9264 unlock_user(p
, arg1
, 0);
9267 case TARGET_NR_reboot
:
9268 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9269 /* arg4 must be ignored in all other cases */
9270 p
= lock_user_string(arg4
);
9272 return -TARGET_EFAULT
;
9274 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9275 unlock_user(p
, arg4
, 0);
9277 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9280 #ifdef TARGET_NR_mmap
9281 case TARGET_NR_mmap
:
9282 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9283 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9284 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9285 || defined(TARGET_S390X)
9288 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9289 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9290 return -TARGET_EFAULT
;
9297 unlock_user(v
, arg1
, 0);
9298 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9299 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9303 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9304 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9310 #ifdef TARGET_NR_mmap2
9311 case TARGET_NR_mmap2
:
9313 #define MMAP_SHIFT 12
9315 ret
= target_mmap(arg1
, arg2
, arg3
,
9316 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9317 arg5
, arg6
<< MMAP_SHIFT
);
9318 return get_errno(ret
);
9320 case TARGET_NR_munmap
:
9321 return get_errno(target_munmap(arg1
, arg2
));
9322 case TARGET_NR_mprotect
:
9324 TaskState
*ts
= cpu
->opaque
;
9325 /* Special hack to detect libc making the stack executable. */
9326 if ((arg3
& PROT_GROWSDOWN
)
9327 && arg1
>= ts
->info
->stack_limit
9328 && arg1
<= ts
->info
->start_stack
) {
9329 arg3
&= ~PROT_GROWSDOWN
;
9330 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9331 arg1
= ts
->info
->stack_limit
;
9334 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9335 #ifdef TARGET_NR_mremap
9336 case TARGET_NR_mremap
:
9337 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9339 /* ??? msync/mlock/munlock are broken for softmmu. */
9340 #ifdef TARGET_NR_msync
9341 case TARGET_NR_msync
:
9342 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9344 #ifdef TARGET_NR_mlock
9345 case TARGET_NR_mlock
:
9346 return get_errno(mlock(g2h(arg1
), arg2
));
9348 #ifdef TARGET_NR_munlock
9349 case TARGET_NR_munlock
:
9350 return get_errno(munlock(g2h(arg1
), arg2
));
9352 #ifdef TARGET_NR_mlockall
9353 case TARGET_NR_mlockall
:
9354 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9356 #ifdef TARGET_NR_munlockall
9357 case TARGET_NR_munlockall
:
9358 return get_errno(munlockall());
9360 #ifdef TARGET_NR_truncate
9361 case TARGET_NR_truncate
:
9362 if (!(p
= lock_user_string(arg1
)))
9363 return -TARGET_EFAULT
;
9364 ret
= get_errno(truncate(p
, arg2
));
9365 unlock_user(p
, arg1
, 0);
9368 #ifdef TARGET_NR_ftruncate
9369 case TARGET_NR_ftruncate
:
9370 return get_errno(ftruncate(arg1
, arg2
));
9372 case TARGET_NR_fchmod
:
9373 return get_errno(fchmod(arg1
, arg2
));
9374 #if defined(TARGET_NR_fchmodat)
9375 case TARGET_NR_fchmodat
:
9376 if (!(p
= lock_user_string(arg2
)))
9377 return -TARGET_EFAULT
;
9378 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9379 unlock_user(p
, arg2
, 0);
9382 case TARGET_NR_getpriority
:
9383 /* Note that negative values are valid for getpriority, so we must
9384 differentiate based on errno settings. */
9386 ret
= getpriority(arg1
, arg2
);
9387 if (ret
== -1 && errno
!= 0) {
9388 return -host_to_target_errno(errno
);
9391 /* Return value is the unbiased priority. Signal no error. */
9392 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9394 /* Return value is a biased priority to avoid negative numbers. */
9398 case TARGET_NR_setpriority
:
9399 return get_errno(setpriority(arg1
, arg2
, arg3
));
9400 #ifdef TARGET_NR_statfs
9401 case TARGET_NR_statfs
:
9402 if (!(p
= lock_user_string(arg1
))) {
9403 return -TARGET_EFAULT
;
9405 ret
= get_errno(statfs(path(p
), &stfs
));
9406 unlock_user(p
, arg1
, 0);
9408 if (!is_error(ret
)) {
9409 struct target_statfs
*target_stfs
;
9411 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9412 return -TARGET_EFAULT
;
9413 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9414 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9415 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9416 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9417 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9418 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9419 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9420 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9421 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9422 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9423 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9424 #ifdef _STATFS_F_FLAGS
9425 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9427 __put_user(0, &target_stfs
->f_flags
);
9429 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9430 unlock_user_struct(target_stfs
, arg2
, 1);
9434 #ifdef TARGET_NR_fstatfs
9435 case TARGET_NR_fstatfs
:
9436 ret
= get_errno(fstatfs(arg1
, &stfs
));
9437 goto convert_statfs
;
9439 #ifdef TARGET_NR_statfs64
9440 case TARGET_NR_statfs64
:
9441 if (!(p
= lock_user_string(arg1
))) {
9442 return -TARGET_EFAULT
;
9444 ret
= get_errno(statfs(path(p
), &stfs
));
9445 unlock_user(p
, arg1
, 0);
9447 if (!is_error(ret
)) {
9448 struct target_statfs64
*target_stfs
;
9450 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9451 return -TARGET_EFAULT
;
9452 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9453 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9454 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9455 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9456 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9457 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9458 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9459 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9460 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9461 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9462 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9463 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9464 unlock_user_struct(target_stfs
, arg3
, 1);
9467 case TARGET_NR_fstatfs64
:
9468 ret
= get_errno(fstatfs(arg1
, &stfs
));
9469 goto convert_statfs64
;
9471 #ifdef TARGET_NR_socketcall
9472 case TARGET_NR_socketcall
:
9473 return do_socketcall(arg1
, arg2
);
9475 #ifdef TARGET_NR_accept
9476 case TARGET_NR_accept
:
9477 return do_accept4(arg1
, arg2
, arg3
, 0);
9479 #ifdef TARGET_NR_accept4
9480 case TARGET_NR_accept4
:
9481 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9483 #ifdef TARGET_NR_bind
9484 case TARGET_NR_bind
:
9485 return do_bind(arg1
, arg2
, arg3
);
9487 #ifdef TARGET_NR_connect
9488 case TARGET_NR_connect
:
9489 return do_connect(arg1
, arg2
, arg3
);
9491 #ifdef TARGET_NR_getpeername
9492 case TARGET_NR_getpeername
:
9493 return do_getpeername(arg1
, arg2
, arg3
);
9495 #ifdef TARGET_NR_getsockname
9496 case TARGET_NR_getsockname
:
9497 return do_getsockname(arg1
, arg2
, arg3
);
9499 #ifdef TARGET_NR_getsockopt
9500 case TARGET_NR_getsockopt
:
9501 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9503 #ifdef TARGET_NR_listen
9504 case TARGET_NR_listen
:
9505 return get_errno(listen(arg1
, arg2
));
9507 #ifdef TARGET_NR_recv
9508 case TARGET_NR_recv
:
9509 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9511 #ifdef TARGET_NR_recvfrom
9512 case TARGET_NR_recvfrom
:
9513 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9515 #ifdef TARGET_NR_recvmsg
9516 case TARGET_NR_recvmsg
:
9517 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9519 #ifdef TARGET_NR_send
9520 case TARGET_NR_send
:
9521 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9523 #ifdef TARGET_NR_sendmsg
9524 case TARGET_NR_sendmsg
:
9525 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9527 #ifdef TARGET_NR_sendmmsg
9528 case TARGET_NR_sendmmsg
:
9529 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9531 #ifdef TARGET_NR_recvmmsg
9532 case TARGET_NR_recvmmsg
:
9533 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9535 #ifdef TARGET_NR_sendto
9536 case TARGET_NR_sendto
:
9537 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9539 #ifdef TARGET_NR_shutdown
9540 case TARGET_NR_shutdown
:
9541 return get_errno(shutdown(arg1
, arg2
));
9543 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9544 case TARGET_NR_getrandom
:
9545 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9547 return -TARGET_EFAULT
;
9549 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9550 unlock_user(p
, arg1
, ret
);
9553 #ifdef TARGET_NR_socket
9554 case TARGET_NR_socket
:
9555 return do_socket(arg1
, arg2
, arg3
);
9557 #ifdef TARGET_NR_socketpair
9558 case TARGET_NR_socketpair
:
9559 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9561 #ifdef TARGET_NR_setsockopt
9562 case TARGET_NR_setsockopt
:
9563 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9565 #if defined(TARGET_NR_syslog)
9566 case TARGET_NR_syslog
:
9571 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9572 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9573 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9574 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9575 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9576 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9577 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9578 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9579 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9580 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9581 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9582 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9585 return -TARGET_EINVAL
;
9590 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9592 return -TARGET_EFAULT
;
9594 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9595 unlock_user(p
, arg2
, arg3
);
9599 return -TARGET_EINVAL
;
9604 case TARGET_NR_setitimer
:
9606 struct itimerval value
, ovalue
, *pvalue
;
9610 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9611 || copy_from_user_timeval(&pvalue
->it_value
,
9612 arg2
+ sizeof(struct target_timeval
)))
9613 return -TARGET_EFAULT
;
9617 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9618 if (!is_error(ret
) && arg3
) {
9619 if (copy_to_user_timeval(arg3
,
9620 &ovalue
.it_interval
)
9621 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9623 return -TARGET_EFAULT
;
9627 case TARGET_NR_getitimer
:
9629 struct itimerval value
;
9631 ret
= get_errno(getitimer(arg1
, &value
));
9632 if (!is_error(ret
) && arg2
) {
9633 if (copy_to_user_timeval(arg2
,
9635 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9637 return -TARGET_EFAULT
;
9641 #ifdef TARGET_NR_stat
9642 case TARGET_NR_stat
:
9643 if (!(p
= lock_user_string(arg1
))) {
9644 return -TARGET_EFAULT
;
9646 ret
= get_errno(stat(path(p
), &st
));
9647 unlock_user(p
, arg1
, 0);
9650 #ifdef TARGET_NR_lstat
9651 case TARGET_NR_lstat
:
9652 if (!(p
= lock_user_string(arg1
))) {
9653 return -TARGET_EFAULT
;
9655 ret
= get_errno(lstat(path(p
), &st
));
9656 unlock_user(p
, arg1
, 0);
9659 #ifdef TARGET_NR_fstat
9660 case TARGET_NR_fstat
:
9662 ret
= get_errno(fstat(arg1
, &st
));
9663 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9666 if (!is_error(ret
)) {
9667 struct target_stat
*target_st
;
9669 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9670 return -TARGET_EFAULT
;
9671 memset(target_st
, 0, sizeof(*target_st
));
9672 __put_user(st
.st_dev
, &target_st
->st_dev
);
9673 __put_user(st
.st_ino
, &target_st
->st_ino
);
9674 __put_user(st
.st_mode
, &target_st
->st_mode
);
9675 __put_user(st
.st_uid
, &target_st
->st_uid
);
9676 __put_user(st
.st_gid
, &target_st
->st_gid
);
9677 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9678 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9679 __put_user(st
.st_size
, &target_st
->st_size
);
9680 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9681 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9682 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9683 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9684 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9685 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9686 defined(TARGET_STAT_HAVE_NSEC)
9687 __put_user(st
.st_atim
.tv_nsec
,
9688 &target_st
->target_st_atime_nsec
);
9689 __put_user(st
.st_mtim
.tv_nsec
,
9690 &target_st
->target_st_mtime_nsec
);
9691 __put_user(st
.st_ctim
.tv_nsec
,
9692 &target_st
->target_st_ctime_nsec
);
9694 unlock_user_struct(target_st
, arg2
, 1);
9699 case TARGET_NR_vhangup
:
9700 return get_errno(vhangup());
9701 #ifdef TARGET_NR_syscall
9702 case TARGET_NR_syscall
:
9703 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9704 arg6
, arg7
, arg8
, 0);
9706 #if defined(TARGET_NR_wait4)
9707 case TARGET_NR_wait4
:
9710 abi_long status_ptr
= arg2
;
9711 struct rusage rusage
, *rusage_ptr
;
9712 abi_ulong target_rusage
= arg4
;
9713 abi_long rusage_err
;
9715 rusage_ptr
= &rusage
;
9718 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9719 if (!is_error(ret
)) {
9720 if (status_ptr
&& ret
) {
9721 status
= host_to_target_waitstatus(status
);
9722 if (put_user_s32(status
, status_ptr
))
9723 return -TARGET_EFAULT
;
9725 if (target_rusage
) {
9726 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9735 #ifdef TARGET_NR_swapoff
9736 case TARGET_NR_swapoff
:
9737 if (!(p
= lock_user_string(arg1
)))
9738 return -TARGET_EFAULT
;
9739 ret
= get_errno(swapoff(p
));
9740 unlock_user(p
, arg1
, 0);
9743 case TARGET_NR_sysinfo
:
9745 struct target_sysinfo
*target_value
;
9746 struct sysinfo value
;
9747 ret
= get_errno(sysinfo(&value
));
9748 if (!is_error(ret
) && arg1
)
9750 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9751 return -TARGET_EFAULT
;
9752 __put_user(value
.uptime
, &target_value
->uptime
);
9753 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9754 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9755 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9756 __put_user(value
.totalram
, &target_value
->totalram
);
9757 __put_user(value
.freeram
, &target_value
->freeram
);
9758 __put_user(value
.sharedram
, &target_value
->sharedram
);
9759 __put_user(value
.bufferram
, &target_value
->bufferram
);
9760 __put_user(value
.totalswap
, &target_value
->totalswap
);
9761 __put_user(value
.freeswap
, &target_value
->freeswap
);
9762 __put_user(value
.procs
, &target_value
->procs
);
9763 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9764 __put_user(value
.freehigh
, &target_value
->freehigh
);
9765 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9766 unlock_user_struct(target_value
, arg1
, 1);
9770 #ifdef TARGET_NR_ipc
9772 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9774 #ifdef TARGET_NR_semget
9775 case TARGET_NR_semget
:
9776 return get_errno(semget(arg1
, arg2
, arg3
));
9778 #ifdef TARGET_NR_semop
9779 case TARGET_NR_semop
:
9780 return do_semtimedop(arg1
, arg2
, arg3
, 0);
9782 #ifdef TARGET_NR_semtimedop
9783 case TARGET_NR_semtimedop
:
9784 return do_semtimedop(arg1
, arg2
, arg3
, arg4
);
9786 #ifdef TARGET_NR_semctl
9787 case TARGET_NR_semctl
:
9788 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9790 #ifdef TARGET_NR_msgctl
9791 case TARGET_NR_msgctl
:
9792 return do_msgctl(arg1
, arg2
, arg3
);
9794 #ifdef TARGET_NR_msgget
9795 case TARGET_NR_msgget
:
9796 return get_errno(msgget(arg1
, arg2
));
9798 #ifdef TARGET_NR_msgrcv
9799 case TARGET_NR_msgrcv
:
9800 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9802 #ifdef TARGET_NR_msgsnd
9803 case TARGET_NR_msgsnd
:
9804 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9806 #ifdef TARGET_NR_shmget
9807 case TARGET_NR_shmget
:
9808 return get_errno(shmget(arg1
, arg2
, arg3
));
9810 #ifdef TARGET_NR_shmctl
9811 case TARGET_NR_shmctl
:
9812 return do_shmctl(arg1
, arg2
, arg3
);
9814 #ifdef TARGET_NR_shmat
9815 case TARGET_NR_shmat
:
9816 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9818 #ifdef TARGET_NR_shmdt
9819 case TARGET_NR_shmdt
:
9820 return do_shmdt(arg1
);
9822 case TARGET_NR_fsync
:
9823 return get_errno(fsync(arg1
));
9824 case TARGET_NR_clone
:
9825 /* Linux manages to have three different orderings for its
9826 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9827 * match the kernel's CONFIG_CLONE_* settings.
9828 * Microblaze is further special in that it uses a sixth
9829 * implicit argument to clone for the TLS pointer.
9831 #if defined(TARGET_MICROBLAZE)
9832 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9833 #elif defined(TARGET_CLONE_BACKWARDS)
9834 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9835 #elif defined(TARGET_CLONE_BACKWARDS2)
9836 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9838 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9841 #ifdef __NR_exit_group
9842 /* new thread calls */
9843 case TARGET_NR_exit_group
:
9844 preexit_cleanup(cpu_env
, arg1
);
9845 return get_errno(exit_group(arg1
));
9847 case TARGET_NR_setdomainname
:
9848 if (!(p
= lock_user_string(arg1
)))
9849 return -TARGET_EFAULT
;
9850 ret
= get_errno(setdomainname(p
, arg2
));
9851 unlock_user(p
, arg1
, 0);
9853 case TARGET_NR_uname
:
9854 /* no need to transcode because we use the linux syscall */
9856 struct new_utsname
* buf
;
9858 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9859 return -TARGET_EFAULT
;
9860 ret
= get_errno(sys_uname(buf
));
9861 if (!is_error(ret
)) {
9862 /* Overwrite the native machine name with whatever is being
9864 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9865 sizeof(buf
->machine
));
9866 /* Allow the user to override the reported release. */
9867 if (qemu_uname_release
&& *qemu_uname_release
) {
9868 g_strlcpy(buf
->release
, qemu_uname_release
,
9869 sizeof(buf
->release
));
9872 unlock_user_struct(buf
, arg1
, 1);
9876 case TARGET_NR_modify_ldt
:
9877 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9878 #if !defined(TARGET_X86_64)
9879 case TARGET_NR_vm86
:
9880 return do_vm86(cpu_env
, arg1
, arg2
);
9883 #if defined(TARGET_NR_adjtimex)
9884 case TARGET_NR_adjtimex
:
9886 struct timex host_buf
;
9888 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9889 return -TARGET_EFAULT
;
9891 ret
= get_errno(adjtimex(&host_buf
));
9892 if (!is_error(ret
)) {
9893 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9894 return -TARGET_EFAULT
;
9900 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9901 case TARGET_NR_clock_adjtime
:
9903 struct timex htx
, *phtx
= &htx
;
9905 if (target_to_host_timex(phtx
, arg2
) != 0) {
9906 return -TARGET_EFAULT
;
9908 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9909 if (!is_error(ret
) && phtx
) {
9910 if (host_to_target_timex(arg2
, phtx
) != 0) {
9911 return -TARGET_EFAULT
;
9917 case TARGET_NR_getpgid
:
9918 return get_errno(getpgid(arg1
));
9919 case TARGET_NR_fchdir
:
9920 return get_errno(fchdir(arg1
));
9921 case TARGET_NR_personality
:
9922 return get_errno(personality(arg1
));
9923 #ifdef TARGET_NR__llseek /* Not on alpha */
9924 case TARGET_NR__llseek
:
9927 #if !defined(__NR_llseek)
9928 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9930 ret
= get_errno(res
);
9935 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9937 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9938 return -TARGET_EFAULT
;
9943 #ifdef TARGET_NR_getdents
9944 case TARGET_NR_getdents
:
9945 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9946 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9948 struct target_dirent
*target_dirp
;
9949 struct linux_dirent
*dirp
;
9950 abi_long count
= arg3
;
9952 dirp
= g_try_malloc(count
);
9954 return -TARGET_ENOMEM
;
9957 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9958 if (!is_error(ret
)) {
9959 struct linux_dirent
*de
;
9960 struct target_dirent
*tde
;
9962 int reclen
, treclen
;
9963 int count1
, tnamelen
;
9967 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9968 return -TARGET_EFAULT
;
9971 reclen
= de
->d_reclen
;
9972 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9973 assert(tnamelen
>= 0);
9974 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9975 assert(count1
+ treclen
<= count
);
9976 tde
->d_reclen
= tswap16(treclen
);
9977 tde
->d_ino
= tswapal(de
->d_ino
);
9978 tde
->d_off
= tswapal(de
->d_off
);
9979 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9980 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9982 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9986 unlock_user(target_dirp
, arg2
, ret
);
9992 struct linux_dirent
*dirp
;
9993 abi_long count
= arg3
;
9995 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9996 return -TARGET_EFAULT
;
9997 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9998 if (!is_error(ret
)) {
9999 struct linux_dirent
*de
;
10004 reclen
= de
->d_reclen
;
10007 de
->d_reclen
= tswap16(reclen
);
10008 tswapls(&de
->d_ino
);
10009 tswapls(&de
->d_off
);
10010 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10014 unlock_user(dirp
, arg2
, ret
);
10018 /* Implement getdents in terms of getdents64 */
10020 struct linux_dirent64
*dirp
;
10021 abi_long count
= arg3
;
10023 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10025 return -TARGET_EFAULT
;
10027 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10028 if (!is_error(ret
)) {
10029 /* Convert the dirent64 structs to target dirent. We do this
10030 * in-place, since we can guarantee that a target_dirent is no
10031 * larger than a dirent64; however this means we have to be
10032 * careful to read everything before writing in the new format.
10034 struct linux_dirent64
*de
;
10035 struct target_dirent
*tde
;
10040 tde
= (struct target_dirent
*)dirp
;
10042 int namelen
, treclen
;
10043 int reclen
= de
->d_reclen
;
10044 uint64_t ino
= de
->d_ino
;
10045 int64_t off
= de
->d_off
;
10046 uint8_t type
= de
->d_type
;
10048 namelen
= strlen(de
->d_name
);
10049 treclen
= offsetof(struct target_dirent
, d_name
)
10051 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10053 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10054 tde
->d_ino
= tswapal(ino
);
10055 tde
->d_off
= tswapal(off
);
10056 tde
->d_reclen
= tswap16(treclen
);
10057 /* The target_dirent type is in what was formerly a padding
10058 * byte at the end of the structure:
10060 *(((char *)tde
) + treclen
- 1) = type
;
10062 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10063 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10069 unlock_user(dirp
, arg2
, ret
);
10073 #endif /* TARGET_NR_getdents */
10074 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10075 case TARGET_NR_getdents64
:
10077 struct linux_dirent64
*dirp
;
10078 abi_long count
= arg3
;
10079 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10080 return -TARGET_EFAULT
;
10081 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10082 if (!is_error(ret
)) {
10083 struct linux_dirent64
*de
;
10088 reclen
= de
->d_reclen
;
10091 de
->d_reclen
= tswap16(reclen
);
10092 tswap64s((uint64_t *)&de
->d_ino
);
10093 tswap64s((uint64_t *)&de
->d_off
);
10094 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10098 unlock_user(dirp
, arg2
, ret
);
10101 #endif /* TARGET_NR_getdents64 */
10102 #if defined(TARGET_NR__newselect)
10103 case TARGET_NR__newselect
:
10104 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10106 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10107 # ifdef TARGET_NR_poll
10108 case TARGET_NR_poll
:
10110 # ifdef TARGET_NR_ppoll
10111 case TARGET_NR_ppoll
:
10114 struct target_pollfd
*target_pfd
;
10115 unsigned int nfds
= arg2
;
10116 struct pollfd
*pfd
;
10122 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10123 return -TARGET_EINVAL
;
10126 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10127 sizeof(struct target_pollfd
) * nfds
, 1);
10129 return -TARGET_EFAULT
;
10132 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10133 for (i
= 0; i
< nfds
; i
++) {
10134 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10135 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10140 # ifdef TARGET_NR_ppoll
10141 case TARGET_NR_ppoll
:
10143 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10144 target_sigset_t
*target_set
;
10145 sigset_t _set
, *set
= &_set
;
10148 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10149 unlock_user(target_pfd
, arg1
, 0);
10150 return -TARGET_EFAULT
;
10157 if (arg5
!= sizeof(target_sigset_t
)) {
10158 unlock_user(target_pfd
, arg1
, 0);
10159 return -TARGET_EINVAL
;
10162 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10164 unlock_user(target_pfd
, arg1
, 0);
10165 return -TARGET_EFAULT
;
10167 target_to_host_sigset(set
, target_set
);
10172 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10173 set
, SIGSET_T_SIZE
));
10175 if (!is_error(ret
) && arg3
) {
10176 host_to_target_timespec(arg3
, timeout_ts
);
10179 unlock_user(target_set
, arg4
, 0);
10184 # ifdef TARGET_NR_poll
10185 case TARGET_NR_poll
:
10187 struct timespec ts
, *pts
;
10190 /* Convert ms to secs, ns */
10191 ts
.tv_sec
= arg3
/ 1000;
10192 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10195 /* -ve poll() timeout means "infinite" */
10198 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10203 g_assert_not_reached();
10206 if (!is_error(ret
)) {
10207 for(i
= 0; i
< nfds
; i
++) {
10208 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10211 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10215 case TARGET_NR_flock
:
10216 /* NOTE: the flock constant seems to be the same for every
10218 return get_errno(safe_flock(arg1
, arg2
));
10219 case TARGET_NR_readv
:
10221 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10223 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10224 unlock_iovec(vec
, arg2
, arg3
, 1);
10226 ret
= -host_to_target_errno(errno
);
10230 case TARGET_NR_writev
:
10232 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10234 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10235 unlock_iovec(vec
, arg2
, arg3
, 0);
10237 ret
= -host_to_target_errno(errno
);
10241 #if defined(TARGET_NR_preadv)
10242 case TARGET_NR_preadv
:
10244 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10246 unsigned long low
, high
;
10248 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10249 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10250 unlock_iovec(vec
, arg2
, arg3
, 1);
10252 ret
= -host_to_target_errno(errno
);
10257 #if defined(TARGET_NR_pwritev)
10258 case TARGET_NR_pwritev
:
10260 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10262 unsigned long low
, high
;
10264 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10265 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10266 unlock_iovec(vec
, arg2
, arg3
, 0);
10268 ret
= -host_to_target_errno(errno
);
10273 case TARGET_NR_getsid
:
10274 return get_errno(getsid(arg1
));
10275 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10276 case TARGET_NR_fdatasync
:
10277 return get_errno(fdatasync(arg1
));
10279 #ifdef TARGET_NR__sysctl
10280 case TARGET_NR__sysctl
:
10281 /* We don't implement this, but ENOTDIR is always a safe
10283 return -TARGET_ENOTDIR
;
10285 case TARGET_NR_sched_getaffinity
:
10287 unsigned int mask_size
;
10288 unsigned long *mask
;
10291 * sched_getaffinity needs multiples of ulong, so need to take
10292 * care of mismatches between target ulong and host ulong sizes.
10294 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10295 return -TARGET_EINVAL
;
10297 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10299 mask
= alloca(mask_size
);
10300 memset(mask
, 0, mask_size
);
10301 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10303 if (!is_error(ret
)) {
10305 /* More data returned than the caller's buffer will fit.
10306 * This only happens if sizeof(abi_long) < sizeof(long)
10307 * and the caller passed us a buffer holding an odd number
10308 * of abi_longs. If the host kernel is actually using the
10309 * extra 4 bytes then fail EINVAL; otherwise we can just
10310 * ignore them and only copy the interesting part.
10312 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10313 if (numcpus
> arg2
* 8) {
10314 return -TARGET_EINVAL
;
10319 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10320 return -TARGET_EFAULT
;
10325 case TARGET_NR_sched_setaffinity
:
10327 unsigned int mask_size
;
10328 unsigned long *mask
;
10331 * sched_setaffinity needs multiples of ulong, so need to take
10332 * care of mismatches between target ulong and host ulong sizes.
10334 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10335 return -TARGET_EINVAL
;
10337 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10338 mask
= alloca(mask_size
);
10340 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10345 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10347 case TARGET_NR_getcpu
:
10349 unsigned cpu
, node
;
10350 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10351 arg2
? &node
: NULL
,
10353 if (is_error(ret
)) {
10356 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10357 return -TARGET_EFAULT
;
10359 if (arg2
&& put_user_u32(node
, arg2
)) {
10360 return -TARGET_EFAULT
;
10364 case TARGET_NR_sched_setparam
:
10366 struct sched_param
*target_schp
;
10367 struct sched_param schp
;
10370 return -TARGET_EINVAL
;
10372 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10373 return -TARGET_EFAULT
;
10374 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10375 unlock_user_struct(target_schp
, arg2
, 0);
10376 return get_errno(sched_setparam(arg1
, &schp
));
10378 case TARGET_NR_sched_getparam
:
10380 struct sched_param
*target_schp
;
10381 struct sched_param schp
;
10384 return -TARGET_EINVAL
;
10386 ret
= get_errno(sched_getparam(arg1
, &schp
));
10387 if (!is_error(ret
)) {
10388 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10389 return -TARGET_EFAULT
;
10390 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10391 unlock_user_struct(target_schp
, arg2
, 1);
10395 case TARGET_NR_sched_setscheduler
:
10397 struct sched_param
*target_schp
;
10398 struct sched_param schp
;
10400 return -TARGET_EINVAL
;
10402 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10403 return -TARGET_EFAULT
;
10404 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10405 unlock_user_struct(target_schp
, arg3
, 0);
10406 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10408 case TARGET_NR_sched_getscheduler
:
10409 return get_errno(sched_getscheduler(arg1
));
10410 case TARGET_NR_sched_yield
:
10411 return get_errno(sched_yield());
10412 case TARGET_NR_sched_get_priority_max
:
10413 return get_errno(sched_get_priority_max(arg1
));
10414 case TARGET_NR_sched_get_priority_min
:
10415 return get_errno(sched_get_priority_min(arg1
));
10416 #ifdef TARGET_NR_sched_rr_get_interval
10417 case TARGET_NR_sched_rr_get_interval
:
10419 struct timespec ts
;
10420 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10421 if (!is_error(ret
)) {
10422 ret
= host_to_target_timespec(arg2
, &ts
);
10427 #if defined(TARGET_NR_nanosleep)
10428 case TARGET_NR_nanosleep
:
10430 struct timespec req
, rem
;
10431 target_to_host_timespec(&req
, arg1
);
10432 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10433 if (is_error(ret
) && arg2
) {
10434 host_to_target_timespec(arg2
, &rem
);
10439 case TARGET_NR_prctl
:
10441 case PR_GET_PDEATHSIG
:
10444 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10445 if (!is_error(ret
) && arg2
10446 && put_user_ual(deathsig
, arg2
)) {
10447 return -TARGET_EFAULT
;
10454 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10456 return -TARGET_EFAULT
;
10458 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10459 arg3
, arg4
, arg5
));
10460 unlock_user(name
, arg2
, 16);
10465 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10467 return -TARGET_EFAULT
;
10469 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10470 arg3
, arg4
, arg5
));
10471 unlock_user(name
, arg2
, 0);
10476 case TARGET_PR_GET_FP_MODE
:
10478 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10480 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10481 ret
|= TARGET_PR_FP_MODE_FR
;
10483 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10484 ret
|= TARGET_PR_FP_MODE_FRE
;
10488 case TARGET_PR_SET_FP_MODE
:
10490 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10491 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10492 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10493 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10494 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10496 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10497 TARGET_PR_FP_MODE_FRE
;
10499 /* If nothing to change, return right away, successfully. */
10500 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10503 /* Check the value is valid */
10504 if (arg2
& ~known_bits
) {
10505 return -TARGET_EOPNOTSUPP
;
10507 /* Setting FRE without FR is not supported. */
10508 if (new_fre
&& !new_fr
) {
10509 return -TARGET_EOPNOTSUPP
;
10511 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10512 /* FR1 is not supported */
10513 return -TARGET_EOPNOTSUPP
;
10515 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10516 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10517 /* cannot set FR=0 */
10518 return -TARGET_EOPNOTSUPP
;
10520 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10521 /* Cannot set FRE=1 */
10522 return -TARGET_EOPNOTSUPP
;
10526 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10527 for (i
= 0; i
< 32 ; i
+= 2) {
10528 if (!old_fr
&& new_fr
) {
10529 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10530 } else if (old_fr
&& !new_fr
) {
10531 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10536 env
->CP0_Status
|= (1 << CP0St_FR
);
10537 env
->hflags
|= MIPS_HFLAG_F64
;
10539 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10540 env
->hflags
&= ~MIPS_HFLAG_F64
;
10543 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10544 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10545 env
->hflags
|= MIPS_HFLAG_FRE
;
10548 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10549 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10555 #ifdef TARGET_AARCH64
10556 case TARGET_PR_SVE_SET_VL
:
10558 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10559 * PR_SVE_VL_INHERIT. Note the kernel definition
10560 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10561 * even though the current architectural maximum is VQ=16.
10563 ret
= -TARGET_EINVAL
;
10564 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10565 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10566 CPUARMState
*env
= cpu_env
;
10567 ARMCPU
*cpu
= env_archcpu(env
);
10568 uint32_t vq
, old_vq
;
10570 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10571 vq
= MAX(arg2
/ 16, 1);
10572 vq
= MIN(vq
, cpu
->sve_max_vq
);
10575 aarch64_sve_narrow_vq(env
, vq
);
10577 env
->vfp
.zcr_el
[1] = vq
- 1;
10578 arm_rebuild_hflags(env
);
10582 case TARGET_PR_SVE_GET_VL
:
10583 ret
= -TARGET_EINVAL
;
10585 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10586 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10587 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10591 case TARGET_PR_PAC_RESET_KEYS
:
10593 CPUARMState
*env
= cpu_env
;
10594 ARMCPU
*cpu
= env_archcpu(env
);
10596 if (arg3
|| arg4
|| arg5
) {
10597 return -TARGET_EINVAL
;
10599 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10600 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10601 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10602 TARGET_PR_PAC_APGAKEY
);
10608 } else if (arg2
& ~all
) {
10609 return -TARGET_EINVAL
;
10611 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10612 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10613 sizeof(ARMPACKey
), &err
);
10615 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10616 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10617 sizeof(ARMPACKey
), &err
);
10619 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10620 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10621 sizeof(ARMPACKey
), &err
);
10623 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10624 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10625 sizeof(ARMPACKey
), &err
);
10627 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10628 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10629 sizeof(ARMPACKey
), &err
);
10633 * Some unknown failure in the crypto. The best
10634 * we can do is log it and fail the syscall.
10635 * The real syscall cannot fail this way.
10637 qemu_log_mask(LOG_UNIMP
,
10638 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10639 error_get_pretty(err
));
10641 return -TARGET_EIO
;
10646 return -TARGET_EINVAL
;
10647 #endif /* AARCH64 */
10648 case PR_GET_SECCOMP
:
10649 case PR_SET_SECCOMP
:
10650 /* Disable seccomp to prevent the target disabling syscalls we
10652 return -TARGET_EINVAL
;
10654 /* Most prctl options have no pointer arguments */
10655 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10658 #ifdef TARGET_NR_arch_prctl
10659 case TARGET_NR_arch_prctl
:
10660 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10662 #ifdef TARGET_NR_pread64
10663 case TARGET_NR_pread64
:
10664 if (regpairs_aligned(cpu_env
, num
)) {
10668 if (arg2
== 0 && arg3
== 0) {
10669 /* Special-case NULL buffer and zero length, which should succeed */
10672 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10674 return -TARGET_EFAULT
;
10677 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10678 unlock_user(p
, arg2
, ret
);
10680 case TARGET_NR_pwrite64
:
10681 if (regpairs_aligned(cpu_env
, num
)) {
10685 if (arg2
== 0 && arg3
== 0) {
10686 /* Special-case NULL buffer and zero length, which should succeed */
10689 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10691 return -TARGET_EFAULT
;
10694 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10695 unlock_user(p
, arg2
, 0);
10698 case TARGET_NR_getcwd
:
10699 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10700 return -TARGET_EFAULT
;
10701 ret
= get_errno(sys_getcwd1(p
, arg2
));
10702 unlock_user(p
, arg1
, ret
);
10704 case TARGET_NR_capget
:
10705 case TARGET_NR_capset
:
10707 struct target_user_cap_header
*target_header
;
10708 struct target_user_cap_data
*target_data
= NULL
;
10709 struct __user_cap_header_struct header
;
10710 struct __user_cap_data_struct data
[2];
10711 struct __user_cap_data_struct
*dataptr
= NULL
;
10712 int i
, target_datalen
;
10713 int data_items
= 1;
10715 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10716 return -TARGET_EFAULT
;
10718 header
.version
= tswap32(target_header
->version
);
10719 header
.pid
= tswap32(target_header
->pid
);
10721 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10722 /* Version 2 and up takes pointer to two user_data structs */
10726 target_datalen
= sizeof(*target_data
) * data_items
;
10729 if (num
== TARGET_NR_capget
) {
10730 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10732 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10734 if (!target_data
) {
10735 unlock_user_struct(target_header
, arg1
, 0);
10736 return -TARGET_EFAULT
;
10739 if (num
== TARGET_NR_capset
) {
10740 for (i
= 0; i
< data_items
; i
++) {
10741 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10742 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10743 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10750 if (num
== TARGET_NR_capget
) {
10751 ret
= get_errno(capget(&header
, dataptr
));
10753 ret
= get_errno(capset(&header
, dataptr
));
10756 /* The kernel always updates version for both capget and capset */
10757 target_header
->version
= tswap32(header
.version
);
10758 unlock_user_struct(target_header
, arg1
, 1);
10761 if (num
== TARGET_NR_capget
) {
10762 for (i
= 0; i
< data_items
; i
++) {
10763 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10764 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10765 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10767 unlock_user(target_data
, arg2
, target_datalen
);
10769 unlock_user(target_data
, arg2
, 0);
10774 case TARGET_NR_sigaltstack
:
10775 return do_sigaltstack(arg1
, arg2
,
10776 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10778 #ifdef CONFIG_SENDFILE
10779 #ifdef TARGET_NR_sendfile
10780 case TARGET_NR_sendfile
:
10782 off_t
*offp
= NULL
;
10785 ret
= get_user_sal(off
, arg3
);
10786 if (is_error(ret
)) {
10791 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10792 if (!is_error(ret
) && arg3
) {
10793 abi_long ret2
= put_user_sal(off
, arg3
);
10794 if (is_error(ret2
)) {
10801 #ifdef TARGET_NR_sendfile64
10802 case TARGET_NR_sendfile64
:
10804 off_t
*offp
= NULL
;
10807 ret
= get_user_s64(off
, arg3
);
10808 if (is_error(ret
)) {
10813 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10814 if (!is_error(ret
) && arg3
) {
10815 abi_long ret2
= put_user_s64(off
, arg3
);
10816 if (is_error(ret2
)) {
10824 #ifdef TARGET_NR_vfork
10825 case TARGET_NR_vfork
:
10826 return get_errno(do_fork(cpu_env
,
10827 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10830 #ifdef TARGET_NR_ugetrlimit
10831 case TARGET_NR_ugetrlimit
:
10833 struct rlimit rlim
;
10834 int resource
= target_to_host_resource(arg1
);
10835 ret
= get_errno(getrlimit(resource
, &rlim
));
10836 if (!is_error(ret
)) {
10837 struct target_rlimit
*target_rlim
;
10838 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10839 return -TARGET_EFAULT
;
10840 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10841 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10842 unlock_user_struct(target_rlim
, arg2
, 1);
10847 #ifdef TARGET_NR_truncate64
10848 case TARGET_NR_truncate64
:
10849 if (!(p
= lock_user_string(arg1
)))
10850 return -TARGET_EFAULT
;
10851 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10852 unlock_user(p
, arg1
, 0);
10855 #ifdef TARGET_NR_ftruncate64
10856 case TARGET_NR_ftruncate64
:
10857 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10859 #ifdef TARGET_NR_stat64
10860 case TARGET_NR_stat64
:
10861 if (!(p
= lock_user_string(arg1
))) {
10862 return -TARGET_EFAULT
;
10864 ret
= get_errno(stat(path(p
), &st
));
10865 unlock_user(p
, arg1
, 0);
10866 if (!is_error(ret
))
10867 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10870 #ifdef TARGET_NR_lstat64
10871 case TARGET_NR_lstat64
:
10872 if (!(p
= lock_user_string(arg1
))) {
10873 return -TARGET_EFAULT
;
10875 ret
= get_errno(lstat(path(p
), &st
));
10876 unlock_user(p
, arg1
, 0);
10877 if (!is_error(ret
))
10878 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10881 #ifdef TARGET_NR_fstat64
10882 case TARGET_NR_fstat64
:
10883 ret
= get_errno(fstat(arg1
, &st
));
10884 if (!is_error(ret
))
10885 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10888 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10889 #ifdef TARGET_NR_fstatat64
10890 case TARGET_NR_fstatat64
:
10892 #ifdef TARGET_NR_newfstatat
10893 case TARGET_NR_newfstatat
:
10895 if (!(p
= lock_user_string(arg2
))) {
10896 return -TARGET_EFAULT
;
10898 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10899 unlock_user(p
, arg2
, 0);
10900 if (!is_error(ret
))
10901 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10904 #if defined(TARGET_NR_statx)
10905 case TARGET_NR_statx
:
10907 struct target_statx
*target_stx
;
10911 p
= lock_user_string(arg2
);
10913 return -TARGET_EFAULT
;
10915 #if defined(__NR_statx)
10918 * It is assumed that struct statx is architecture independent.
10920 struct target_statx host_stx
;
10923 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10924 if (!is_error(ret
)) {
10925 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10926 unlock_user(p
, arg2
, 0);
10927 return -TARGET_EFAULT
;
10931 if (ret
!= -TARGET_ENOSYS
) {
10932 unlock_user(p
, arg2
, 0);
10937 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10938 unlock_user(p
, arg2
, 0);
10940 if (!is_error(ret
)) {
10941 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10942 return -TARGET_EFAULT
;
10944 memset(target_stx
, 0, sizeof(*target_stx
));
10945 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10946 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10947 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10948 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10949 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10950 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10951 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10952 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10953 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10954 __put_user(st
.st_size
, &target_stx
->stx_size
);
10955 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10956 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10957 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10958 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10959 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10960 unlock_user_struct(target_stx
, arg5
, 1);
10965 #ifdef TARGET_NR_lchown
10966 case TARGET_NR_lchown
:
10967 if (!(p
= lock_user_string(arg1
)))
10968 return -TARGET_EFAULT
;
10969 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10970 unlock_user(p
, arg1
, 0);
10973 #ifdef TARGET_NR_getuid
10974 case TARGET_NR_getuid
:
10975 return get_errno(high2lowuid(getuid()));
10977 #ifdef TARGET_NR_getgid
10978 case TARGET_NR_getgid
:
10979 return get_errno(high2lowgid(getgid()));
10981 #ifdef TARGET_NR_geteuid
10982 case TARGET_NR_geteuid
:
10983 return get_errno(high2lowuid(geteuid()));
10985 #ifdef TARGET_NR_getegid
10986 case TARGET_NR_getegid
:
10987 return get_errno(high2lowgid(getegid()));
10989 case TARGET_NR_setreuid
:
10990 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10991 case TARGET_NR_setregid
:
10992 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10993 case TARGET_NR_getgroups
:
10995 int gidsetsize
= arg1
;
10996 target_id
*target_grouplist
;
11000 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11001 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11002 if (gidsetsize
== 0)
11004 if (!is_error(ret
)) {
11005 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11006 if (!target_grouplist
)
11007 return -TARGET_EFAULT
;
11008 for(i
= 0;i
< ret
; i
++)
11009 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11010 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11014 case TARGET_NR_setgroups
:
11016 int gidsetsize
= arg1
;
11017 target_id
*target_grouplist
;
11018 gid_t
*grouplist
= NULL
;
11021 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11022 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11023 if (!target_grouplist
) {
11024 return -TARGET_EFAULT
;
11026 for (i
= 0; i
< gidsetsize
; i
++) {
11027 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11029 unlock_user(target_grouplist
, arg2
, 0);
11031 return get_errno(setgroups(gidsetsize
, grouplist
));
11033 case TARGET_NR_fchown
:
11034 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11035 #if defined(TARGET_NR_fchownat)
11036 case TARGET_NR_fchownat
:
11037 if (!(p
= lock_user_string(arg2
)))
11038 return -TARGET_EFAULT
;
11039 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11040 low2highgid(arg4
), arg5
));
11041 unlock_user(p
, arg2
, 0);
11044 #ifdef TARGET_NR_setresuid
11045 case TARGET_NR_setresuid
:
11046 return get_errno(sys_setresuid(low2highuid(arg1
),
11048 low2highuid(arg3
)));
11050 #ifdef TARGET_NR_getresuid
11051 case TARGET_NR_getresuid
:
11053 uid_t ruid
, euid
, suid
;
11054 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11055 if (!is_error(ret
)) {
11056 if (put_user_id(high2lowuid(ruid
), arg1
)
11057 || put_user_id(high2lowuid(euid
), arg2
)
11058 || put_user_id(high2lowuid(suid
), arg3
))
11059 return -TARGET_EFAULT
;
11064 #ifdef TARGET_NR_getresgid
11065 case TARGET_NR_setresgid
:
11066 return get_errno(sys_setresgid(low2highgid(arg1
),
11068 low2highgid(arg3
)));
11070 #ifdef TARGET_NR_getresgid
11071 case TARGET_NR_getresgid
:
11073 gid_t rgid
, egid
, sgid
;
11074 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11075 if (!is_error(ret
)) {
11076 if (put_user_id(high2lowgid(rgid
), arg1
)
11077 || put_user_id(high2lowgid(egid
), arg2
)
11078 || put_user_id(high2lowgid(sgid
), arg3
))
11079 return -TARGET_EFAULT
;
11084 #ifdef TARGET_NR_chown
11085 case TARGET_NR_chown
:
11086 if (!(p
= lock_user_string(arg1
)))
11087 return -TARGET_EFAULT
;
11088 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11089 unlock_user(p
, arg1
, 0);
11092 case TARGET_NR_setuid
:
11093 return get_errno(sys_setuid(low2highuid(arg1
)));
11094 case TARGET_NR_setgid
:
11095 return get_errno(sys_setgid(low2highgid(arg1
)));
11096 case TARGET_NR_setfsuid
:
11097 return get_errno(setfsuid(arg1
));
11098 case TARGET_NR_setfsgid
:
11099 return get_errno(setfsgid(arg1
));
11101 #ifdef TARGET_NR_lchown32
11102 case TARGET_NR_lchown32
:
11103 if (!(p
= lock_user_string(arg1
)))
11104 return -TARGET_EFAULT
;
11105 ret
= get_errno(lchown(p
, arg2
, arg3
));
11106 unlock_user(p
, arg1
, 0);
11109 #ifdef TARGET_NR_getuid32
11110 case TARGET_NR_getuid32
:
11111 return get_errno(getuid());
11114 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11115 /* Alpha specific */
11116 case TARGET_NR_getxuid
:
11120 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11122 return get_errno(getuid());
11124 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11125 /* Alpha specific */
11126 case TARGET_NR_getxgid
:
11130 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11132 return get_errno(getgid());
11134 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11135 /* Alpha specific */
11136 case TARGET_NR_osf_getsysinfo
:
11137 ret
= -TARGET_EOPNOTSUPP
;
11139 case TARGET_GSI_IEEE_FP_CONTROL
:
11141 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11142 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11144 swcr
&= ~SWCR_STATUS_MASK
;
11145 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11147 if (put_user_u64 (swcr
, arg2
))
11148 return -TARGET_EFAULT
;
11153 /* case GSI_IEEE_STATE_AT_SIGNAL:
11154 -- Not implemented in linux kernel.
11156 -- Retrieves current unaligned access state; not much used.
11157 case GSI_PROC_TYPE:
11158 -- Retrieves implver information; surely not used.
11159 case GSI_GET_HWRPB:
11160 -- Grabs a copy of the HWRPB; surely not used.
11165 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11166 /* Alpha specific */
11167 case TARGET_NR_osf_setsysinfo
:
11168 ret
= -TARGET_EOPNOTSUPP
;
11170 case TARGET_SSI_IEEE_FP_CONTROL
:
11172 uint64_t swcr
, fpcr
;
11174 if (get_user_u64 (swcr
, arg2
)) {
11175 return -TARGET_EFAULT
;
11179 * The kernel calls swcr_update_status to update the
11180 * status bits from the fpcr at every point that it
11181 * could be queried. Therefore, we store the status
11182 * bits only in FPCR.
11184 ((CPUAlphaState
*)cpu_env
)->swcr
11185 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11187 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11188 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11189 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11190 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11195 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11197 uint64_t exc
, fpcr
, fex
;
11199 if (get_user_u64(exc
, arg2
)) {
11200 return -TARGET_EFAULT
;
11202 exc
&= SWCR_STATUS_MASK
;
11203 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11205 /* Old exceptions are not signaled. */
11206 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11208 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11209 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11211 /* Update the hardware fpcr. */
11212 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11213 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11216 int si_code
= TARGET_FPE_FLTUNK
;
11217 target_siginfo_t info
;
11219 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11220 si_code
= TARGET_FPE_FLTUND
;
11222 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11223 si_code
= TARGET_FPE_FLTRES
;
11225 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11226 si_code
= TARGET_FPE_FLTUND
;
11228 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11229 si_code
= TARGET_FPE_FLTOVF
;
11231 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11232 si_code
= TARGET_FPE_FLTDIV
;
11234 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11235 si_code
= TARGET_FPE_FLTINV
;
11238 info
.si_signo
= SIGFPE
;
11240 info
.si_code
= si_code
;
11241 info
._sifields
._sigfault
._addr
11242 = ((CPUArchState
*)cpu_env
)->pc
;
11243 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11244 QEMU_SI_FAULT
, &info
);
11250 /* case SSI_NVPAIRS:
11251 -- Used with SSIN_UACPROC to enable unaligned accesses.
11252 case SSI_IEEE_STATE_AT_SIGNAL:
11253 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11254 -- Not implemented in linux kernel
11259 #ifdef TARGET_NR_osf_sigprocmask
11260 /* Alpha specific. */
11261 case TARGET_NR_osf_sigprocmask
:
11265 sigset_t set
, oldset
;
11268 case TARGET_SIG_BLOCK
:
11271 case TARGET_SIG_UNBLOCK
:
11274 case TARGET_SIG_SETMASK
:
11278 return -TARGET_EINVAL
;
11281 target_to_host_old_sigset(&set
, &mask
);
11282 ret
= do_sigprocmask(how
, &set
, &oldset
);
11284 host_to_target_old_sigset(&mask
, &oldset
);
11291 #ifdef TARGET_NR_getgid32
11292 case TARGET_NR_getgid32
:
11293 return get_errno(getgid());
11295 #ifdef TARGET_NR_geteuid32
11296 case TARGET_NR_geteuid32
:
11297 return get_errno(geteuid());
11299 #ifdef TARGET_NR_getegid32
11300 case TARGET_NR_getegid32
:
11301 return get_errno(getegid());
11303 #ifdef TARGET_NR_setreuid32
11304 case TARGET_NR_setreuid32
:
11305 return get_errno(setreuid(arg1
, arg2
));
11307 #ifdef TARGET_NR_setregid32
11308 case TARGET_NR_setregid32
:
11309 return get_errno(setregid(arg1
, arg2
));
11311 #ifdef TARGET_NR_getgroups32
11312 case TARGET_NR_getgroups32
:
11314 int gidsetsize
= arg1
;
11315 uint32_t *target_grouplist
;
11319 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11320 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11321 if (gidsetsize
== 0)
11323 if (!is_error(ret
)) {
11324 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11325 if (!target_grouplist
) {
11326 return -TARGET_EFAULT
;
11328 for(i
= 0;i
< ret
; i
++)
11329 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11330 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11335 #ifdef TARGET_NR_setgroups32
11336 case TARGET_NR_setgroups32
:
11338 int gidsetsize
= arg1
;
11339 uint32_t *target_grouplist
;
11343 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11344 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11345 if (!target_grouplist
) {
11346 return -TARGET_EFAULT
;
11348 for(i
= 0;i
< gidsetsize
; i
++)
11349 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11350 unlock_user(target_grouplist
, arg2
, 0);
11351 return get_errno(setgroups(gidsetsize
, grouplist
));
11354 #ifdef TARGET_NR_fchown32
11355 case TARGET_NR_fchown32
:
11356 return get_errno(fchown(arg1
, arg2
, arg3
));
11358 #ifdef TARGET_NR_setresuid32
11359 case TARGET_NR_setresuid32
:
11360 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11362 #ifdef TARGET_NR_getresuid32
11363 case TARGET_NR_getresuid32
:
11365 uid_t ruid
, euid
, suid
;
11366 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11367 if (!is_error(ret
)) {
11368 if (put_user_u32(ruid
, arg1
)
11369 || put_user_u32(euid
, arg2
)
11370 || put_user_u32(suid
, arg3
))
11371 return -TARGET_EFAULT
;
11376 #ifdef TARGET_NR_setresgid32
11377 case TARGET_NR_setresgid32
:
11378 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11380 #ifdef TARGET_NR_getresgid32
11381 case TARGET_NR_getresgid32
:
11383 gid_t rgid
, egid
, sgid
;
11384 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11385 if (!is_error(ret
)) {
11386 if (put_user_u32(rgid
, arg1
)
11387 || put_user_u32(egid
, arg2
)
11388 || put_user_u32(sgid
, arg3
))
11389 return -TARGET_EFAULT
;
11394 #ifdef TARGET_NR_chown32
11395 case TARGET_NR_chown32
:
11396 if (!(p
= lock_user_string(arg1
)))
11397 return -TARGET_EFAULT
;
11398 ret
= get_errno(chown(p
, arg2
, arg3
));
11399 unlock_user(p
, arg1
, 0);
11402 #ifdef TARGET_NR_setuid32
11403 case TARGET_NR_setuid32
:
11404 return get_errno(sys_setuid(arg1
));
11406 #ifdef TARGET_NR_setgid32
11407 case TARGET_NR_setgid32
:
11408 return get_errno(sys_setgid(arg1
));
11410 #ifdef TARGET_NR_setfsuid32
11411 case TARGET_NR_setfsuid32
:
11412 return get_errno(setfsuid(arg1
));
11414 #ifdef TARGET_NR_setfsgid32
11415 case TARGET_NR_setfsgid32
:
11416 return get_errno(setfsgid(arg1
));
11418 #ifdef TARGET_NR_mincore
11419 case TARGET_NR_mincore
:
11421 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11423 return -TARGET_ENOMEM
;
11425 p
= lock_user_string(arg3
);
11427 ret
= -TARGET_EFAULT
;
11429 ret
= get_errno(mincore(a
, arg2
, p
));
11430 unlock_user(p
, arg3
, ret
);
11432 unlock_user(a
, arg1
, 0);
11436 #ifdef TARGET_NR_arm_fadvise64_64
11437 case TARGET_NR_arm_fadvise64_64
:
11438 /* arm_fadvise64_64 looks like fadvise64_64 but
11439 * with different argument order: fd, advice, offset, len
11440 * rather than the usual fd, offset, len, advice.
11441 * Note that offset and len are both 64-bit so appear as
11442 * pairs of 32-bit registers.
11444 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11445 target_offset64(arg5
, arg6
), arg2
);
11446 return -host_to_target_errno(ret
);
11449 #if TARGET_ABI_BITS == 32
11451 #ifdef TARGET_NR_fadvise64_64
11452 case TARGET_NR_fadvise64_64
:
11453 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11454 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11462 /* 6 args: fd, offset (high, low), len (high, low), advice */
11463 if (regpairs_aligned(cpu_env
, num
)) {
11464 /* offset is in (3,4), len in (5,6) and advice in 7 */
11472 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11473 target_offset64(arg4
, arg5
), arg6
);
11474 return -host_to_target_errno(ret
);
11477 #ifdef TARGET_NR_fadvise64
11478 case TARGET_NR_fadvise64
:
11479 /* 5 args: fd, offset (high, low), len, advice */
11480 if (regpairs_aligned(cpu_env
, num
)) {
11481 /* offset is in (3,4), len in 5 and advice in 6 */
11487 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11488 return -host_to_target_errno(ret
);
11491 #else /* not a 32-bit ABI */
11492 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11493 #ifdef TARGET_NR_fadvise64_64
11494 case TARGET_NR_fadvise64_64
:
11496 #ifdef TARGET_NR_fadvise64
11497 case TARGET_NR_fadvise64
:
11499 #ifdef TARGET_S390X
11501 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11502 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11503 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11504 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11508 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11510 #endif /* end of 64-bit ABI fadvise handling */
11512 #ifdef TARGET_NR_madvise
11513 case TARGET_NR_madvise
:
11514 /* A straight passthrough may not be safe because qemu sometimes
11515 turns private file-backed mappings into anonymous mappings.
11516 This will break MADV_DONTNEED.
11517 This is a hint, so ignoring and returning success is ok. */
11520 #ifdef TARGET_NR_fcntl64
11521 case TARGET_NR_fcntl64
:
11525 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11526 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11529 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11530 copyfrom
= copy_from_user_oabi_flock64
;
11531 copyto
= copy_to_user_oabi_flock64
;
11535 cmd
= target_to_host_fcntl_cmd(arg2
);
11536 if (cmd
== -TARGET_EINVAL
) {
11541 case TARGET_F_GETLK64
:
11542 ret
= copyfrom(&fl
, arg3
);
11546 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11548 ret
= copyto(arg3
, &fl
);
11552 case TARGET_F_SETLK64
:
11553 case TARGET_F_SETLKW64
:
11554 ret
= copyfrom(&fl
, arg3
);
11558 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11561 ret
= do_fcntl(arg1
, arg2
, arg3
);
11567 #ifdef TARGET_NR_cacheflush
11568 case TARGET_NR_cacheflush
:
11569 /* self-modifying code is handled automatically, so nothing needed */
11572 #ifdef TARGET_NR_getpagesize
11573 case TARGET_NR_getpagesize
:
11574 return TARGET_PAGE_SIZE
;
11576 case TARGET_NR_gettid
:
11577 return get_errno(sys_gettid());
11578 #ifdef TARGET_NR_readahead
11579 case TARGET_NR_readahead
:
11580 #if TARGET_ABI_BITS == 32
11581 if (regpairs_aligned(cpu_env
, num
)) {
11586 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11588 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11593 #ifdef TARGET_NR_setxattr
11594 case TARGET_NR_listxattr
:
11595 case TARGET_NR_llistxattr
:
11599 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11601 return -TARGET_EFAULT
;
11604 p
= lock_user_string(arg1
);
11606 if (num
== TARGET_NR_listxattr
) {
11607 ret
= get_errno(listxattr(p
, b
, arg3
));
11609 ret
= get_errno(llistxattr(p
, b
, arg3
));
11612 ret
= -TARGET_EFAULT
;
11614 unlock_user(p
, arg1
, 0);
11615 unlock_user(b
, arg2
, arg3
);
11618 case TARGET_NR_flistxattr
:
11622 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11624 return -TARGET_EFAULT
;
11627 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11628 unlock_user(b
, arg2
, arg3
);
11631 case TARGET_NR_setxattr
:
11632 case TARGET_NR_lsetxattr
:
11634 void *p
, *n
, *v
= 0;
11636 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11638 return -TARGET_EFAULT
;
11641 p
= lock_user_string(arg1
);
11642 n
= lock_user_string(arg2
);
11644 if (num
== TARGET_NR_setxattr
) {
11645 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11647 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11650 ret
= -TARGET_EFAULT
;
11652 unlock_user(p
, arg1
, 0);
11653 unlock_user(n
, arg2
, 0);
11654 unlock_user(v
, arg3
, 0);
11657 case TARGET_NR_fsetxattr
:
11661 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11663 return -TARGET_EFAULT
;
11666 n
= lock_user_string(arg2
);
11668 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11670 ret
= -TARGET_EFAULT
;
11672 unlock_user(n
, arg2
, 0);
11673 unlock_user(v
, arg3
, 0);
11676 case TARGET_NR_getxattr
:
11677 case TARGET_NR_lgetxattr
:
11679 void *p
, *n
, *v
= 0;
11681 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11683 return -TARGET_EFAULT
;
11686 p
= lock_user_string(arg1
);
11687 n
= lock_user_string(arg2
);
11689 if (num
== TARGET_NR_getxattr
) {
11690 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11692 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11695 ret
= -TARGET_EFAULT
;
11697 unlock_user(p
, arg1
, 0);
11698 unlock_user(n
, arg2
, 0);
11699 unlock_user(v
, arg3
, arg4
);
11702 case TARGET_NR_fgetxattr
:
11706 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11708 return -TARGET_EFAULT
;
11711 n
= lock_user_string(arg2
);
11713 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11715 ret
= -TARGET_EFAULT
;
11717 unlock_user(n
, arg2
, 0);
11718 unlock_user(v
, arg3
, arg4
);
11721 case TARGET_NR_removexattr
:
11722 case TARGET_NR_lremovexattr
:
11725 p
= lock_user_string(arg1
);
11726 n
= lock_user_string(arg2
);
11728 if (num
== TARGET_NR_removexattr
) {
11729 ret
= get_errno(removexattr(p
, n
));
11731 ret
= get_errno(lremovexattr(p
, n
));
11734 ret
= -TARGET_EFAULT
;
11736 unlock_user(p
, arg1
, 0);
11737 unlock_user(n
, arg2
, 0);
11740 case TARGET_NR_fremovexattr
:
11743 n
= lock_user_string(arg2
);
11745 ret
= get_errno(fremovexattr(arg1
, n
));
11747 ret
= -TARGET_EFAULT
;
11749 unlock_user(n
, arg2
, 0);
11753 #endif /* CONFIG_ATTR */
11754 #ifdef TARGET_NR_set_thread_area
11755 case TARGET_NR_set_thread_area
:
11756 #if defined(TARGET_MIPS)
11757 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11759 #elif defined(TARGET_CRIS)
11761 ret
= -TARGET_EINVAL
;
11763 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11767 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11768 return do_set_thread_area(cpu_env
, arg1
);
11769 #elif defined(TARGET_M68K)
11771 TaskState
*ts
= cpu
->opaque
;
11772 ts
->tp_value
= arg1
;
11776 return -TARGET_ENOSYS
;
11779 #ifdef TARGET_NR_get_thread_area
11780 case TARGET_NR_get_thread_area
:
11781 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11782 return do_get_thread_area(cpu_env
, arg1
);
11783 #elif defined(TARGET_M68K)
11785 TaskState
*ts
= cpu
->opaque
;
11786 return ts
->tp_value
;
11789 return -TARGET_ENOSYS
;
11792 #ifdef TARGET_NR_getdomainname
11793 case TARGET_NR_getdomainname
:
11794 return -TARGET_ENOSYS
;
11797 #ifdef TARGET_NR_clock_settime
11798 case TARGET_NR_clock_settime
:
11800 struct timespec ts
;
11802 ret
= target_to_host_timespec(&ts
, arg2
);
11803 if (!is_error(ret
)) {
11804 ret
= get_errno(clock_settime(arg1
, &ts
));
11809 #ifdef TARGET_NR_clock_settime64
11810 case TARGET_NR_clock_settime64
:
11812 struct timespec ts
;
11814 ret
= target_to_host_timespec64(&ts
, arg2
);
11815 if (!is_error(ret
)) {
11816 ret
= get_errno(clock_settime(arg1
, &ts
));
11821 #ifdef TARGET_NR_clock_gettime
11822 case TARGET_NR_clock_gettime
:
11824 struct timespec ts
;
11825 ret
= get_errno(clock_gettime(arg1
, &ts
));
11826 if (!is_error(ret
)) {
11827 ret
= host_to_target_timespec(arg2
, &ts
);
11832 #ifdef TARGET_NR_clock_gettime64
11833 case TARGET_NR_clock_gettime64
:
11835 struct timespec ts
;
11836 ret
= get_errno(clock_gettime(arg1
, &ts
));
11837 if (!is_error(ret
)) {
11838 ret
= host_to_target_timespec64(arg2
, &ts
);
11843 #ifdef TARGET_NR_clock_getres
11844 case TARGET_NR_clock_getres
:
11846 struct timespec ts
;
11847 ret
= get_errno(clock_getres(arg1
, &ts
));
11848 if (!is_error(ret
)) {
11849 host_to_target_timespec(arg2
, &ts
);
11854 #ifdef TARGET_NR_clock_getres_time64
11855 case TARGET_NR_clock_getres_time64
:
11857 struct timespec ts
;
11858 ret
= get_errno(clock_getres(arg1
, &ts
));
11859 if (!is_error(ret
)) {
11860 host_to_target_timespec64(arg2
, &ts
);
11865 #ifdef TARGET_NR_clock_nanosleep
11866 case TARGET_NR_clock_nanosleep
:
11868 struct timespec ts
;
11869 target_to_host_timespec(&ts
, arg3
);
11870 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11871 &ts
, arg4
? &ts
: NULL
));
11873 * if the call is interrupted by a signal handler, it fails
11874 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
11875 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
11877 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
) {
11878 host_to_target_timespec(arg4
, &ts
);
11885 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11886 case TARGET_NR_set_tid_address
:
11887 return get_errno(set_tid_address((int *)g2h(arg1
)));
11890 case TARGET_NR_tkill
:
11891 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11893 case TARGET_NR_tgkill
:
11894 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11895 target_to_host_signal(arg3
)));
11897 #ifdef TARGET_NR_set_robust_list
11898 case TARGET_NR_set_robust_list
:
11899 case TARGET_NR_get_robust_list
:
11900 /* The ABI for supporting robust futexes has userspace pass
11901 * the kernel a pointer to a linked list which is updated by
11902 * userspace after the syscall; the list is walked by the kernel
11903 * when the thread exits. Since the linked list in QEMU guest
11904 * memory isn't a valid linked list for the host and we have
11905 * no way to reliably intercept the thread-death event, we can't
11906 * support these. Silently return ENOSYS so that guest userspace
11907 * falls back to a non-robust futex implementation (which should
11908 * be OK except in the corner case of the guest crashing while
11909 * holding a mutex that is shared with another process via
11912 return -TARGET_ENOSYS
;
11915 #if defined(TARGET_NR_utimensat)
11916 case TARGET_NR_utimensat
:
11918 struct timespec
*tsp
, ts
[2];
11922 if (target_to_host_timespec(ts
, arg3
)) {
11923 return -TARGET_EFAULT
;
11925 if (target_to_host_timespec(ts
+ 1, arg3
+
11926 sizeof(struct target_timespec
))) {
11927 return -TARGET_EFAULT
;
11932 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11934 if (!(p
= lock_user_string(arg2
))) {
11935 return -TARGET_EFAULT
;
11937 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11938 unlock_user(p
, arg2
, 0);
11943 #ifdef TARGET_NR_futex
11944 case TARGET_NR_futex
:
11945 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11947 #ifdef TARGET_NR_futex_time64
11948 case TARGET_NR_futex_time64
:
11949 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11951 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11952 case TARGET_NR_inotify_init
:
11953 ret
= get_errno(sys_inotify_init());
11955 fd_trans_register(ret
, &target_inotify_trans
);
11959 #ifdef CONFIG_INOTIFY1
11960 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11961 case TARGET_NR_inotify_init1
:
11962 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11963 fcntl_flags_tbl
)));
11965 fd_trans_register(ret
, &target_inotify_trans
);
11970 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11971 case TARGET_NR_inotify_add_watch
:
11972 p
= lock_user_string(arg2
);
11973 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11974 unlock_user(p
, arg2
, 0);
11977 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11978 case TARGET_NR_inotify_rm_watch
:
11979 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11982 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11983 case TARGET_NR_mq_open
:
11985 struct mq_attr posix_mq_attr
;
11986 struct mq_attr
*pposix_mq_attr
;
11989 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11990 pposix_mq_attr
= NULL
;
11992 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11993 return -TARGET_EFAULT
;
11995 pposix_mq_attr
= &posix_mq_attr
;
11997 p
= lock_user_string(arg1
- 1);
11999 return -TARGET_EFAULT
;
12001 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12002 unlock_user (p
, arg1
, 0);
12006 case TARGET_NR_mq_unlink
:
12007 p
= lock_user_string(arg1
- 1);
12009 return -TARGET_EFAULT
;
12011 ret
= get_errno(mq_unlink(p
));
12012 unlock_user (p
, arg1
, 0);
12015 #ifdef TARGET_NR_mq_timedsend
12016 case TARGET_NR_mq_timedsend
:
12018 struct timespec ts
;
12020 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12022 target_to_host_timespec(&ts
, arg5
);
12023 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12024 host_to_target_timespec(arg5
, &ts
);
12026 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12028 unlock_user (p
, arg2
, arg3
);
12033 #ifdef TARGET_NR_mq_timedreceive
12034 case TARGET_NR_mq_timedreceive
:
12036 struct timespec ts
;
12039 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12041 target_to_host_timespec(&ts
, arg5
);
12042 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12044 host_to_target_timespec(arg5
, &ts
);
12046 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12049 unlock_user (p
, arg2
, arg3
);
12051 put_user_u32(prio
, arg4
);
12056 /* Not implemented for now... */
12057 /* case TARGET_NR_mq_notify: */
12060 case TARGET_NR_mq_getsetattr
:
12062 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12065 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12066 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12067 &posix_mq_attr_out
));
12068 } else if (arg3
!= 0) {
12069 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12071 if (ret
== 0 && arg3
!= 0) {
12072 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12078 #ifdef CONFIG_SPLICE
12079 #ifdef TARGET_NR_tee
12080 case TARGET_NR_tee
:
12082 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12086 #ifdef TARGET_NR_splice
12087 case TARGET_NR_splice
:
12089 loff_t loff_in
, loff_out
;
12090 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12092 if (get_user_u64(loff_in
, arg2
)) {
12093 return -TARGET_EFAULT
;
12095 ploff_in
= &loff_in
;
12098 if (get_user_u64(loff_out
, arg4
)) {
12099 return -TARGET_EFAULT
;
12101 ploff_out
= &loff_out
;
12103 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12105 if (put_user_u64(loff_in
, arg2
)) {
12106 return -TARGET_EFAULT
;
12110 if (put_user_u64(loff_out
, arg4
)) {
12111 return -TARGET_EFAULT
;
12117 #ifdef TARGET_NR_vmsplice
12118 case TARGET_NR_vmsplice
:
12120 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12122 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12123 unlock_iovec(vec
, arg2
, arg3
, 0);
12125 ret
= -host_to_target_errno(errno
);
12130 #endif /* CONFIG_SPLICE */
12131 #ifdef CONFIG_EVENTFD
12132 #if defined(TARGET_NR_eventfd)
12133 case TARGET_NR_eventfd
:
12134 ret
= get_errno(eventfd(arg1
, 0));
12136 fd_trans_register(ret
, &target_eventfd_trans
);
12140 #if defined(TARGET_NR_eventfd2)
12141 case TARGET_NR_eventfd2
:
12143 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12144 if (arg2
& TARGET_O_NONBLOCK
) {
12145 host_flags
|= O_NONBLOCK
;
12147 if (arg2
& TARGET_O_CLOEXEC
) {
12148 host_flags
|= O_CLOEXEC
;
12150 ret
= get_errno(eventfd(arg1
, host_flags
));
12152 fd_trans_register(ret
, &target_eventfd_trans
);
12157 #endif /* CONFIG_EVENTFD */
12158 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12159 case TARGET_NR_fallocate
:
12160 #if TARGET_ABI_BITS == 32
12161 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12162 target_offset64(arg5
, arg6
)));
12164 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12168 #if defined(CONFIG_SYNC_FILE_RANGE)
12169 #if defined(TARGET_NR_sync_file_range)
12170 case TARGET_NR_sync_file_range
:
12171 #if TARGET_ABI_BITS == 32
12172 #if defined(TARGET_MIPS)
12173 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12174 target_offset64(arg5
, arg6
), arg7
));
12176 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12177 target_offset64(arg4
, arg5
), arg6
));
12178 #endif /* !TARGET_MIPS */
12180 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12184 #if defined(TARGET_NR_sync_file_range2) || \
12185 defined(TARGET_NR_arm_sync_file_range)
12186 #if defined(TARGET_NR_sync_file_range2)
12187 case TARGET_NR_sync_file_range2
:
12189 #if defined(TARGET_NR_arm_sync_file_range)
12190 case TARGET_NR_arm_sync_file_range
:
12192 /* This is like sync_file_range but the arguments are reordered */
12193 #if TARGET_ABI_BITS == 32
12194 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12195 target_offset64(arg5
, arg6
), arg2
));
12197 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12202 #if defined(TARGET_NR_signalfd4)
12203 case TARGET_NR_signalfd4
:
12204 return do_signalfd4(arg1
, arg2
, arg4
);
12206 #if defined(TARGET_NR_signalfd)
12207 case TARGET_NR_signalfd
:
12208 return do_signalfd4(arg1
, arg2
, 0);
12210 #if defined(CONFIG_EPOLL)
12211 #if defined(TARGET_NR_epoll_create)
12212 case TARGET_NR_epoll_create
:
12213 return get_errno(epoll_create(arg1
));
12215 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12216 case TARGET_NR_epoll_create1
:
12217 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12219 #if defined(TARGET_NR_epoll_ctl)
12220 case TARGET_NR_epoll_ctl
:
12222 struct epoll_event ep
;
12223 struct epoll_event
*epp
= 0;
12225 struct target_epoll_event
*target_ep
;
12226 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12227 return -TARGET_EFAULT
;
12229 ep
.events
= tswap32(target_ep
->events
);
12230 /* The epoll_data_t union is just opaque data to the kernel,
12231 * so we transfer all 64 bits across and need not worry what
12232 * actual data type it is.
12234 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12235 unlock_user_struct(target_ep
, arg4
, 0);
12238 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12242 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12243 #if defined(TARGET_NR_epoll_wait)
12244 case TARGET_NR_epoll_wait
:
12246 #if defined(TARGET_NR_epoll_pwait)
12247 case TARGET_NR_epoll_pwait
:
12250 struct target_epoll_event
*target_ep
;
12251 struct epoll_event
*ep
;
12253 int maxevents
= arg3
;
12254 int timeout
= arg4
;
12256 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12257 return -TARGET_EINVAL
;
12260 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12261 maxevents
* sizeof(struct target_epoll_event
), 1);
12263 return -TARGET_EFAULT
;
12266 ep
= g_try_new(struct epoll_event
, maxevents
);
12268 unlock_user(target_ep
, arg2
, 0);
12269 return -TARGET_ENOMEM
;
12273 #if defined(TARGET_NR_epoll_pwait)
12274 case TARGET_NR_epoll_pwait
:
12276 target_sigset_t
*target_set
;
12277 sigset_t _set
, *set
= &_set
;
12280 if (arg6
!= sizeof(target_sigset_t
)) {
12281 ret
= -TARGET_EINVAL
;
12285 target_set
= lock_user(VERIFY_READ
, arg5
,
12286 sizeof(target_sigset_t
), 1);
12288 ret
= -TARGET_EFAULT
;
12291 target_to_host_sigset(set
, target_set
);
12292 unlock_user(target_set
, arg5
, 0);
12297 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12298 set
, SIGSET_T_SIZE
));
12302 #if defined(TARGET_NR_epoll_wait)
12303 case TARGET_NR_epoll_wait
:
12304 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12309 ret
= -TARGET_ENOSYS
;
12311 if (!is_error(ret
)) {
12313 for (i
= 0; i
< ret
; i
++) {
12314 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12315 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12317 unlock_user(target_ep
, arg2
,
12318 ret
* sizeof(struct target_epoll_event
));
12320 unlock_user(target_ep
, arg2
, 0);
12327 #ifdef TARGET_NR_prlimit64
12328 case TARGET_NR_prlimit64
:
12330 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12331 struct target_rlimit64
*target_rnew
, *target_rold
;
12332 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12333 int resource
= target_to_host_resource(arg2
);
12335 if (arg3
&& (resource
!= RLIMIT_AS
&&
12336 resource
!= RLIMIT_DATA
&&
12337 resource
!= RLIMIT_STACK
)) {
12338 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12339 return -TARGET_EFAULT
;
12341 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12342 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12343 unlock_user_struct(target_rnew
, arg3
, 0);
12347 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12348 if (!is_error(ret
) && arg4
) {
12349 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12350 return -TARGET_EFAULT
;
12352 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12353 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12354 unlock_user_struct(target_rold
, arg4
, 1);
12359 #ifdef TARGET_NR_gethostname
12360 case TARGET_NR_gethostname
:
12362 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12364 ret
= get_errno(gethostname(name
, arg2
));
12365 unlock_user(name
, arg1
, arg2
);
12367 ret
= -TARGET_EFAULT
;
12372 #ifdef TARGET_NR_atomic_cmpxchg_32
12373 case TARGET_NR_atomic_cmpxchg_32
:
12375 /* should use start_exclusive from main.c */
12376 abi_ulong mem_value
;
12377 if (get_user_u32(mem_value
, arg6
)) {
12378 target_siginfo_t info
;
12379 info
.si_signo
= SIGSEGV
;
12381 info
.si_code
= TARGET_SEGV_MAPERR
;
12382 info
._sifields
._sigfault
._addr
= arg6
;
12383 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12384 QEMU_SI_FAULT
, &info
);
12388 if (mem_value
== arg2
)
12389 put_user_u32(arg1
, arg6
);
12393 #ifdef TARGET_NR_atomic_barrier
12394 case TARGET_NR_atomic_barrier
:
12395 /* Like the kernel implementation and the
12396 qemu arm barrier, no-op this? */
12400 #ifdef TARGET_NR_timer_create
12401 case TARGET_NR_timer_create
:
12403 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12405 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12408 int timer_index
= next_free_host_timer();
12410 if (timer_index
< 0) {
12411 ret
= -TARGET_EAGAIN
;
12413 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12416 phost_sevp
= &host_sevp
;
12417 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12423 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12427 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12428 return -TARGET_EFAULT
;
12436 #ifdef TARGET_NR_timer_settime
12437 case TARGET_NR_timer_settime
:
12439 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12440 * struct itimerspec * old_value */
12441 target_timer_t timerid
= get_timer_id(arg1
);
12445 } else if (arg3
== 0) {
12446 ret
= -TARGET_EINVAL
;
12448 timer_t htimer
= g_posix_timers
[timerid
];
12449 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12451 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12452 return -TARGET_EFAULT
;
12455 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12456 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12457 return -TARGET_EFAULT
;
12464 #ifdef TARGET_NR_timer_settime64
12465 case TARGET_NR_timer_settime64
:
12467 target_timer_t timerid
= get_timer_id(arg1
);
12471 } else if (arg3
== 0) {
12472 ret
= -TARGET_EINVAL
;
12474 timer_t htimer
= g_posix_timers
[timerid
];
12475 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12477 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12478 return -TARGET_EFAULT
;
12481 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12482 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12483 return -TARGET_EFAULT
;
12490 #ifdef TARGET_NR_timer_gettime
12491 case TARGET_NR_timer_gettime
:
12493 /* args: timer_t timerid, struct itimerspec *curr_value */
12494 target_timer_t timerid
= get_timer_id(arg1
);
12498 } else if (!arg2
) {
12499 ret
= -TARGET_EFAULT
;
12501 timer_t htimer
= g_posix_timers
[timerid
];
12502 struct itimerspec hspec
;
12503 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12505 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12506 ret
= -TARGET_EFAULT
;
12513 #ifdef TARGET_NR_timer_gettime64
12514 case TARGET_NR_timer_gettime64
:
12516 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12517 target_timer_t timerid
= get_timer_id(arg1
);
12521 } else if (!arg2
) {
12522 ret
= -TARGET_EFAULT
;
12524 timer_t htimer
= g_posix_timers
[timerid
];
12525 struct itimerspec hspec
;
12526 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12528 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12529 ret
= -TARGET_EFAULT
;
12536 #ifdef TARGET_NR_timer_getoverrun
12537 case TARGET_NR_timer_getoverrun
:
12539 /* args: timer_t timerid */
12540 target_timer_t timerid
= get_timer_id(arg1
);
12545 timer_t htimer
= g_posix_timers
[timerid
];
12546 ret
= get_errno(timer_getoverrun(htimer
));
12552 #ifdef TARGET_NR_timer_delete
12553 case TARGET_NR_timer_delete
:
12555 /* args: timer_t timerid */
12556 target_timer_t timerid
= get_timer_id(arg1
);
12561 timer_t htimer
= g_posix_timers
[timerid
];
12562 ret
= get_errno(timer_delete(htimer
));
12563 g_posix_timers
[timerid
] = 0;
12569 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12570 case TARGET_NR_timerfd_create
:
12571 return get_errno(timerfd_create(arg1
,
12572 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12575 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12576 case TARGET_NR_timerfd_gettime
:
12578 struct itimerspec its_curr
;
12580 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12582 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12583 return -TARGET_EFAULT
;
12589 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12590 case TARGET_NR_timerfd_gettime64
:
12592 struct itimerspec its_curr
;
12594 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12596 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
12597 return -TARGET_EFAULT
;
12603 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12604 case TARGET_NR_timerfd_settime
:
12606 struct itimerspec its_new
, its_old
, *p_new
;
12609 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12610 return -TARGET_EFAULT
;
12617 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12619 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12620 return -TARGET_EFAULT
;
12626 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12627 case TARGET_NR_timerfd_settime64
:
12629 struct itimerspec its_new
, its_old
, *p_new
;
12632 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
12633 return -TARGET_EFAULT
;
12640 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12642 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
12643 return -TARGET_EFAULT
;
12649 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12650 case TARGET_NR_ioprio_get
:
12651 return get_errno(ioprio_get(arg1
, arg2
));
12654 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12655 case TARGET_NR_ioprio_set
:
12656 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12659 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12660 case TARGET_NR_setns
:
12661 return get_errno(setns(arg1
, arg2
));
12663 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12664 case TARGET_NR_unshare
:
12665 return get_errno(unshare(arg1
));
12667 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12668 case TARGET_NR_kcmp
:
12669 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12671 #ifdef TARGET_NR_swapcontext
12672 case TARGET_NR_swapcontext
:
12673 /* PowerPC specific. */
12674 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12676 #ifdef TARGET_NR_memfd_create
12677 case TARGET_NR_memfd_create
:
12678 p
= lock_user_string(arg1
);
12680 return -TARGET_EFAULT
;
12682 ret
= get_errno(memfd_create(p
, arg2
));
12683 fd_trans_unregister(ret
);
12684 unlock_user(p
, arg1
, 0);
12687 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12688 case TARGET_NR_membarrier
:
12689 return get_errno(membarrier(arg1
, arg2
));
12693 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12694 return -TARGET_ENOSYS
;
12699 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12700 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12701 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12704 CPUState
*cpu
= env_cpu(cpu_env
);
12707 #ifdef DEBUG_ERESTARTSYS
12708 /* Debug-only code for exercising the syscall-restart code paths
12709 * in the per-architecture cpu main loops: restart every syscall
12710 * the guest makes once before letting it through.
12716 return -TARGET_ERESTARTSYS
;
12721 record_syscall_start(cpu
, num
, arg1
,
12722 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
12724 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12725 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12728 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12729 arg5
, arg6
, arg7
, arg8
);
12731 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12732 print_syscall_ret(num
, ret
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12735 record_syscall_return(cpu
, num
, ret
);