/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#include "linux_loop.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
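/*
 * Illustrative note on how the masks above are used (the checks
 * themselves live in do_fork(), later in this file): any requested bit
 * outside CSIGNAL, the mandatory/optional flags and the ignored flags is
 * grounds for rejection, e.g.
 *
 *     if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *         return -TARGET_EINVAL;
 *     }
 */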
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
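/*
 * These ioctl numbers are normally provided by <linux/msdos_fs.h> (hence
 * the commented-out include above); deriving them locally with _IOR
 * avoids pulling in that whole header. The payload is presumably a pair
 * of linux_dirent records so that one call can return both the short and
 * the long name of a directory entry.
 */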
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
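/*
 * For illustration: _syscall3(int, sys_getdents, uint, fd,
 * struct linux_dirent *, dirp, uint, count) expands to
 *
 *     static int sys_getdents (uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * i.e. a thin static wrapper that enters the host syscall directly,
 * bypassing whatever wrapper (or absence of one) the host libc provides.
 */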
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
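/*
 * Each row above is { target_mask, target_bits, host_mask, host_bits }:
 * when the target flags ANDed with target_mask equal target_bits, the
 * host_bits are ORed into the host value (and symmetrically in the other
 * direction). That is why O_ACCMODE needs one row per access mode while
 * the simple one-bit flags use the same constant in both columns.
 */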
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
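/*
 * Note the reservation trick above: a free slot holds 0 and is claimed by
 * storing the dummy value (timer_t) 1 before the real host timer is
 * created, so two guest threads racing through timer_create are less
 * likely to pick the same slot (see the FIXME about proper locking).
 */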
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
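/*
 * Illustrative example: on ARM EABI, a 64-bit syscall argument must sit in
 * an even/odd register pair (r0/r1, r2/r3, ...), so a call such as
 * pread64(fd, buf, count, offset) inserts a padding argument before the
 * 64-bit offset. regpairs_aligned() tells the argument-unpacking code
 * whether to expect that padding for the current target.
 */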
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
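/*
 * Note on safe_syscall() vs plain syscall(): the safe_* wrappers above are
 * built on a primitive that is intended to check for pending guest signals
 * immediately before entering the host syscall and to fail with
 * TARGET_ERESTARTSYS if one arrived first, so that signal delivery and
 * syscall restart cannot race (see safe-syscall.inc.S for the
 * host-specific implementation).
 */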
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname,
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options,
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop,
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val,
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val,
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#ifdef TARGET_NR_rt_sigtimedwait
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedsend
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedreceive
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
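/* To trace the brk emulation below, swap which DEBUGF_BRK definition
 * above is commented out so the fprintf variant is the live one. */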
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
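/*
 * Worked example: a host with HOST_HZ == 1024 (Alpha) reporting 2048 ticks
 * to a 100 Hz target yields (2048 * 100) / 1024 = 200 target clock ticks;
 * the int64_t cast keeps the intermediate product from overflowing a
 * 32-bit long.
 */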
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
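/*
 * Both rlimit conversions above use the same round-trip test for overflow:
 * the value is converted and cast back, and a mismatch (e.g. a 64-bit host
 * limit that does not fit a 32-bit abi_ulong) is mapped to the infinity
 * constant of the destination rather than being silently truncated.
 */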
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive)
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
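/*
 * The length check above is what distinguishes the two guest layouts:
 * a struct target_ip_mreq carries only the two in_addr fields, while the
 * larger struct target_ip_mreqn adds the imr_ifindex interface index,
 * which is the one member that needs a byte swap.
 */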
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
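/*
 * Summary of the conversion above: headers are swapped field by field,
 * SCM_RIGHTS payloads are copied as arrays of ints, and payloads whose
 * size differs between host and target (e.g. SO_TIMESTAMP's struct
 * timeval) set tgt_len independently of len; anything unrecognised is
 * byte-copied with a LOG_UNIMP warning rather than dropped.
 */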
/* do_setsockopt() must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));

        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user(ip_mreq_source, optval_addr, 0);
        case IPV6_MTU_DISCOVER:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));

            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));

            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));

            /* these take a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));

#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
            char *alg_key = g_malloc(optlen);
            if (!alg_key) {
                return -TARGET_ENOMEM;
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
        case ALG_SET_AEAD_AUTHSIZE:
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
    case TARGET_SOL_SOCKET:
        case TARGET_SO_RCVTIMEO:
                optname = SO_RCVTIMEO;

                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           &tv, sizeof(tv)));
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
        case TARGET_SO_ATTACH_FILTER:
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
        case TARGET_SO_BINDTODEVICE:
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user(dev_ifname, optval_addr, 0);
        case TARGET_SO_LINGER:
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
        case TARGET_SO_TYPE:
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;

        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));

    case NETLINK_PKTINFO:
    case NETLINK_ADD_MEMBERSHIP:
    case NETLINK_DROP_MEMBERSHIP:
    case NETLINK_BROADCAST_ERROR:
    case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    case NETLINK_LISTEN_ALL_NSID:
    case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
    case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
    case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
#endif /* SOL_NETLINK */

        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;

/* do_getsockopt() must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
    case TARGET_SOL_SOCKET:
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
        case TARGET_SO_RCVTIMEO: {
            optname = SO_RCVTIMEO;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            if (len < 0) {
                return -TARGET_EINVAL;

            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;

        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
        case TARGET_SO_PEERCRED: {
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            if (len < 0) {
                return -TARGET_EINVAL;

            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;

        case TARGET_SO_PEERSEC: {
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            if (len < 0) {
                return -TARGET_EINVAL;
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            unlock_user(name, optval_addr, lv);

        case TARGET_SO_LINGER:
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            if (len < 0) {
                return -TARGET_EINVAL;

            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lv));
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;

        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
        case TARGET_SO_TYPE:
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;

        /* TCP options all take an 'int' value. */
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));

        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);

            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;

        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));

            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;

            ret = -TARGET_ENOPROTOOPT;

        case IPV6_MTU_DISCOVER:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));

            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;

            ret = -TARGET_ENOPROTOOPT;

    case NETLINK_PKTINFO:
    case NETLINK_BROADCAST_ERROR:
    case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    case NETLINK_LISTEN_ALL_NSID:
    case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
    case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
    case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
        if (get_user_u32(len, optlen)) {
            return -TARGET_EFAULT;
        if (len != sizeof(val)) {
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));

        if (put_user_u32(lv, optlen)
            || put_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    case NETLINK_LIST_MEMBERSHIPS:
        if (get_user_u32(len, optlen)) {
            return -TARGET_EFAULT;
        if (len < 0) {
            return -TARGET_EINVAL;
        results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
        if (!results) {
            return -TARGET_EFAULT;

        ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            unlock_user(results, optval_addr, 0);

        /* swap host endianness to target endianness. */
        for (i = 0; i < (len / sizeof(uint32_t)); i++) {
            results[i] = tswap32(results[i]);
        if (put_user_u32(lv, optlen)) {
            return -TARGET_EFAULT;
        unlock_user(results, optval_addr, 0);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#endif /* SOL_NETLINK */

        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;

/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
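/*
 * Worked example for target_to_host_low_high() (illustrative, assuming a
 * 32-bit target on a 64-bit host): tlow=0x89abcdef and thigh=0x01234567
 * combine to off=0x0123456789abcdef; *hlow then carries the whole value and
 * *hhigh ends up 0.  The two half-width shifts avoid shifting by the full
 * width of the type, which would be undefined behaviour in C.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the real configury macros. */
#define TARGET_LONG_BITS 32
#define HOST_LONG_BITS   64

int main(void)
{
    uint32_t tlow = 0x89abcdef, thigh = 0x01234567;
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;
    unsigned long hlow = off;
    unsigned long hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;

    printf("off=%#llx hlow=%#lx hhigh=%#lx\n",
           (unsigned long long)off, hlow, hhigh);
    return 0;
}
#endif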
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
    struct target_iovec *target_vec;
    abi_ulong total_len, max_len;
    bool bad_address = false;

    if (count > IOV_MAX) {
    vec = g_try_new0(struct iovec, count);
    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
            if (len > max_len - total_len) {
                len = max_len - total_len;
        vec[i].iov_len = len;
    unlock_user(target_vec, target_addr, 0);

        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
    unlock_user(target_vec, target_addr, 0);
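/*
 * Illustrative sketch of the partial-write convention lock_iovec()
 * implements: writev(2) itself behaves this way when a later buffer faults --
 * bytes up to the bad buffer are written and a short count is returned
 * instead of EFAULT.  Host-only demo; the PROT_NONE mapping is an assumption
 * used to provoke a fault on the second segment.
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
    char good[] = "ok";
    /* A mapped but unreadable page: passes the kernel's range check,
     * faults when the data is actually copied. */
    void *bad = mmap(NULL, 4096, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    struct iovec v[2] = {
        { .iov_base = good, .iov_len = sizeof(good) - 1 },
        { .iov_base = bad,  .iov_len = 4 },
    };
    /* Expect a short write of 2 bytes rather than -1/EFAULT. */
    ssize_t n = writev(STDOUT_FILENO, v, 2);
    fprintf(stderr, "\nwritev returned %zd\n", n);
    return 0;
}
#endif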
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
    struct target_iovec *target_vec;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        unlock_user(target_vec, target_addr, 0);

static inline int target_to_host_sock_type(int *type)
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
        return -TARGET_EINVAL;
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;

/* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            return -TARGET_EINVAL;

/* do_socket() must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
    int target_type = type;

    ret = target_to_host_sock_type(&type);

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
          protocol == NETLINK_ROUTE ||
#endif
          protocol == NETLINK_KOBJECT_UEVENT ||
          protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);

    ret = get_errno(socket(domain, type, protocol));
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
            default:
                g_assert_not_reached();

/* do_bind() must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;

    addr = alloca(addrlen + 1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    return get_errno(bind(sockfd, addr, addrlen));

/* do_connect() must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;

    addr = alloca(addrlen + 1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    return get_errno(safe_connect(sockfd, addr, addrlen));

/* do_sendrecvmsg_locked() must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen + 1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendmsg/recvmsg return a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
        ret = -host_to_target_errno(errno);
    msg.msg_iovlen = count;

        if (fd_trans_target_to_host_data(fd)) {
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            ret = target_to_host_cmsg(&msg, msgp);
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
                ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);

    unlock_iovec(vec, target_vec, count, !send);

static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp, target_msg, send ? 1 : 0)) {
        return -TARGET_EFAULT;
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
    struct target_mmsghdr *mmsgp;

    if (vlen > UIO_MAXIOV) {
    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
        return -TARGET_EFAULT;

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
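/*
 * Illustrative host-side sketch of the sendmmsg()/recvmmsg() convention
 * mirrored above: the call reports how many messages were processed and
 * surfaces an error only if nothing was transferred at all.  Assumes
 * _GNU_SOURCE on a Linux host.
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
    int sv[2];
    struct mmsghdr msgs[2];
    struct iovec iov[2];
    char *payload[2] = { "one", "two" };

    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0) {
        return 1;
    }
    memset(msgs, 0, sizeof(msgs));
    for (int i = 0; i < 2; i++) {
        iov[i].iov_base = payload[i];
        iov[i].iov_len = strlen(payload[i]);
        msgs[i].msg_hdr.msg_iov = &iov[i];
        msgs[i].msg_hdr.msg_iovlen = 1;
    }
    /* Returns 2 on success; msgs[i].msg_len carries per-datagram sizes,
     * just like the tswap32(ret) stores in the loop above. */
    int n = sendmmsg(sv[0], msgs, 2, 0);
    printf("sendmmsg sent %d datagrams\n", n);
    return 0;
}
#endif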
/* do_accept4() must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
    socklen_t addrlen, ret_addrlen;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;

/* do_getpeername() must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
    socklen_t addrlen, ret_addrlen;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;

/* do_getsockname() must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
    socklen_t addrlen, ret_addrlen;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;

/* do_socketpair() must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;

/* do_sendto() must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
    void *copy_msg = NULL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        addr = alloca(addrlen + 1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
        host_msg = copy_msg;
    unlock_user(host_msg, msg, 0);

/* do_recvfrom() must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
    socklen_t addrlen, ret_addrlen;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        return -TARGET_EFAULT;
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
        unlock_user(host_msg, msg, len);
        unlock_user(host_msg, msg, 0);

#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    abi_long a[6]; /* max 6 args */

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;

    /* now that we have the args, invoke the appropriate underlying function */
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
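/*
 * Illustrative sketch of what guests actually do: on architectures with
 * socketcall(2) (e.g. 32-bit x86), libc packs every socket operation into
 * (num, args[]) with args[] in memory, which is why the dispatcher above
 * pulls nargs[num] words out of guest memory with get_user_ual() before
 * branching.  A guest-side connect() is roughly:
 */
#if 0
#include <linux/net.h>      /* SYS_CONNECT */
#include <sys/syscall.h>
#include <unistd.h>

/* Only meaningful where __NR_socketcall exists; illustrative only. */
static long connect_via_socketcall(int fd, void *addr, unsigned long addrlen)
{
    unsigned long args[3] = { (unsigned long)fd,
                              (unsigned long)addr,
                              addrlen };
    return syscall(__NR_socketcall, SYS_CONNECT, args);
}
#endif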
#define N_SHM_REGIONS 32

static struct shm_region {
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;

static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);

static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);

struct target_seminfo {

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);

union semun {
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
        return -TARGET_ENOMEM;
    array = lock_user(VERIFY_READ, target_addr,
                      nsems * sizeof(unsigned short), 1);
        g_free(*host_array);
        return -TARGET_EFAULT;

    for (i = 0; i < nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 0);

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems * sizeof(unsigned short), 0);
        return -TARGET_EFAULT;

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
    union target_semun target_su = { .buf = target_arg };
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;

        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
            arg.val = target_su.val;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = target_to_host_semarray(semid, &array, target_su.array);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
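/*
 * Worked example for the cross-endian fixup above (illustrative; only
 * behaves as described on a little-endian host): a 64-bit big-endian guest
 * stores the 32-bit "val" member in what the host sees as the upper half of
 * the 8-byte union slot.  Swapping the whole abi_ulong first moves it into
 * the host's half, and the 4-byte swap then fixes its byte order.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

union demo { uint64_t buf; uint32_t val; };

int main(void)
{
    /* Guest wrote val = 5; on an LE host the raw 8-byte slot reads as
     * 0x0000000500000000 after the usual abi_ulong load+swap. */
    union demo u = { .buf = 0x0000000500000000ull };

    u.buf = __builtin_bswap64(u.buf);   /* the tswapal() step */
    u.val = __builtin_bswap32(u.val);   /* the tswap32() step */
    printf("val = %u\n", u.val);        /* prints 5 */
    return 0;
}
#endif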
struct target_sembuf {
    unsigned short sem_num;

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
    struct target_sembuf *target_sembuf;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops * sizeof(struct target_sembuf), 1);
        return -TARGET_EFAULT;

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);

    unlock_user(target_sembuf, target_addr, 0);

static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));

struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);

struct target_msginfo {
    unsigned short int msgseg;

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);

static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, NULL));
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;

struct target_msgbuf {

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

        return -TARGET_EINVAL;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);

    ret = -TARGET_ENOSYS;
        ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));

    unlock_user_struct(target_mb, msgp, 0);

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

        return -TARGET_EINVAL;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
        ret = -TARGET_ENOMEM;
    ret = -TARGET_ENOSYS;
        ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, host_mb, msgtyp));

        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);

    target_mb->mtype = tswapal(host_mb->mtype);

    unlock_user_struct(target_mb, msgp, 1);

static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);

struct target_shminfo {

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);

struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);

static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, NULL));

#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
    return TARGET_PAGE_SIZE;
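/*
 * Illustrative arithmetic for the SHMLBA handling in do_shmat() below:
 * with SHM_RND an unaligned attach address is rounded down to the next
 * SHMLBA boundary; without SHM_RND it is rejected with EINVAL.
 */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned long shmlba = 4096;            /* assume page-sized SHMLBA */
    unsigned long shmaddr = 0x40001234;

    if (shmaddr & (shmlba - 1)) {
        /* The SHM_RND case; otherwise the kernel returns EINVAL. */
        unsigned long rounded = shmaddr & ~(shmlba - 1);
        printf("%#lx rounds down to %#lx\n", shmaddr, rounded);
        /* prints: 0x40001234 rounds down to 0x40001000 */
    }
    return 0;
}
#endif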
4265 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4266 int shmid
, abi_ulong shmaddr
, int shmflg
)
4270 struct shmid_ds shm_info
;
4274 /* find out the length of the shared memory segment */
4275 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4276 if (is_error(ret
)) {
4277 /* can't get length, bail out */
4281 shmlba
= target_shmlba(cpu_env
);
4283 if (shmaddr
& (shmlba
- 1)) {
4284 if (shmflg
& SHM_RND
) {
4285 shmaddr
&= ~(shmlba
- 1);
4287 return -TARGET_EINVAL
;
4290 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4291 return -TARGET_EINVAL
;
4297 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4299 abi_ulong mmap_start
;
4301 /* In order to use the host shmat, we need to honor host SHMLBA. */
4302 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4304 if (mmap_start
== -1) {
4306 host_raddr
= (void *)-1;
4308 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4311 if (host_raddr
== (void *)-1) {
4313 return get_errno((long)host_raddr
);
4315 raddr
=h2g((unsigned long)host_raddr
);
4317 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4318 PAGE_VALID
| PAGE_READ
|
4319 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4321 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4322 if (!shm_regions
[i
].in_use
) {
4323 shm_regions
[i
].in_use
= true;
4324 shm_regions
[i
].start
= raddr
;
4325 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4335 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4342 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4343 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4344 shm_regions
[i
].in_use
= false;
4345 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4349 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
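
/*
 * Illustrative note (not from the original source): on targets without
 * separate sem/msg/shm syscalls the guest libc funnels everything through
 * ipc(), e.g. semctl(id, 0, GETVAL) arrives here as call = IPCOP_semctl,
 * first = id, second = 0, third = GETVAL, with the semun union passed
 * indirectly through ptr (hence the get_user_ual() dereference above).
 */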
/* kernel structure types definitions */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
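
/*
 * Illustrative expansion (assumed, not part of the original source): for a
 * syscall_types.h entry such as
 *   STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 * the first inclusion above contributes an enum constant STRUCT_winsize,
 * and the second one a descriptor
 *   static const argtype struct_winsize_def[] =
 *       { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 * which the thunk code walks at runtime to convert the struct field by
 * field between target and host layouts.
 */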
typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

struct IOCTLEntry {
    int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
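
/*
 * Illustrative sizing example (not from the original source): for a guest
 * request with fm_extent_count == 100, outbufsz is sizeof(struct fiemap)
 * plus 100 * sizeof(struct fiemap_extent), which exceeds MAX_STRUCT_SIZE
 * (4096) and therefore takes the g_try_malloc() path above; a count of 0
 * (just querying how many extents exist) stays in buf_temp.
 */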
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
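
/*
 * Illustrative note (not from the original source): a URB submitted via
 * USBDEVFS_SUBMITURB stays pinned in a struct live_urb keyed by its guest
 * address until the guest either reaps it (USBDEVFS_REAPURB above unlocks
 * the data buffer, copies the host urb back and frees the wrapper) or
 * discards it (USBDEVFS_DISCARDURB only forwards the cancel; a cancelled
 * URB is still reported to the guest through a later reap).
 */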
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
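
/*
 * Illustrative note (not from the original source): dm_ioctl carries a
 * variable-sized payload after the fixed header, located by data_start and
 * bounded by data_size, e.g. with data_start == sizeof(struct dm_ioctl)
 * the first dm_target_spec begins immediately after the header. That is
 * why both directions above walk the area record by record using each
 * record's 'next' offset rather than a single thunk_convert() call.
 */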
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
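
/*
 * Illustrative note (not from the original source): rtentry.rt_dev is the
 * one field thunk_convert() cannot handle generically, because it is a
 * pointer to a NUL-terminated device name (e.g. "eth0") in guest memory
 * rather than inline data; the loop above therefore locks the guest string
 * and stores the resulting host pointer directly into the converted buffer.
 */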
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
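
/*
 * Illustrative expansion (assumed, not part of the original source):
 * an ioctls.h entry like IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * expands to
 *   { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 * i.e. the target and host command values plus a type description that
 * do_ioctl() below uses to marshal the argument in the right direction.
 */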
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
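
/*
 * Illustrative note (not from the original source): for a TYPE_PTR argument
 * the access mode drives the marshalling above: IOC_R converts host to
 * target after the call (e.g. TIOCGWINSZ filling in a struct winsize),
 * IOC_W converts target to host before it (e.g. TIOCSWINSZ), and IOC_RW
 * does both around the same safe_ioctl().
 */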
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
};
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_I386 or TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
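
/*
 * Illustrative encoding example (not from the original source): the
 * entry_1/entry_2 pair above is a standard x86 segment descriptor. For
 * base 0x12345678 and limit 0xfffff,
 *   entry_1 = (0x5678 << 16) | 0xffff      (base bits 15..0, limit 15..0)
 * and entry_2 packs base bits 31..24 and 23..16, limit bits 19..16, and
 * the type/present/granularity flag bits, matching the layout the CPU
 * expects in an LDT or GDT slot.
 */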
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
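
/*
 * Illustrative note (not from the original source): the two branches above
 * mirror how guest clone() flags are classified. glibc's pthread_create()
 * passes CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
 * CLONE_SYSVSEM (plus the settid/tls flags) and is emulated with a host
 * pthread; a plain fork()-style clone() passes none of them and is
 * emulated with a host fork().
 */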
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif

static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
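
/*
 * Illustrative conversion example (not from the original source): with
 * USE_UID16, a host uid of 100000 does not fit in 16 bits, so
 * high2lowuid() reports it to the guest as 65534 (the conventional
 * overflow "nobody" id), while the 16-bit value 0xffff coming from the
 * guest is sign-extended back to the kernel's -1 "no change" marker by
 * low2highuid().
 */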
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
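
/*
 * Example of the register-pair handling above: a 32-bit guest passes a
 * 64-bit file length as two 32-bit halves, so on a little-endian target
 * target_offset64(0x00000001, 0x00000000) reassembles offset 0x1. On an
 * ABI that requires 64-bit values in aligned register pairs,
 * regpairs_aligned() is true and the halves arrive shifted up by one
 * argument slot, which is why arg2/arg3 are reloaded from arg3/arg4
 * before the host call.
 */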
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
                            tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
                            tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    /* read-only access: do not copy the buffer back to guest memory */
    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    /* we wrote the struct, so copy it back to guest memory on unlock */
    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    /* read-only access: no copy back to guest memory is needed */
    unlock_user_struct(target_sevp, target_addr, 0);
    return 0;
}
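
/*
 * A sketch of why the sival_ptr handling above works: the guest may have
 * stored either a 32-bit integer or a pointer in the union. By always
 * byteswapping the full pointer-sized field with tswapal() (mirroring the
 * sigval handling in signal.c), the bytes end up where the guest will
 * read them back from, whichever union member it used, even when a
 * 64-bit host swaps eight bytes at a time.
 */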
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec,
                   &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec,
                   &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec,
                   &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec,
                   &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec,
                   &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec,
                   &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
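
/*
 * The selection logic above, summarised: a 64-bit host has a single
 * __NR_futex with 64-bit time_t. On a 32-bit host, if the headers expose
 * __NR_futex_time64 and struct timespec already carries a 64-bit tv_sec,
 * the _time64 variant is used; otherwise we fall back to the legacy
 * __NR_futex with 32-bit time_t. do_safe_futex() additionally routes
 * through the safe_syscall machinery so that pending guest signals are
 * not lost across the blocking host call.
 */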
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky; they're probably useless anyway because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
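
/*
 * Example of the FUTEX_CMD_MASK handling above: a guest calling
 * futex(uaddr, FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, ...) yields
 * base_op == FUTEX_WAIT_BITSET for dispatch purposes, while the full op
 * value, private flag included, is still what gets forwarded to the
 * host kernel.
 */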
#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec64(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    sigset_t host_mask;
    abi_long ret;
    target_sigset_t *target_mask;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
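
/*
 * Example for the conversion above: if a child dies from a host signal
 * whose number differs on the target, only the 7-bit signal field of the
 * wait status is rewritten; the core-dump bit and all remaining status
 * bits are assumed to share the same layout on host and target and pass
 * through untouched.
 */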
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
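
/*
 * Examples for the matching above: is_proc_myself("/proc/self/maps",
 * "maps") returns 1, as does is_proc_myself("/proc/1234/maps", "maps")
 * when 1234 is this process's pid; any other pid, or a path outside
 * /proc, returns 0. is_proc() by contrast matches one exact literal
 * path such as "/proc/net/route".
 */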
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
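
/*
 * Net effect of the lookup above: a guest open("/proc/self/maps", ...)
 * never reaches the host's real maps file. The matching fill routine
 * (open_self_maps) writes a synthesized, guest-view version into an
 * unlinked temporary file, and the guest receives a descriptor to that.
 * Paths with no faked entry fall through to safe_openat().
 */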
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
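
/*
 * Example of the encoding above: timer_create() hands the guest an ID of
 * TIMER_MAGIC | index, so 0x0caf0002 names g_posix_timers[2], and
 * get_timer_id(0x0caf0002) returns 2. Anything without the 0x0caf marker
 * in the top half, or with an out-of-range index, is rejected with
 * -TARGET_EINVAL.
 */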
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
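
/*
 * Worked example for the two converters above: with a 32-bit guest
 * (32-bit abi_ulong) on a 64-bit host, guest mask word 1, bit 3 becomes
 * absolute bit 1 * 32 + 3 = 35, which lands in host_mask[0] as
 * 1UL << 35. The converse routine walks host bits and re-packs them into
 * 32-bit guest words the same way.
 */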
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch (num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            /* Remove the CPU from the list.  */
            QTAILQ_REMOVE_RCU(&cpus, cpu, node);

            pthread_mutex_unlock(&clone_lock);

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
                             NULL, NULL, 0);
            }
            thread_cpu = NULL;
            object_unref(OBJECT(cpu));
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            info.si_pid = 0;
            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void *p2 = NULL;
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;
            int total_size = 0;

            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                envc++;
            }

            argp = g_new0(char *, argc + 1);
            envp = g_new0(char *, envc + 1);

            for (gp = guest_argp, q = argp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
                total_size += strlen(*q) + 1;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            /* Although execve() is not an interruptible syscall it is
             * a special case where we must use the safe_syscall wrapper:
             * if we allow a signal to happen before we make the host
             * syscall then we will 'lose' it, because at the point of
             * execve the process leaves QEMU's control. So we use the
             * safe syscall wrapper to ensure that we either take the
             * signal as a guest signal, or else it does not happen
             * before the execve completes and makes it the other
             * program's problem.
             */
            ret = get_errno(safe_execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            for (gp = guest_argp, q = argp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }

            g_free(argp);
            g_free(envp);
        }
        return ret;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
                /* commit the converted struct back to guest memory */
                unlock_user(tmsp, arg1, sizeof(struct target_tms));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        {
            int host_flags;

            if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
                return -TARGET_EINVAL;
            }
            host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
            ret = get_errno(dup3(arg1, arg2, host_flags));
            if (ret >= 0) {
                fd_trans_dup(arg1, arg2);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act.ka_restorer = 0;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else {
                oact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
:
8585 sigset_t set
, oldset
, *set_ptr
;
8587 if (arg4
!= sizeof(target_sigset_t
)) {
8588 return -TARGET_EINVAL
;
8593 case TARGET_SIG_BLOCK
:
8596 case TARGET_SIG_UNBLOCK
:
8599 case TARGET_SIG_SETMASK
:
8603 return -TARGET_EINVAL
;
8605 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8606 return -TARGET_EFAULT
;
8607 target_to_host_sigset(&set
, p
);
8608 unlock_user(p
, arg2
, 0);
8614 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8615 if (!is_error(ret
) && arg3
) {
8616 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8617 return -TARGET_EFAULT
;
8618 host_to_target_sigset(p
, &oldset
);
8619 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                target_to_host_timespec(puts, arg3);
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
8780 #ifdef TARGET_NR_setrlimit
8781 case TARGET_NR_setrlimit
:
8783 int resource
= target_to_host_resource(arg1
);
8784 struct target_rlimit
*target_rlim
;
8786 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8787 return -TARGET_EFAULT
;
8788 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8789 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8790 unlock_user_struct(target_rlim
, arg2
, 0);
8792 * If we just passed through resource limit settings for memory then
8793 * they would also apply to QEMU's own allocations, and QEMU will
8794 * crash or hang or die if its allocations fail. Ideally we would
8795 * track the guest allocations in QEMU and apply the limits ourselves.
8796 * For now, just tell the guest the call succeeded but don't actually
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }
            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }
            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;
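            /*
             * Guest-side layout of that 6th argument, for illustration
             * (the field names here are descriptive, not from the kernel
             * headers):
             *
             *     struct {
             *         abi_ulong sigset;   // guest pointer to a target_sigset_t
             *         abi_ulong sigsize;  // must equal sizeof(target_sigset_t)
             *     };
             *
             * Both words are fetched via lock_user() below and byte-swapped
             * with tswapal() before use.
             */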
            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                return ret;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    return -TARGET_EFAULT;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = SIGSET_T_SIZE;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    return -TARGET_EFAULT;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        return -TARGET_EINVAL;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                          ts_ptr, sig_ptr));

            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    return -TARGET_EFAULT;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    return -TARGET_EFAULT;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    return -TARGET_EFAULT;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
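    /*
     * The readlink/readlinkat cases below special-case /proc/self/exe (and
     * /proc/<own-pid>/exe): they report the path of the emulated binary
     * rather than the QEMU interpreter, so a guest running, for instance,
     *
     *     readlink("/proc/self/exe", buf, sizeof(buf));
     *
     * sees the same answer it would see on real hardware.
     */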
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
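    /*
     * mmap2 takes its file offset in 4096-byte units, so a guest call such
     * as mmap2(NULL, len, prot, flags, fd, 0x10) maps from byte offset
     * 0x10000 (0x10 << MMAP_SHIFT). The shift is fixed at 12 regardless of
     * the target's page size.
     */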
    case TARGET_NR_munmap:
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority. Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
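    /*
     * Worked example for the getpriority bias above: the raw syscall
     * convention returns 20 - nice, so a host nice value of -20 (highest
     * priority) reaches the guest as 40 and nice 19 as 1; the result is
     * never negative and so can never be mistaken for an error code.
     */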
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                    return ret;
                }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1) {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semop(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
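    /*
     * For reference, the three kernel argument orders handled above:
     *
     *     CONFIG_CLONE_BACKWARDS:  flags, stack, ptid, tls,  ctid
     *     CONFIG_CLONE_BACKWARDS2: stack, flags, ptid, ctid, tls
     *     default:                 flags, stack, ptid, ctid, tls
     *
     * do_fork() expects (env, flags, stack, ptid, tls, ctid), which is why
     * the trailing arguments are permuted differently in each branch.
     */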
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname *buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent. We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
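    /*
     * Size sanity for the in-place dirent64 -> dirent conversion above,
     * using a 5-byte name ("hello") on a 32-bit target as a worked example:
     * d_name sits at offset 10 in target_dirent, so treclen is
     * 10 + 5 + 2 = 17, rounded up to 20 by QEMU_ALIGN_UP(..., 4). The source
     * linux_dirent64 record is at least 19 (offset of d_name) plus the
     * NUL-terminated name, aligned to 8, i.e. 32 bytes here, which is why a
     * target_dirent never outgrows the dirent64 it replaces.
     */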
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                    return -TARGET_EINVAL;
                }
                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    return -TARGET_EFAULT;
                }

                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
            {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EFAULT;
                    }
                } else {
                    timeout_ts = NULL;
                }

                if (arg4) {
                    if (arg5 != sizeof(target_sigset_t)) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EINVAL;
                    }

                    target_set = lock_user(VERIFY_READ, arg4,
                                           sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                           set, SIGSET_T_SIZE));

                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
                break;
            }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
            {
                struct timespec ts, *pts;

                if (arg3 >= 0) {
                    /* Convert ms to secs, ns */
                    ts.tv_sec = arg3 / 1000;
                    ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                    pts = &ts;
                } else {
                    /* -ve poll() timeout means "infinite" */
                    pts = NULL;
                }
                ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                break;
            }
# endif
            default:
                g_assert_not_reached();
            }

            if (!is_error(ret)) {
                for (i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        return ret;
#endif
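    /*
     * Numeric check for the poll() timeout conversion above: a guest timeout
     * of 1234 ms becomes { .tv_sec = 1, .tv_nsec = 234000000 }, while any
     * negative timeout is passed to safe_ppoll() as a NULL timespec, meaning
     * block indefinitely.
     */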
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        return -TARGET_ENOTDIR;
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
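            /*
             * Example of the mismatch, assuming a 32-bit guest on a 64-bit
             * host: a guest buffer of arg2 = 12 bytes is a valid multiple of
             * the 4-byte abi_ulong, but the host mask below is rounded up to
             * mask_size = 16 so that sys_sched_getaffinity() can write whole
             * 8-byte host longs; the copy-out then trims back to what the
             * guest asked for.
             */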
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
            {
                int deathsig;
                ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
                if (!is_error(ret) && arg2
                    && put_user_ual(deathsig, arg2)) {
                    return -TARGET_EFAULT;
                }
                return ret;
            }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
            {
                void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
                if (!name) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 16);
                return ret;
            }
        case PR_SET_NAME:
            {
                void *name = lock_user(VERIFY_READ, arg2, 16, 1);
                if (!name) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 0);
                return ret;
            }
#endif /* PR_GET_NAME */
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
            {
                CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
                ret = 0;
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    ret |= TARGET_PR_FP_MODE_FR;
                }
                if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                    ret |= TARGET_PR_FP_MODE_FRE;
                }
                return ret;
            }
        case TARGET_PR_SET_FP_MODE:
            {
                CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
                bool old_fr = env->CP0_Status & (1 << CP0St_FR);
                bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
                bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
                bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
                const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                                TARGET_PR_FP_MODE_FRE;

                /* If nothing to change, return right away, successfully. */
                if (old_fr == new_fr && old_fre == new_fre) {
                    return 0;
                }
                /* Check the value is valid */
                if (arg2 & ~known_bits) {
                    return -TARGET_EOPNOTSUPP;
                }
                /* Setting FRE without FR is not supported. */
                if (new_fre && !new_fr) {
                    return -TARGET_EOPNOTSUPP;
                }
                if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                    /* FR1 is not supported */
                    return -TARGET_EOPNOTSUPP;
                }
                if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                    && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                    /* cannot set FR=0 */
                    return -TARGET_EOPNOTSUPP;
                }
                if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                    /* Cannot set FRE=1 */
                    return -TARGET_EOPNOTSUPP;
                }

                int i;
                fpr_t *fpr = env->active_fpu.fpr;
                for (i = 0; i < 32 ; i += 2) {
                    if (!old_fr && new_fr) {
                        fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                    } else if (old_fr && !new_fr) {
                        fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                    }
                }

                if (new_fr) {
                    env->CP0_Status |= (1 << CP0St_FR);
                    env->hflags |= MIPS_HFLAG_F64;
                } else {
                    env->CP0_Status &= ~(1 << CP0St_FR);
                    env->hflags &= ~MIPS_HFLAG_F64;
                }
                if (new_fre) {
                    env->CP0_Config5 |= (1 << CP0C5_FRE);
                    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                        env->hflags |= MIPS_HFLAG_FRE;
                    }
                } else {
                    env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                    env->hflags &= ~MIPS_HFLAG_FRE;
                }

                return 0;
            }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT. Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
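            /*
             * The vector length in arg2 is in bytes and must be a multiple
             * of 16: for example arg2 = 32 selects VQ = 2, i.e. 256-bit
             * vectors. The value is clamped to the cpu's sve_max_vq below
             * before being written back to ZCR_EL1.
             */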
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto. The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
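    /*
     * On 32-bit ABIs that pass 64-bit values in aligned register pairs
     * (the cases regpairs_aligned() answers for above), the 64-bit file
     * offset starts one register later, which is why arg4/arg5 are shifted
     * down from arg5/arg6 before target_offset64() combines them.
     */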
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        {
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;

            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                return -TARGET_EFAULT;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);

            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }

            target_datalen = sizeof(*target_data) * data_items;

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2,
                                            target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2,
                                            target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    return -TARGET_EFAULT;
                }

                if (num == TARGET_NR_capset) {
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }

                dataptr = data;
            }

            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }

            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            return ret;
        }
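    /*
     * Note on the capget/capset handling above: with header version
     * _LINUX_CAPABILITY_VERSION (v1) the kernel reads or writes a single
     * __user_cap_data_struct, while v2 and v3 headers use an array of two,
     * covering 64 capability bits; data_items reflects that so the loops
     * swap exactly as many words as the kernel will touch.
     */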
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
        {
            struct rlimit rlim;
            int resource = target_to_host_resource(arg1);
            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                struct target_rlimit *target_rlim;
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
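    /*
     * The statx case above tries the host statx() first and, when the host
     * kernel lacks it (-ENOSYS), falls back to fstatat() plus a field-by-
     * field copy; only the fields fstatat() can provide are filled in, the
     * rest of the target_statx stays zeroed from the memset().
     */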
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
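    /*
     * The low2high/high2low conversions in the *chown and set*id cases above
     * exist for targets whose legacy syscalls use 16-bit uid_t/gid_t: a host
     * id too large for 16 bits is reported to the guest as the overflow id
     * (65534), mirroring the kernel's own compat behaviour.
     */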
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
10947 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10948 /* Alpha specific */
10949 case TARGET_NR_osf_getsysinfo
:
10950 ret
= -TARGET_EOPNOTSUPP
;
10952 case TARGET_GSI_IEEE_FP_CONTROL
:
10954 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10955 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
10957 swcr
&= ~SWCR_STATUS_MASK
;
10958 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10960 if (put_user_u64 (swcr
, arg2
))
10961 return -TARGET_EFAULT
;
10966 /* case GSI_IEEE_STATE_AT_SIGNAL:
10967 -- Not implemented in linux kernel.
10969 -- Retrieves current unaligned access state; not much used.
10970 case GSI_PROC_TYPE:
10971 -- Retrieves implver information; surely not used.
10972 case GSI_GET_HWRPB:
10973 -- Grabs a copy of the HWRPB; surely not used.
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled. */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr. */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
           -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif
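    /*
     * regpairs_aligned() is true for 32-bit ABIs that pass 64-bit
     * syscall arguments in aligned (even/odd) register pairs; the
     * kernel inserts a pad slot in that case, which is why the
     * shuffles above move every argument down by one register.
     */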
#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
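    /*
     * The TARGET_S390X remapping above reflects that s390x numbers
     * POSIX_FADV_DONTNEED/POSIX_FADV_NOREUSE as 6/7 rather than the
     * usual 4/5; guest values 4 and 5 are deliberately mapped to
     * invalid host advice values so the host call fails with EINVAL.
     */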
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
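    /*
     * The OABI copy helpers are selected above because the old ARM
     * OABI apparently lays out struct flock64 without the 64-bit
     * alignment padding the EABI inserts, so the field offsets differ
     * between the two ABIs.
     */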
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
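    /*
     * As with the host calls, a zero arg2 leaves b NULL so that
     * listxattr()/llistxattr() merely report how large a buffer the
     * caller would need.
     */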
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
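    /*
     * The *_time64 variants exist for 32-bit guests: the guest-side
     * structure uses the 64-bit time_t layout (hence the separate
     * target_to_host_timespec64() converter), while the host call is
     * the ordinary clock_settime().
     */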
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
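    /*
     * Per utimensat(2), a NULL tsp asks the kernel to set both
     * timestamps to the current time; the UTIME_NOW/UTIME_OMIT
     * special nanosecond values should pass through the timespec
     * conversion unchanged.
     */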
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
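    /*
     * safe_mq_timedsend() is one of the safe_syscall wrappers: it
     * issues the host syscall directly so that a guest signal
     * arriving mid-call can be reported as -TARGET_ERESTARTSYS and
     * restarted correctly rather than racing with libc.
     */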
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif /* CONFIG_SYNC_FILE_RANGE */
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
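    /*
     * Note the maxevents bound against TARGET_EP_MAX_EVENTS before
     * the g_try_new() allocation, so a guest cannot demand an
     * oversized host buffer; epoll_wait is treated as epoll_pwait
     * with a NULL signal mask, letting both cases share
     * safe_epoll_pwait().
     */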
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
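    /*
     * New limits for RLIMIT_AS/RLIMIT_DATA/RLIMIT_STACK are quietly
     * dropped above (rnewp stays NULL for them), presumably because
     * applying them would constrain the QEMU process itself, which
     * hosts the guest's entire address space.
     */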
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
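    /*
     * The timer id handed back to the guest is not a host timer_t:
     * it is an index into g_posix_timers tagged with TIMER_MAGIC.
     * get_timer_id() in the timer_settime/gettime/delete cases below
     * checks the tag and recovers the index.
     */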
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
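/*
 * do_syscall() below is the thin wrapper around do_syscall1(): it feeds
 * the syscall trace hooks (record_syscall_start/record_syscall_return)
 * and, when -strace logging is enabled, prints each call and its result.
 */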
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(num, ret);
    }

    record_syscall_return(cpu, num, ret);