4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #include "linux_loop.h"
119 #include "qemu/guest-random.h"
120 #include "user/syscall-trace.h"
121 #include "qapi/error.h"
122 #include "fd-trans.h"
126 #define CLONE_IO 0x80000000 /* Clone io context */
129 /* We can't directly call the host clone syscall, because this will
130 * badly confuse libc (breaking mutexes, for example). So we must
131 * divide clone flags into:
132 * * flag combinations that look like pthread_create()
133 * * flag combinations that look like fork()
134 * * flags we can implement within QEMU itself
135 * * flags we can't support and will return an error for
137 /* For thread creation, all these flags must be present; for
138 * fork, none must be present.
140 #define CLONE_THREAD_FLAGS \
141 (CLONE_VM | CLONE_FS | CLONE_FILES | \
142 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
144 /* These flags are ignored:
145 * CLONE_DETACHED is now ignored by the kernel;
146 * CLONE_IO is just an optimisation hint to the I/O scheduler
148 #define CLONE_IGNORED_FLAGS \
149 (CLONE_DETACHED | CLONE_IO)
151 /* Flags for fork which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_FORK_FLAGS \
153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
156 /* Flags for thread creation which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_THREAD_FLAGS \
158 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
159 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
161 #define CLONE_INVALID_FORK_FLAGS \
162 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
164 #define CLONE_INVALID_THREAD_FLAGS \
165 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
166 CLONE_IGNORED_FLAGS))
168 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
169 * have almost all been allocated. We cannot support any of
170 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
171 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
172 * The checks against the invalid thread masks above will catch these.
173 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
176 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
177 * once. This exercises the codepaths for restart.
179 //#define DEBUG_ERESTARTSYS
181 //#include <linux/msdos_fs.h>
182 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
183 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * Wrappers that issue a raw host syscall with 0..6 arguments.  Each
 * expansion defines a static function "name" that invokes __NR_<name>
 * directly via syscall(2), bypassing any libc wrapper.
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
240 #define __NR_sys_uname __NR_uname
241 #define __NR_sys_getcwd1 __NR_getcwd
242 #define __NR_sys_getdents __NR_getdents
243 #define __NR_sys_getdents64 __NR_getdents64
244 #define __NR_sys_getpriority __NR_getpriority
245 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
246 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
247 #define __NR_sys_syslog __NR_syslog
248 #if defined(__NR_futex)
249 # define __NR_sys_futex __NR_futex
251 #if defined(__NR_futex_time64)
252 # define __NR_sys_futex_time64 __NR_futex_time64
254 #define __NR_sys_inotify_init __NR_inotify_init
255 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
256 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
257 #define __NR_sys_statx __NR_statx
259 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
260 #define __NR__llseek __NR_lseek
263 /* Newer kernel ports have llseek() instead of _llseek() */
264 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
265 #define TARGET_NR__llseek TARGET_NR_llseek
268 #define __NR_sys_gettid __NR_gettid
269 _syscall0(int, sys_gettid
)
271 /* For the 64-bit guest on 32-bit host case we must emulate
272 * getdents using getdents64, because otherwise the host
273 * might hand us back more dirent records than we can fit
274 * into the guest buffer after structure format conversion.
275 * Otherwise we emulate getdents with getdents if the host has it.
277 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
278 #define EMULATE_GETDENTS_WITH_GETDENTS
281 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
282 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
284 #if (defined(TARGET_NR_getdents) && \
285 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
286 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
287 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
289 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
290 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
291 loff_t
*, res
, uint
, wh
);
293 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
294 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
296 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
297 #ifdef __NR_exit_group
298 _syscall1(int,exit_group
,int,error_code
)
300 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
301 _syscall1(int,set_tid_address
,int *,tidptr
)
303 #if defined(__NR_futex)
304 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
305 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
307 #if defined(__NR_futex_time64)
308 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
309 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
311 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
312 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
313 unsigned long *, user_mask_ptr
);
314 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
315 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
316 unsigned long *, user_mask_ptr
);
317 #define __NR_sys_getcpu __NR_getcpu
318 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
319 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
321 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
322 struct __user_cap_data_struct
*, data
);
323 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
324 struct __user_cap_data_struct
*, data
);
325 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
326 _syscall2(int, ioprio_get
, int, which
, int, who
)
328 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
329 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
331 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
332 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
335 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
336 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
337 unsigned long, idx1
, unsigned long, idx2
)
341 * It is assumed that struct statx is architecture independent.
343 #if defined(TARGET_NR_statx) && defined(__NR_statx)
344 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
345 unsigned int, mask
, struct target_statx
*, statxbuf
)
347 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
348 _syscall2(int, membarrier
, int, cmd
, int, flags
)
351 static bitmask_transtbl fcntl_flags_tbl
[] = {
352 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
353 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
354 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
355 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
356 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
357 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
358 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
359 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
360 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
361 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
362 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
363 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
364 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
365 #if defined(O_DIRECT)
366 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
368 #if defined(O_NOATIME)
369 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
371 #if defined(O_CLOEXEC)
372 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
375 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
377 #if defined(O_TMPFILE)
378 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
380 /* Don't terminate the list prematurely on 64-bit host+guest. */
381 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
382 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/*
 * getcwd emulation with kernel syscall semantics: on success return the
 * length of the path *including* the trailing NUL byte (what the raw
 * getcwd syscall reports), on failure return -1 with errno set by
 * getcwd(3).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel lacks utimensat: fail with ENOSYS like the real syscall. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/*
 * Host kernel lacks renameat2: fall back to plain renameat() when no
 * flags are requested, otherwise fail with ENOSYS (the flags have no
 * renameat() equivalent).
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the syscall layer has a uniform sys_* entry point. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/*
 * Claim the first free slot in g_posix_timers and return its index, or
 * -1 if all slots are in use.  The slot is marked busy with a dummy
 * non-zero value until timer_create() stores the real timer id.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
532 #define ERRNO_TABLE_SIZE 1200
534 /* target_to_host_errno_table[] is initialized from
535 * host_to_target_errno_table[] in syscall_init(). */
536 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
540 * This list is the union of errno values overridden in asm-<arch>/errno.h
541 * minus the errnos that are not actually generic to all archs.
543 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
544 [EAGAIN
] = TARGET_EAGAIN
,
545 [EIDRM
] = TARGET_EIDRM
,
546 [ECHRNG
] = TARGET_ECHRNG
,
547 [EL2NSYNC
] = TARGET_EL2NSYNC
,
548 [EL3HLT
] = TARGET_EL3HLT
,
549 [EL3RST
] = TARGET_EL3RST
,
550 [ELNRNG
] = TARGET_ELNRNG
,
551 [EUNATCH
] = TARGET_EUNATCH
,
552 [ENOCSI
] = TARGET_ENOCSI
,
553 [EL2HLT
] = TARGET_EL2HLT
,
554 [EDEADLK
] = TARGET_EDEADLK
,
555 [ENOLCK
] = TARGET_ENOLCK
,
556 [EBADE
] = TARGET_EBADE
,
557 [EBADR
] = TARGET_EBADR
,
558 [EXFULL
] = TARGET_EXFULL
,
559 [ENOANO
] = TARGET_ENOANO
,
560 [EBADRQC
] = TARGET_EBADRQC
,
561 [EBADSLT
] = TARGET_EBADSLT
,
562 [EBFONT
] = TARGET_EBFONT
,
563 [ENOSTR
] = TARGET_ENOSTR
,
564 [ENODATA
] = TARGET_ENODATA
,
565 [ETIME
] = TARGET_ETIME
,
566 [ENOSR
] = TARGET_ENOSR
,
567 [ENONET
] = TARGET_ENONET
,
568 [ENOPKG
] = TARGET_ENOPKG
,
569 [EREMOTE
] = TARGET_EREMOTE
,
570 [ENOLINK
] = TARGET_ENOLINK
,
571 [EADV
] = TARGET_EADV
,
572 [ESRMNT
] = TARGET_ESRMNT
,
573 [ECOMM
] = TARGET_ECOMM
,
574 [EPROTO
] = TARGET_EPROTO
,
575 [EDOTDOT
] = TARGET_EDOTDOT
,
576 [EMULTIHOP
] = TARGET_EMULTIHOP
,
577 [EBADMSG
] = TARGET_EBADMSG
,
578 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
579 [EOVERFLOW
] = TARGET_EOVERFLOW
,
580 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
581 [EBADFD
] = TARGET_EBADFD
,
582 [EREMCHG
] = TARGET_EREMCHG
,
583 [ELIBACC
] = TARGET_ELIBACC
,
584 [ELIBBAD
] = TARGET_ELIBBAD
,
585 [ELIBSCN
] = TARGET_ELIBSCN
,
586 [ELIBMAX
] = TARGET_ELIBMAX
,
587 [ELIBEXEC
] = TARGET_ELIBEXEC
,
588 [EILSEQ
] = TARGET_EILSEQ
,
589 [ENOSYS
] = TARGET_ENOSYS
,
590 [ELOOP
] = TARGET_ELOOP
,
591 [ERESTART
] = TARGET_ERESTART
,
592 [ESTRPIPE
] = TARGET_ESTRPIPE
,
593 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
594 [EUSERS
] = TARGET_EUSERS
,
595 [ENOTSOCK
] = TARGET_ENOTSOCK
,
596 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
597 [EMSGSIZE
] = TARGET_EMSGSIZE
,
598 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
599 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
600 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
601 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
602 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
603 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
604 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
605 [EADDRINUSE
] = TARGET_EADDRINUSE
,
606 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
607 [ENETDOWN
] = TARGET_ENETDOWN
,
608 [ENETUNREACH
] = TARGET_ENETUNREACH
,
609 [ENETRESET
] = TARGET_ENETRESET
,
610 [ECONNABORTED
] = TARGET_ECONNABORTED
,
611 [ECONNRESET
] = TARGET_ECONNRESET
,
612 [ENOBUFS
] = TARGET_ENOBUFS
,
613 [EISCONN
] = TARGET_EISCONN
,
614 [ENOTCONN
] = TARGET_ENOTCONN
,
615 [EUCLEAN
] = TARGET_EUCLEAN
,
616 [ENOTNAM
] = TARGET_ENOTNAM
,
617 [ENAVAIL
] = TARGET_ENAVAIL
,
618 [EISNAM
] = TARGET_EISNAM
,
619 [EREMOTEIO
] = TARGET_EREMOTEIO
,
620 [EDQUOT
] = TARGET_EDQUOT
,
621 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
622 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
623 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
624 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
625 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
626 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
627 [EALREADY
] = TARGET_EALREADY
,
628 [EINPROGRESS
] = TARGET_EINPROGRESS
,
629 [ESTALE
] = TARGET_ESTALE
,
630 [ECANCELED
] = TARGET_ECANCELED
,
631 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
632 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
634 [ENOKEY
] = TARGET_ENOKEY
,
637 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
640 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
643 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
646 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
648 #ifdef ENOTRECOVERABLE
649 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
652 [ENOMSG
] = TARGET_ENOMSG
,
655 [ERFKILL
] = TARGET_ERFKILL
,
658 [EHWPOISON
] = TARGET_EHWPOISON
,
662 static inline int host_to_target_errno(int err
)
664 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
665 host_to_target_errno_table
[err
]) {
666 return host_to_target_errno_table
[err
];
671 static inline int target_to_host_errno(int err
)
673 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
674 target_to_host_errno_table
[err
]) {
675 return target_to_host_errno_table
[err
];
680 static inline abi_long
get_errno(abi_long ret
)
683 return -host_to_target_errno(errno
);
688 const char *target_strerror(int err
)
690 if (err
== TARGET_ERESTARTSYS
) {
691 return "To be restarted";
693 if (err
== TARGET_QEMU_ESIGRETURN
) {
694 return "Successful exit from sigreturn";
697 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
700 return strerror(target_to_host_errno(err
));
/*
 * safe_syscallN wrappers: like _syscallN but routed through
 * safe_syscall(), which guarantees signals are handled either entirely
 * before or entirely after the host syscall, so blocking syscalls can
 * be restarted correctly on guest signal delivery.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
750 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
751 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
752 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
753 int, flags
, mode_t
, mode
)
754 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
755 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
756 struct rusage
*, rusage
)
758 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
759 int, options
, struct rusage
*, rusage
)
760 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
761 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
762 defined(TARGET_NR_pselect6)
763 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
764 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
766 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
767 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
768 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
771 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
772 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
774 #if defined(__NR_futex)
775 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
776 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
778 #if defined(__NR_futex_time64)
779 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
780 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
782 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
783 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
784 safe_syscall2(int, tkill
, int, tid
, int, sig
)
785 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
786 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
787 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
788 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
789 unsigned long, pos_l
, unsigned long, pos_h
)
790 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
791 unsigned long, pos_l
, unsigned long, pos_h
)
792 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
794 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
795 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
796 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
797 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
798 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
799 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
800 safe_syscall2(int, flock
, int, fd
, int, operation
)
801 #ifdef TARGET_NR_rt_sigtimedwait
802 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
803 const struct timespec
*, uts
, size_t, sigsetsize
)
805 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
807 #if defined(TARGET_NR_nanosleep)
808 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
809 struct timespec
*, rem
)
811 #ifdef TARGET_NR_clock_nanosleep
812 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
813 const struct timespec
*, req
, struct timespec
*, rem
)
816 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
817 void *, ptr
, long, fifth
)
820 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
824 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
825 long, msgtype
, int, flags
)
827 #ifdef __NR_semtimedop
828 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
829 unsigned, nsops
, const struct timespec
*, timeout
)
831 #ifdef TARGET_NR_mq_timedsend
832 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
833 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
835 #ifdef TARGET_NR_mq_timedreceive
836 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
837 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
839 /* We do ioctl like this rather than via safe_syscall3 to preserve the
840 * "third argument might be integer or pointer or not present" behaviour of
843 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
844 /* Similarly for fcntl. Note that callers must always:
845 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
846 * use the flock64 struct rather than unsuffixed flock
847 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
850 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
852 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
855 static inline int host_to_target_sock_type(int host_type
)
859 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
861 target_type
= TARGET_SOCK_DGRAM
;
864 target_type
= TARGET_SOCK_STREAM
;
867 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
871 #if defined(SOCK_CLOEXEC)
872 if (host_type
& SOCK_CLOEXEC
) {
873 target_type
|= TARGET_SOCK_CLOEXEC
;
877 #if defined(SOCK_NONBLOCK)
878 if (host_type
& SOCK_NONBLOCK
) {
879 target_type
|= TARGET_SOCK_NONBLOCK
;
886 static abi_ulong target_brk
;
887 static abi_ulong target_original_brk
;
888 static abi_ulong brk_page
;
890 void target_set_brk(abi_ulong new_brk
)
892 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
893 brk_page
= HOST_PAGE_ALIGN(target_brk
);
896 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
897 #define DEBUGF_BRK(message, args...)
899 /* do_brk() must return target values and target errnos. */
900 abi_long
do_brk(abi_ulong new_brk
)
902 abi_long mapped_addr
;
903 abi_ulong new_alloc_size
;
905 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
908 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
911 if (new_brk
< target_original_brk
) {
912 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
917 /* If the new brk is less than the highest page reserved to the
918 * target heap allocation, set it and we're almost done... */
919 if (new_brk
<= brk_page
) {
920 /* Heap contents are initialized to zero, as for anonymous
922 if (new_brk
> target_brk
) {
923 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
925 target_brk
= new_brk
;
926 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
930 /* We need to allocate more memory after the brk... Note that
931 * we don't use MAP_FIXED because that will map over the top of
932 * any existing mapping (like the one with the host libc or qemu
933 * itself); instead we treat "mapped but at wrong address" as
934 * a failure and unmap again.
936 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
937 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
938 PROT_READ
|PROT_WRITE
,
939 MAP_ANON
|MAP_PRIVATE
, 0, 0));
941 if (mapped_addr
== brk_page
) {
942 /* Heap contents are initialized to zero, as for anonymous
943 * mapped pages. Technically the new pages are already
944 * initialized to zero since they *are* anonymous mapped
945 * pages, however we have to take care with the contents that
946 * come from the remaining part of the previous page: it may
947 * contains garbage data due to a previous heap usage (grown
949 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
951 target_brk
= new_brk
;
952 brk_page
= HOST_PAGE_ALIGN(target_brk
);
953 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
956 } else if (mapped_addr
!= -1) {
957 /* Mapped but at wrong address, meaning there wasn't actually
958 * enough space for this brk.
960 target_munmap(mapped_addr
, new_alloc_size
);
962 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
965 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
968 #if defined(TARGET_ALPHA)
969 /* We (partially) emulate OSF/1 on Alpha, which requires we
970 return a proper errno, not an unchanged brk value. */
971 return -TARGET_ENOMEM
;
973 /* For everything else, return the previous break. */
977 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
978 defined(TARGET_NR_pselect6)
979 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
980 abi_ulong target_fds_addr
,
984 abi_ulong b
, *target_fds
;
986 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
987 if (!(target_fds
= lock_user(VERIFY_READ
,
989 sizeof(abi_ulong
) * nw
,
991 return -TARGET_EFAULT
;
995 for (i
= 0; i
< nw
; i
++) {
996 /* grab the abi_ulong */
997 __get_user(b
, &target_fds
[i
]);
998 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
999 /* check the bit inside the abi_ulong */
1006 unlock_user(target_fds
, target_fds_addr
, 0);
1011 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1012 abi_ulong target_fds_addr
,
1015 if (target_fds_addr
) {
1016 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1017 return -TARGET_EFAULT
;
1025 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1031 abi_ulong
*target_fds
;
1033 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1034 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1036 sizeof(abi_ulong
) * nw
,
1038 return -TARGET_EFAULT
;
1041 for (i
= 0; i
< nw
; i
++) {
1043 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1044 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1047 __put_user(v
, &target_fds
[i
]);
1050 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1056 #if defined(__alpha__)
1057 #define HOST_HZ 1024
1062 static inline abi_long
host_to_target_clock_t(long ticks
)
1064 #if HOST_HZ == TARGET_HZ
1067 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1071 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1072 const struct rusage
*rusage
)
1074 struct target_rusage
*target_rusage
;
1076 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1077 return -TARGET_EFAULT
;
1078 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1079 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1080 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1081 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1082 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1083 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1084 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1085 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1086 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1087 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1088 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1089 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1090 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1091 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1092 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1093 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1094 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1095 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1096 unlock_user_struct(target_rusage
, target_addr
, 1);
1101 #ifdef TARGET_NR_setrlimit
1102 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1104 abi_ulong target_rlim_swap
;
1107 target_rlim_swap
= tswapal(target_rlim
);
1108 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1109 return RLIM_INFINITY
;
1111 result
= target_rlim_swap
;
1112 if (target_rlim_swap
!= (rlim_t
)result
)
1113 return RLIM_INFINITY
;
1119 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1120 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1122 abi_ulong target_rlim_swap
;
1125 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1126 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1128 target_rlim_swap
= rlim
;
1129 result
= tswapal(target_rlim_swap
);
1135 static inline int target_to_host_resource(int code
)
1138 case TARGET_RLIMIT_AS
:
1140 case TARGET_RLIMIT_CORE
:
1142 case TARGET_RLIMIT_CPU
:
1144 case TARGET_RLIMIT_DATA
:
1146 case TARGET_RLIMIT_FSIZE
:
1147 return RLIMIT_FSIZE
;
1148 case TARGET_RLIMIT_LOCKS
:
1149 return RLIMIT_LOCKS
;
1150 case TARGET_RLIMIT_MEMLOCK
:
1151 return RLIMIT_MEMLOCK
;
1152 case TARGET_RLIMIT_MSGQUEUE
:
1153 return RLIMIT_MSGQUEUE
;
1154 case TARGET_RLIMIT_NICE
:
1156 case TARGET_RLIMIT_NOFILE
:
1157 return RLIMIT_NOFILE
;
1158 case TARGET_RLIMIT_NPROC
:
1159 return RLIMIT_NPROC
;
1160 case TARGET_RLIMIT_RSS
:
1162 case TARGET_RLIMIT_RTPRIO
:
1163 return RLIMIT_RTPRIO
;
1164 case TARGET_RLIMIT_SIGPENDING
:
1165 return RLIMIT_SIGPENDING
;
1166 case TARGET_RLIMIT_STACK
:
1167 return RLIMIT_STACK
;
1173 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1174 abi_ulong target_tv_addr
)
1176 struct target_timeval
*target_tv
;
1178 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1179 return -TARGET_EFAULT
;
1182 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1183 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1185 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1190 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1191 const struct timeval
*tv
)
1193 struct target_timeval
*target_tv
;
1195 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1196 return -TARGET_EFAULT
;
1199 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1200 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1202 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1207 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1208 const struct timeval
*tv
)
1210 struct target__kernel_sock_timeval
*target_tv
;
1212 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1213 return -TARGET_EFAULT
;
1216 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1217 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1219 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1224 #if defined(TARGET_NR_futex) || \
1225 defined(TARGET_NR_rt_sigtimedwait) || \
1226 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1227 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1228 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1229 defined(TARGET_NR_mq_timedreceive)
1230 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1231 abi_ulong target_addr
)
1233 struct target_timespec
*target_ts
;
1235 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1236 return -TARGET_EFAULT
;
1238 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1239 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1240 unlock_user_struct(target_ts
, target_addr
, 0);
1245 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1246 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1247 abi_ulong target_addr
)
1249 struct target__kernel_timespec
*target_ts
;
1251 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1252 return -TARGET_EFAULT
;
1254 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1255 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1256 unlock_user_struct(target_ts
, target_addr
, 0);
1261 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1262 struct timespec
*host_ts
)
1264 struct target_timespec
*target_ts
;
1266 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1267 return -TARGET_EFAULT
;
1269 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1270 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1271 unlock_user_struct(target_ts
, target_addr
, 1);
1275 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1276 struct timespec
*host_ts
)
1278 struct target__kernel_timespec
*target_ts
;
1280 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1281 return -TARGET_EFAULT
;
1283 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1284 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1285 unlock_user_struct(target_ts
, target_addr
, 1);
1289 #if defined(TARGET_NR_gettimeofday)
1290 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1291 struct timezone
*tz
)
1293 struct target_timezone
*target_tz
;
1295 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1296 return -TARGET_EFAULT
;
1299 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1300 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1302 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1308 #if defined(TARGET_NR_settimeofday)
1309 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1310 abi_ulong target_tz_addr
)
1312 struct target_timezone
*target_tz
;
1314 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1315 return -TARGET_EFAULT
;
1318 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1319 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1321 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1327 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1330 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1331 abi_ulong target_mq_attr_addr
)
1333 struct target_mq_attr
*target_mq_attr
;
1335 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1336 target_mq_attr_addr
, 1))
1337 return -TARGET_EFAULT
;
1339 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1340 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1341 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1342 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1344 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1349 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1350 const struct mq_attr
*attr
)
1352 struct target_mq_attr
*target_mq_attr
;
1354 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1355 target_mq_attr_addr
, 0))
1356 return -TARGET_EFAULT
;
1358 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1359 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1360 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1361 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1363 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1369 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1370 /* do_select() must return target values and target errnos. */
1371 static abi_long
do_select(int n
,
1372 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1373 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1375 fd_set rfds
, wfds
, efds
;
1376 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1378 struct timespec ts
, *ts_ptr
;
1381 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1385 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1389 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1394 if (target_tv_addr
) {
1395 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1396 return -TARGET_EFAULT
;
1397 ts
.tv_sec
= tv
.tv_sec
;
1398 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1404 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1407 if (!is_error(ret
)) {
1408 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1409 return -TARGET_EFAULT
;
1410 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1411 return -TARGET_EFAULT
;
1412 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1413 return -TARGET_EFAULT
;
1415 if (target_tv_addr
) {
1416 tv
.tv_sec
= ts
.tv_sec
;
1417 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1418 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1419 return -TARGET_EFAULT
;
1427 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1428 static abi_long
do_old_select(abi_ulong arg1
)
1430 struct target_sel_arg_struct
*sel
;
1431 abi_ulong inp
, outp
, exp
, tvp
;
1434 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1435 return -TARGET_EFAULT
;
1438 nsel
= tswapal(sel
->n
);
1439 inp
= tswapal(sel
->inp
);
1440 outp
= tswapal(sel
->outp
);
1441 exp
= tswapal(sel
->exp
);
1442 tvp
= tswapal(sel
->tvp
);
1444 unlock_user_struct(sel
, arg1
, 0);
1446 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1451 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1454 return pipe2(host_pipe
, flags
);
1460 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1461 int flags
, int is_pipe2
)
1465 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1468 return get_errno(ret
);
1470 /* Several targets have special calling conventions for the original
1471 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1473 #if defined(TARGET_ALPHA)
1474 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1475 return host_pipe
[0];
1476 #elif defined(TARGET_MIPS)
1477 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1478 return host_pipe
[0];
1479 #elif defined(TARGET_SH4)
1480 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1481 return host_pipe
[0];
1482 #elif defined(TARGET_SPARC)
1483 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1484 return host_pipe
[0];
1488 if (put_user_s32(host_pipe
[0], pipedes
)
1489 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1490 return -TARGET_EFAULT
;
1491 return get_errno(ret
);
1494 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1495 abi_ulong target_addr
,
1498 struct target_ip_mreqn
*target_smreqn
;
1500 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1502 return -TARGET_EFAULT
;
1503 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1504 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1505 if (len
== sizeof(struct target_ip_mreqn
))
1506 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1507 unlock_user(target_smreqn
, target_addr
, 0);
1512 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1513 abi_ulong target_addr
,
1516 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1517 sa_family_t sa_family
;
1518 struct target_sockaddr
*target_saddr
;
1520 if (fd_trans_target_to_host_addr(fd
)) {
1521 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1524 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1526 return -TARGET_EFAULT
;
1528 sa_family
= tswap16(target_saddr
->sa_family
);
1530 /* Oops. The caller might send a incomplete sun_path; sun_path
1531 * must be terminated by \0 (see the manual page), but
1532 * unfortunately it is quite common to specify sockaddr_un
1533 * length as "strlen(x->sun_path)" while it should be
1534 * "strlen(...) + 1". We'll fix that here if needed.
1535 * Linux kernel has a similar feature.
1538 if (sa_family
== AF_UNIX
) {
1539 if (len
< unix_maxlen
&& len
> 0) {
1540 char *cp
= (char*)target_saddr
;
1542 if ( cp
[len
-1] && !cp
[len
] )
1545 if (len
> unix_maxlen
)
1549 memcpy(addr
, target_saddr
, len
);
1550 addr
->sa_family
= sa_family
;
1551 if (sa_family
== AF_NETLINK
) {
1552 struct sockaddr_nl
*nladdr
;
1554 nladdr
= (struct sockaddr_nl
*)addr
;
1555 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1556 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1557 } else if (sa_family
== AF_PACKET
) {
1558 struct target_sockaddr_ll
*lladdr
;
1560 lladdr
= (struct target_sockaddr_ll
*)addr
;
1561 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1562 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1564 unlock_user(target_saddr
, target_addr
, 0);
1569 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1570 struct sockaddr
*addr
,
1573 struct target_sockaddr
*target_saddr
;
1580 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1582 return -TARGET_EFAULT
;
1583 memcpy(target_saddr
, addr
, len
);
1584 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1585 sizeof(target_saddr
->sa_family
)) {
1586 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1588 if (addr
->sa_family
== AF_NETLINK
&&
1589 len
>= sizeof(struct target_sockaddr_nl
)) {
1590 struct target_sockaddr_nl
*target_nl
=
1591 (struct target_sockaddr_nl
*)target_saddr
;
1592 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1593 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1594 } else if (addr
->sa_family
== AF_PACKET
) {
1595 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1596 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1597 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1598 } else if (addr
->sa_family
== AF_INET6
&&
1599 len
>= sizeof(struct target_sockaddr_in6
)) {
1600 struct target_sockaddr_in6
*target_in6
=
1601 (struct target_sockaddr_in6
*)target_saddr
;
1602 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1604 unlock_user(target_saddr
, target_addr
, len
);
1609 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1610 struct target_msghdr
*target_msgh
)
1612 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1613 abi_long msg_controllen
;
1614 abi_ulong target_cmsg_addr
;
1615 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1616 socklen_t space
= 0;
1618 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1619 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1621 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1622 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1623 target_cmsg_start
= target_cmsg
;
1625 return -TARGET_EFAULT
;
1627 while (cmsg
&& target_cmsg
) {
1628 void *data
= CMSG_DATA(cmsg
);
1629 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1631 int len
= tswapal(target_cmsg
->cmsg_len
)
1632 - sizeof(struct target_cmsghdr
);
1634 space
+= CMSG_SPACE(len
);
1635 if (space
> msgh
->msg_controllen
) {
1636 space
-= CMSG_SPACE(len
);
1637 /* This is a QEMU bug, since we allocated the payload
1638 * area ourselves (unlike overflow in host-to-target
1639 * conversion, which is just the guest giving us a buffer
1640 * that's too small). It can't happen for the payload types
1641 * we currently support; if it becomes an issue in future
1642 * we would need to improve our allocation strategy to
1643 * something more intelligent than "twice the size of the
1644 * target buffer we're reading from".
1646 qemu_log_mask(LOG_UNIMP
,
1647 ("Unsupported ancillary data %d/%d: "
1648 "unhandled msg size\n"),
1649 tswap32(target_cmsg
->cmsg_level
),
1650 tswap32(target_cmsg
->cmsg_type
));
1654 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1655 cmsg
->cmsg_level
= SOL_SOCKET
;
1657 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1659 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1660 cmsg
->cmsg_len
= CMSG_LEN(len
);
1662 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1663 int *fd
= (int *)data
;
1664 int *target_fd
= (int *)target_data
;
1665 int i
, numfds
= len
/ sizeof(int);
1667 for (i
= 0; i
< numfds
; i
++) {
1668 __get_user(fd
[i
], target_fd
+ i
);
1670 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1671 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1672 struct ucred
*cred
= (struct ucred
*)data
;
1673 struct target_ucred
*target_cred
=
1674 (struct target_ucred
*)target_data
;
1676 __get_user(cred
->pid
, &target_cred
->pid
);
1677 __get_user(cred
->uid
, &target_cred
->uid
);
1678 __get_user(cred
->gid
, &target_cred
->gid
);
1680 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1681 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1682 memcpy(data
, target_data
, len
);
1685 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1686 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1689 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1691 msgh
->msg_controllen
= space
;
1695 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1696 struct msghdr
*msgh
)
1698 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1699 abi_long msg_controllen
;
1700 abi_ulong target_cmsg_addr
;
1701 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1702 socklen_t space
= 0;
1704 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1705 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1707 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1708 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1709 target_cmsg_start
= target_cmsg
;
1711 return -TARGET_EFAULT
;
1713 while (cmsg
&& target_cmsg
) {
1714 void *data
= CMSG_DATA(cmsg
);
1715 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1717 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1718 int tgt_len
, tgt_space
;
1720 /* We never copy a half-header but may copy half-data;
1721 * this is Linux's behaviour in put_cmsg(). Note that
1722 * truncation here is a guest problem (which we report
1723 * to the guest via the CTRUNC bit), unlike truncation
1724 * in target_to_host_cmsg, which is a QEMU bug.
1726 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1727 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1731 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1732 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1734 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1736 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1738 /* Payload types which need a different size of payload on
1739 * the target must adjust tgt_len here.
1742 switch (cmsg
->cmsg_level
) {
1744 switch (cmsg
->cmsg_type
) {
1746 tgt_len
= sizeof(struct target_timeval
);
1756 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1757 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1758 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1761 /* We must now copy-and-convert len bytes of payload
1762 * into tgt_len bytes of destination space. Bear in mind
1763 * that in both source and destination we may be dealing
1764 * with a truncated value!
1766 switch (cmsg
->cmsg_level
) {
1768 switch (cmsg
->cmsg_type
) {
1771 int *fd
= (int *)data
;
1772 int *target_fd
= (int *)target_data
;
1773 int i
, numfds
= tgt_len
/ sizeof(int);
1775 for (i
= 0; i
< numfds
; i
++) {
1776 __put_user(fd
[i
], target_fd
+ i
);
1782 struct timeval
*tv
= (struct timeval
*)data
;
1783 struct target_timeval
*target_tv
=
1784 (struct target_timeval
*)target_data
;
1786 if (len
!= sizeof(struct timeval
) ||
1787 tgt_len
!= sizeof(struct target_timeval
)) {
1791 /* copy struct timeval to target */
1792 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1793 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1796 case SCM_CREDENTIALS
:
1798 struct ucred
*cred
= (struct ucred
*)data
;
1799 struct target_ucred
*target_cred
=
1800 (struct target_ucred
*)target_data
;
1802 __put_user(cred
->pid
, &target_cred
->pid
);
1803 __put_user(cred
->uid
, &target_cred
->uid
);
1804 __put_user(cred
->gid
, &target_cred
->gid
);
1813 switch (cmsg
->cmsg_type
) {
1816 uint32_t *v
= (uint32_t *)data
;
1817 uint32_t *t_int
= (uint32_t *)target_data
;
1819 if (len
!= sizeof(uint32_t) ||
1820 tgt_len
!= sizeof(uint32_t)) {
1823 __put_user(*v
, t_int
);
1829 struct sock_extended_err ee
;
1830 struct sockaddr_in offender
;
1832 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1833 struct errhdr_t
*target_errh
=
1834 (struct errhdr_t
*)target_data
;
1836 if (len
!= sizeof(struct errhdr_t
) ||
1837 tgt_len
!= sizeof(struct errhdr_t
)) {
1840 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1841 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1842 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1843 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1844 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1845 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1846 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1847 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1848 (void *) &errh
->offender
, sizeof(errh
->offender
));
1857 switch (cmsg
->cmsg_type
) {
1860 uint32_t *v
= (uint32_t *)data
;
1861 uint32_t *t_int
= (uint32_t *)target_data
;
1863 if (len
!= sizeof(uint32_t) ||
1864 tgt_len
!= sizeof(uint32_t)) {
1867 __put_user(*v
, t_int
);
1873 struct sock_extended_err ee
;
1874 struct sockaddr_in6 offender
;
1876 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1877 struct errhdr6_t
*target_errh
=
1878 (struct errhdr6_t
*)target_data
;
1880 if (len
!= sizeof(struct errhdr6_t
) ||
1881 tgt_len
!= sizeof(struct errhdr6_t
)) {
1884 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1885 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1886 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1887 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1888 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1889 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1890 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1891 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1892 (void *) &errh
->offender
, sizeof(errh
->offender
));
1902 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1903 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1904 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1905 if (tgt_len
> len
) {
1906 memset(target_data
+ len
, 0, tgt_len
- len
);
1910 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1911 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1912 if (msg_controllen
< tgt_space
) {
1913 tgt_space
= msg_controllen
;
1915 msg_controllen
-= tgt_space
;
1917 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1918 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1921 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1923 target_msgh
->msg_controllen
= tswapal(space
);
1927 /* do_setsockopt() Must return target values and target errnos. */
1928 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1929 abi_ulong optval_addr
, socklen_t optlen
)
1933 struct ip_mreqn
*ip_mreq
;
1934 struct ip_mreq_source
*ip_mreq_source
;
1938 /* TCP options all take an 'int' value. */
1939 if (optlen
< sizeof(uint32_t))
1940 return -TARGET_EINVAL
;
1942 if (get_user_u32(val
, optval_addr
))
1943 return -TARGET_EFAULT
;
1944 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1951 case IP_ROUTER_ALERT
:
1955 case IP_MTU_DISCOVER
:
1962 case IP_MULTICAST_TTL
:
1963 case IP_MULTICAST_LOOP
:
1965 if (optlen
>= sizeof(uint32_t)) {
1966 if (get_user_u32(val
, optval_addr
))
1967 return -TARGET_EFAULT
;
1968 } else if (optlen
>= 1) {
1969 if (get_user_u8(val
, optval_addr
))
1970 return -TARGET_EFAULT
;
1972 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1974 case IP_ADD_MEMBERSHIP
:
1975 case IP_DROP_MEMBERSHIP
:
1976 if (optlen
< sizeof (struct target_ip_mreq
) ||
1977 optlen
> sizeof (struct target_ip_mreqn
))
1978 return -TARGET_EINVAL
;
1980 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1981 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1982 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1985 case IP_BLOCK_SOURCE
:
1986 case IP_UNBLOCK_SOURCE
:
1987 case IP_ADD_SOURCE_MEMBERSHIP
:
1988 case IP_DROP_SOURCE_MEMBERSHIP
:
1989 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1990 return -TARGET_EINVAL
;
1992 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1993 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1994 unlock_user (ip_mreq_source
, optval_addr
, 0);
2003 case IPV6_MTU_DISCOVER
:
2006 case IPV6_RECVPKTINFO
:
2007 case IPV6_UNICAST_HOPS
:
2008 case IPV6_MULTICAST_HOPS
:
2009 case IPV6_MULTICAST_LOOP
:
2011 case IPV6_RECVHOPLIMIT
:
2012 case IPV6_2292HOPLIMIT
:
2015 case IPV6_2292PKTINFO
:
2016 case IPV6_RECVTCLASS
:
2017 case IPV6_RECVRTHDR
:
2018 case IPV6_2292RTHDR
:
2019 case IPV6_RECVHOPOPTS
:
2020 case IPV6_2292HOPOPTS
:
2021 case IPV6_RECVDSTOPTS
:
2022 case IPV6_2292DSTOPTS
:
2024 #ifdef IPV6_RECVPATHMTU
2025 case IPV6_RECVPATHMTU
:
2027 #ifdef IPV6_TRANSPARENT
2028 case IPV6_TRANSPARENT
:
2030 #ifdef IPV6_FREEBIND
2033 #ifdef IPV6_RECVORIGDSTADDR
2034 case IPV6_RECVORIGDSTADDR
:
2037 if (optlen
< sizeof(uint32_t)) {
2038 return -TARGET_EINVAL
;
2040 if (get_user_u32(val
, optval_addr
)) {
2041 return -TARGET_EFAULT
;
2043 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2044 &val
, sizeof(val
)));
2048 struct in6_pktinfo pki
;
2050 if (optlen
< sizeof(pki
)) {
2051 return -TARGET_EINVAL
;
2054 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2055 return -TARGET_EFAULT
;
2058 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2060 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2061 &pki
, sizeof(pki
)));
2064 case IPV6_ADD_MEMBERSHIP
:
2065 case IPV6_DROP_MEMBERSHIP
:
2067 struct ipv6_mreq ipv6mreq
;
2069 if (optlen
< sizeof(ipv6mreq
)) {
2070 return -TARGET_EINVAL
;
2073 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2074 return -TARGET_EFAULT
;
2077 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2079 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2080 &ipv6mreq
, sizeof(ipv6mreq
)));
2091 struct icmp6_filter icmp6f
;
2093 if (optlen
> sizeof(icmp6f
)) {
2094 optlen
= sizeof(icmp6f
);
2097 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2098 return -TARGET_EFAULT
;
2101 for (val
= 0; val
< 8; val
++) {
2102 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2105 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2117 /* those take an u32 value */
2118 if (optlen
< sizeof(uint32_t)) {
2119 return -TARGET_EINVAL
;
2122 if (get_user_u32(val
, optval_addr
)) {
2123 return -TARGET_EFAULT
;
2125 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2126 &val
, sizeof(val
)));
2133 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2138 char *alg_key
= g_malloc(optlen
);
2141 return -TARGET_ENOMEM
;
2143 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2145 return -TARGET_EFAULT
;
2147 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2152 case ALG_SET_AEAD_AUTHSIZE
:
2154 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2163 case TARGET_SOL_SOCKET
:
2165 case TARGET_SO_RCVTIMEO
:
2169 optname
= SO_RCVTIMEO
;
2172 if (optlen
!= sizeof(struct target_timeval
)) {
2173 return -TARGET_EINVAL
;
2176 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2177 return -TARGET_EFAULT
;
2180 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2184 case TARGET_SO_SNDTIMEO
:
2185 optname
= SO_SNDTIMEO
;
2187 case TARGET_SO_ATTACH_FILTER
:
2189 struct target_sock_fprog
*tfprog
;
2190 struct target_sock_filter
*tfilter
;
2191 struct sock_fprog fprog
;
2192 struct sock_filter
*filter
;
2195 if (optlen
!= sizeof(*tfprog
)) {
2196 return -TARGET_EINVAL
;
2198 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2199 return -TARGET_EFAULT
;
2201 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2202 tswapal(tfprog
->filter
), 0)) {
2203 unlock_user_struct(tfprog
, optval_addr
, 1);
2204 return -TARGET_EFAULT
;
2207 fprog
.len
= tswap16(tfprog
->len
);
2208 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2209 if (filter
== NULL
) {
2210 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2211 unlock_user_struct(tfprog
, optval_addr
, 1);
2212 return -TARGET_ENOMEM
;
2214 for (i
= 0; i
< fprog
.len
; i
++) {
2215 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2216 filter
[i
].jt
= tfilter
[i
].jt
;
2217 filter
[i
].jf
= tfilter
[i
].jf
;
2218 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2220 fprog
.filter
= filter
;
2222 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2223 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2226 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2227 unlock_user_struct(tfprog
, optval_addr
, 1);
2230 case TARGET_SO_BINDTODEVICE
:
2232 char *dev_ifname
, *addr_ifname
;
2234 if (optlen
> IFNAMSIZ
- 1) {
2235 optlen
= IFNAMSIZ
- 1;
2237 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2239 return -TARGET_EFAULT
;
2241 optname
= SO_BINDTODEVICE
;
2242 addr_ifname
= alloca(IFNAMSIZ
);
2243 memcpy(addr_ifname
, dev_ifname
, optlen
);
2244 addr_ifname
[optlen
] = 0;
2245 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2246 addr_ifname
, optlen
));
2247 unlock_user (dev_ifname
, optval_addr
, 0);
2250 case TARGET_SO_LINGER
:
2253 struct target_linger
*tlg
;
2255 if (optlen
!= sizeof(struct target_linger
)) {
2256 return -TARGET_EINVAL
;
2258 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2259 return -TARGET_EFAULT
;
2261 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2262 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2263 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2265 unlock_user_struct(tlg
, optval_addr
, 0);
2268 /* Options with 'int' argument. */
2269 case TARGET_SO_DEBUG
:
2272 case TARGET_SO_REUSEADDR
:
2273 optname
= SO_REUSEADDR
;
2276 case TARGET_SO_REUSEPORT
:
2277 optname
= SO_REUSEPORT
;
2280 case TARGET_SO_TYPE
:
2283 case TARGET_SO_ERROR
:
2286 case TARGET_SO_DONTROUTE
:
2287 optname
= SO_DONTROUTE
;
2289 case TARGET_SO_BROADCAST
:
2290 optname
= SO_BROADCAST
;
2292 case TARGET_SO_SNDBUF
:
2293 optname
= SO_SNDBUF
;
2295 case TARGET_SO_SNDBUFFORCE
:
2296 optname
= SO_SNDBUFFORCE
;
2298 case TARGET_SO_RCVBUF
:
2299 optname
= SO_RCVBUF
;
2301 case TARGET_SO_RCVBUFFORCE
:
2302 optname
= SO_RCVBUFFORCE
;
2304 case TARGET_SO_KEEPALIVE
:
2305 optname
= SO_KEEPALIVE
;
2307 case TARGET_SO_OOBINLINE
:
2308 optname
= SO_OOBINLINE
;
2310 case TARGET_SO_NO_CHECK
:
2311 optname
= SO_NO_CHECK
;
2313 case TARGET_SO_PRIORITY
:
2314 optname
= SO_PRIORITY
;
2317 case TARGET_SO_BSDCOMPAT
:
2318 optname
= SO_BSDCOMPAT
;
2321 case TARGET_SO_PASSCRED
:
2322 optname
= SO_PASSCRED
;
2324 case TARGET_SO_PASSSEC
:
2325 optname
= SO_PASSSEC
;
2327 case TARGET_SO_TIMESTAMP
:
2328 optname
= SO_TIMESTAMP
;
2330 case TARGET_SO_RCVLOWAT
:
2331 optname
= SO_RCVLOWAT
;
2336 if (optlen
< sizeof(uint32_t))
2337 return -TARGET_EINVAL
;
2339 if (get_user_u32(val
, optval_addr
))
2340 return -TARGET_EFAULT
;
2341 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2346 case NETLINK_PKTINFO
:
2347 case NETLINK_ADD_MEMBERSHIP
:
2348 case NETLINK_DROP_MEMBERSHIP
:
2349 case NETLINK_BROADCAST_ERROR
:
2350 case NETLINK_NO_ENOBUFS
:
2351 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2352 case NETLINK_LISTEN_ALL_NSID
:
2353 case NETLINK_CAP_ACK
:
2354 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2355 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2356 case NETLINK_EXT_ACK
:
2357 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2359 case NETLINK_GET_STRICT_CHK
:
2360 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2366 if (optlen
< sizeof(uint32_t)) {
2367 return -TARGET_EINVAL
;
2369 if (get_user_u32(val
, optval_addr
)) {
2370 return -TARGET_EFAULT
;
2372 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2375 #endif /* SOL_NETLINK */
2378 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2380 ret
= -TARGET_ENOPROTOOPT
;
2385 /* do_getsockopt() Must return target values and target errnos. */
2386 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2387 abi_ulong optval_addr
, abi_ulong optlen
)
2394 case TARGET_SOL_SOCKET
:
2397 /* These don't just return a single integer */
2398 case TARGET_SO_PEERNAME
:
2400 case TARGET_SO_RCVTIMEO
: {
2404 optname
= SO_RCVTIMEO
;
2407 if (get_user_u32(len
, optlen
)) {
2408 return -TARGET_EFAULT
;
2411 return -TARGET_EINVAL
;
2415 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2420 if (len
> sizeof(struct target_timeval
)) {
2421 len
= sizeof(struct target_timeval
);
2423 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2424 return -TARGET_EFAULT
;
2426 if (put_user_u32(len
, optlen
)) {
2427 return -TARGET_EFAULT
;
2431 case TARGET_SO_SNDTIMEO
:
2432 optname
= SO_SNDTIMEO
;
2434 case TARGET_SO_PEERCRED
: {
2437 struct target_ucred
*tcr
;
2439 if (get_user_u32(len
, optlen
)) {
2440 return -TARGET_EFAULT
;
2443 return -TARGET_EINVAL
;
2447 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2455 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2456 return -TARGET_EFAULT
;
2458 __put_user(cr
.pid
, &tcr
->pid
);
2459 __put_user(cr
.uid
, &tcr
->uid
);
2460 __put_user(cr
.gid
, &tcr
->gid
);
2461 unlock_user_struct(tcr
, optval_addr
, 1);
2462 if (put_user_u32(len
, optlen
)) {
2463 return -TARGET_EFAULT
;
2467 case TARGET_SO_PEERSEC
: {
2470 if (get_user_u32(len
, optlen
)) {
2471 return -TARGET_EFAULT
;
2474 return -TARGET_EINVAL
;
2476 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2478 return -TARGET_EFAULT
;
2481 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2483 if (put_user_u32(lv
, optlen
)) {
2484 ret
= -TARGET_EFAULT
;
2486 unlock_user(name
, optval_addr
, lv
);
2489 case TARGET_SO_LINGER
:
2493 struct target_linger
*tlg
;
2495 if (get_user_u32(len
, optlen
)) {
2496 return -TARGET_EFAULT
;
2499 return -TARGET_EINVAL
;
2503 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2511 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2512 return -TARGET_EFAULT
;
2514 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2515 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2516 unlock_user_struct(tlg
, optval_addr
, 1);
2517 if (put_user_u32(len
, optlen
)) {
2518 return -TARGET_EFAULT
;
2522 /* Options with 'int' argument. */
2523 case TARGET_SO_DEBUG
:
2526 case TARGET_SO_REUSEADDR
:
2527 optname
= SO_REUSEADDR
;
2530 case TARGET_SO_REUSEPORT
:
2531 optname
= SO_REUSEPORT
;
2534 case TARGET_SO_TYPE
:
2537 case TARGET_SO_ERROR
:
2540 case TARGET_SO_DONTROUTE
:
2541 optname
= SO_DONTROUTE
;
2543 case TARGET_SO_BROADCAST
:
2544 optname
= SO_BROADCAST
;
2546 case TARGET_SO_SNDBUF
:
2547 optname
= SO_SNDBUF
;
2549 case TARGET_SO_RCVBUF
:
2550 optname
= SO_RCVBUF
;
2552 case TARGET_SO_KEEPALIVE
:
2553 optname
= SO_KEEPALIVE
;
2555 case TARGET_SO_OOBINLINE
:
2556 optname
= SO_OOBINLINE
;
2558 case TARGET_SO_NO_CHECK
:
2559 optname
= SO_NO_CHECK
;
2561 case TARGET_SO_PRIORITY
:
2562 optname
= SO_PRIORITY
;
2565 case TARGET_SO_BSDCOMPAT
:
2566 optname
= SO_BSDCOMPAT
;
2569 case TARGET_SO_PASSCRED
:
2570 optname
= SO_PASSCRED
;
2572 case TARGET_SO_TIMESTAMP
:
2573 optname
= SO_TIMESTAMP
;
2575 case TARGET_SO_RCVLOWAT
:
2576 optname
= SO_RCVLOWAT
;
2578 case TARGET_SO_ACCEPTCONN
:
2579 optname
= SO_ACCEPTCONN
;
2586 /* TCP options all take an 'int' value. */
2588 if (get_user_u32(len
, optlen
))
2589 return -TARGET_EFAULT
;
2591 return -TARGET_EINVAL
;
2593 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2596 if (optname
== SO_TYPE
) {
2597 val
= host_to_target_sock_type(val
);
2602 if (put_user_u32(val
, optval_addr
))
2603 return -TARGET_EFAULT
;
2605 if (put_user_u8(val
, optval_addr
))
2606 return -TARGET_EFAULT
;
2608 if (put_user_u32(len
, optlen
))
2609 return -TARGET_EFAULT
;
2616 case IP_ROUTER_ALERT
:
2620 case IP_MTU_DISCOVER
:
2626 case IP_MULTICAST_TTL
:
2627 case IP_MULTICAST_LOOP
:
2628 if (get_user_u32(len
, optlen
))
2629 return -TARGET_EFAULT
;
2631 return -TARGET_EINVAL
;
2633 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2636 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2638 if (put_user_u32(len
, optlen
)
2639 || put_user_u8(val
, optval_addr
))
2640 return -TARGET_EFAULT
;
2642 if (len
> sizeof(int))
2644 if (put_user_u32(len
, optlen
)
2645 || put_user_u32(val
, optval_addr
))
2646 return -TARGET_EFAULT
;
2650 ret
= -TARGET_ENOPROTOOPT
;
2656 case IPV6_MTU_DISCOVER
:
2659 case IPV6_RECVPKTINFO
:
2660 case IPV6_UNICAST_HOPS
:
2661 case IPV6_MULTICAST_HOPS
:
2662 case IPV6_MULTICAST_LOOP
:
2664 case IPV6_RECVHOPLIMIT
:
2665 case IPV6_2292HOPLIMIT
:
2668 case IPV6_2292PKTINFO
:
2669 case IPV6_RECVTCLASS
:
2670 case IPV6_RECVRTHDR
:
2671 case IPV6_2292RTHDR
:
2672 case IPV6_RECVHOPOPTS
:
2673 case IPV6_2292HOPOPTS
:
2674 case IPV6_RECVDSTOPTS
:
2675 case IPV6_2292DSTOPTS
:
2677 #ifdef IPV6_RECVPATHMTU
2678 case IPV6_RECVPATHMTU
:
2680 #ifdef IPV6_TRANSPARENT
2681 case IPV6_TRANSPARENT
:
2683 #ifdef IPV6_FREEBIND
2686 #ifdef IPV6_RECVORIGDSTADDR
2687 case IPV6_RECVORIGDSTADDR
:
2689 if (get_user_u32(len
, optlen
))
2690 return -TARGET_EFAULT
;
2692 return -TARGET_EINVAL
;
2694 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2697 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2699 if (put_user_u32(len
, optlen
)
2700 || put_user_u8(val
, optval_addr
))
2701 return -TARGET_EFAULT
;
2703 if (len
> sizeof(int))
2705 if (put_user_u32(len
, optlen
)
2706 || put_user_u32(val
, optval_addr
))
2707 return -TARGET_EFAULT
;
2711 ret
= -TARGET_ENOPROTOOPT
;
2718 case NETLINK_PKTINFO
:
2719 case NETLINK_BROADCAST_ERROR
:
2720 case NETLINK_NO_ENOBUFS
:
2721 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2722 case NETLINK_LISTEN_ALL_NSID
:
2723 case NETLINK_CAP_ACK
:
2724 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2726 case NETLINK_EXT_ACK
:
2727 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2729 case NETLINK_GET_STRICT_CHK
:
2730 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2731 if (get_user_u32(len
, optlen
)) {
2732 return -TARGET_EFAULT
;
2734 if (len
!= sizeof(val
)) {
2735 return -TARGET_EINVAL
;
2738 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2742 if (put_user_u32(lv
, optlen
)
2743 || put_user_u32(val
, optval_addr
)) {
2744 return -TARGET_EFAULT
;
2747 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2748 case NETLINK_LIST_MEMBERSHIPS
:
2752 if (get_user_u32(len
, optlen
)) {
2753 return -TARGET_EFAULT
;
2756 return -TARGET_EINVAL
;
2758 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2760 return -TARGET_EFAULT
;
2763 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2765 unlock_user(results
, optval_addr
, 0);
2768 /* swap host endianess to target endianess. */
2769 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2770 results
[i
] = tswap32(results
[i
]);
2772 if (put_user_u32(lv
, optlen
)) {
2773 return -TARGET_EFAULT
;
2775 unlock_user(results
, optval_addr
, 0);
2778 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2783 #endif /* SOL_NETLINK */
2786 qemu_log_mask(LOG_UNIMP
,
2787 "getsockopt level=%d optname=%d not yet supported\n",
2789 ret
= -TARGET_EOPNOTSUPP
;
2795 /* Convert target low/high pair representing file offset into the host
2796 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2797 * as the kernel doesn't handle them either.
2799 static void target_to_host_low_high(abi_ulong tlow
,
2801 unsigned long *hlow
,
2802 unsigned long *hhigh
)
2804 uint64_t off
= tlow
|
2805 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2806 TARGET_LONG_BITS
/ 2;
2809 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2812 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2813 abi_ulong count
, int copy
)
2815 struct target_iovec
*target_vec
;
2817 abi_ulong total_len
, max_len
;
2820 bool bad_address
= false;
2826 if (count
> IOV_MAX
) {
2831 vec
= g_try_new0(struct iovec
, count
);
2837 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2838 count
* sizeof(struct target_iovec
), 1);
2839 if (target_vec
== NULL
) {
2844 /* ??? If host page size > target page size, this will result in a
2845 value larger than what we can actually support. */
2846 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2849 for (i
= 0; i
< count
; i
++) {
2850 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2851 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2856 } else if (len
== 0) {
2857 /* Zero length pointer is ignored. */
2858 vec
[i
].iov_base
= 0;
2860 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2861 /* If the first buffer pointer is bad, this is a fault. But
2862 * subsequent bad buffers will result in a partial write; this
2863 * is realized by filling the vector with null pointers and
2865 if (!vec
[i
].iov_base
) {
2876 if (len
> max_len
- total_len
) {
2877 len
= max_len
- total_len
;
2880 vec
[i
].iov_len
= len
;
2884 unlock_user(target_vec
, target_addr
, 0);
2889 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2890 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2893 unlock_user(target_vec
, target_addr
, 0);
2900 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2901 abi_ulong count
, int copy
)
2903 struct target_iovec
*target_vec
;
2906 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2907 count
* sizeof(struct target_iovec
), 1);
2909 for (i
= 0; i
< count
; i
++) {
2910 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2911 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2915 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2917 unlock_user(target_vec
, target_addr
, 0);
2923 static inline int target_to_host_sock_type(int *type
)
2926 int target_type
= *type
;
2928 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2929 case TARGET_SOCK_DGRAM
:
2930 host_type
= SOCK_DGRAM
;
2932 case TARGET_SOCK_STREAM
:
2933 host_type
= SOCK_STREAM
;
2936 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2939 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2940 #if defined(SOCK_CLOEXEC)
2941 host_type
|= SOCK_CLOEXEC
;
2943 return -TARGET_EINVAL
;
2946 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2947 #if defined(SOCK_NONBLOCK)
2948 host_type
|= SOCK_NONBLOCK
;
2949 #elif !defined(O_NONBLOCK)
2950 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, a target request for
 * TARGET_SOCK_NONBLOCK is emulated here with fcntl(O_NONBLOCK).
 * Returns @fd on success; on failure the fd is closed and
 * -TARGET_EINVAL is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Fix: check the F_GETFL result; on failure it is -1 and
         * OR-ing it in would set every status flag bit. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2972 /* do_socket() Must return target values and target errnos. */
2973 static abi_long
do_socket(int domain
, int type
, int protocol
)
2975 int target_type
= type
;
2978 ret
= target_to_host_sock_type(&type
);
2983 if (domain
== PF_NETLINK
&& !(
2984 #ifdef CONFIG_RTNETLINK
2985 protocol
== NETLINK_ROUTE
||
2987 protocol
== NETLINK_KOBJECT_UEVENT
||
2988 protocol
== NETLINK_AUDIT
)) {
2989 return -EPFNOSUPPORT
;
2992 if (domain
== AF_PACKET
||
2993 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2994 protocol
= tswap16(protocol
);
2997 ret
= get_errno(socket(domain
, type
, protocol
));
2999 ret
= sock_flags_fixup(ret
, target_type
);
3000 if (type
== SOCK_PACKET
) {
3001 /* Manage an obsolete case :
3002 * if socket type is SOCK_PACKET, bind by name
3004 fd_trans_register(ret
, &target_packet_trans
);
3005 } else if (domain
== PF_NETLINK
) {
3007 #ifdef CONFIG_RTNETLINK
3009 fd_trans_register(ret
, &target_netlink_route_trans
);
3012 case NETLINK_KOBJECT_UEVENT
:
3013 /* nothing to do: messages are strings */
3016 fd_trans_register(ret
, &target_netlink_audit_trans
);
3019 g_assert_not_reached();
3026 /* do_bind() Must return target values and target errnos. */
3027 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3033 if ((int)addrlen
< 0) {
3034 return -TARGET_EINVAL
;
3037 addr
= alloca(addrlen
+1);
3039 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3043 return get_errno(bind(sockfd
, addr
, addrlen
));
3046 /* do_connect() Must return target values and target errnos. */
3047 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3053 if ((int)addrlen
< 0) {
3054 return -TARGET_EINVAL
;
3057 addr
= alloca(addrlen
+1);
3059 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3063 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3066 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3067 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3068 int flags
, int send
)
3074 abi_ulong target_vec
;
3076 if (msgp
->msg_name
) {
3077 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3078 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3079 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3080 tswapal(msgp
->msg_name
),
3082 if (ret
== -TARGET_EFAULT
) {
3083 /* For connected sockets msg_name and msg_namelen must
3084 * be ignored, so returning EFAULT immediately is wrong.
3085 * Instead, pass a bad msg_name to the host kernel, and
3086 * let it decide whether to return EFAULT or not.
3088 msg
.msg_name
= (void *)-1;
3093 msg
.msg_name
= NULL
;
3094 msg
.msg_namelen
= 0;
3096 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3097 msg
.msg_control
= alloca(msg
.msg_controllen
);
3098 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3100 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3102 count
= tswapal(msgp
->msg_iovlen
);
3103 target_vec
= tswapal(msgp
->msg_iov
);
3105 if (count
> IOV_MAX
) {
3106 /* sendrcvmsg returns a different errno for this condition than
3107 * readv/writev, so we must catch it here before lock_iovec() does.
3109 ret
= -TARGET_EMSGSIZE
;
3113 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3114 target_vec
, count
, send
);
3116 ret
= -host_to_target_errno(errno
);
3119 msg
.msg_iovlen
= count
;
3123 if (fd_trans_target_to_host_data(fd
)) {
3126 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3127 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3128 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3129 msg
.msg_iov
->iov_len
);
3131 msg
.msg_iov
->iov_base
= host_msg
;
3132 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3136 ret
= target_to_host_cmsg(&msg
, msgp
);
3138 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3142 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3143 if (!is_error(ret
)) {
3145 if (fd_trans_host_to_target_data(fd
)) {
3146 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3147 MIN(msg
.msg_iov
->iov_len
, len
));
3149 ret
= host_to_target_cmsg(msgp
, &msg
);
3151 if (!is_error(ret
)) {
3152 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3153 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3154 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3155 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3156 msg
.msg_name
, msg
.msg_namelen
);
3168 unlock_iovec(vec
, target_vec
, count
, !send
);
3173 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3174 int flags
, int send
)
3177 struct target_msghdr
*msgp
;
3179 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3183 return -TARGET_EFAULT
;
3185 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3186 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3190 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3191 * so it might not have this *mmsg-specific flag either.
3193 #ifndef MSG_WAITFORONE
3194 #define MSG_WAITFORONE 0x10000
3197 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3198 unsigned int vlen
, unsigned int flags
,
3201 struct target_mmsghdr
*mmsgp
;
3205 if (vlen
> UIO_MAXIOV
) {
3209 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3211 return -TARGET_EFAULT
;
3214 for (i
= 0; i
< vlen
; i
++) {
3215 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3216 if (is_error(ret
)) {
3219 mmsgp
[i
].msg_len
= tswap32(ret
);
3220 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3221 if (flags
& MSG_WAITFORONE
) {
3222 flags
|= MSG_DONTWAIT
;
3226 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3228 /* Return number of datagrams sent if we sent any at all;
3229 * otherwise return the error.
3237 /* do_accept4() Must return target values and target errnos. */
3238 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3239 abi_ulong target_addrlen_addr
, int flags
)
3241 socklen_t addrlen
, ret_addrlen
;
3246 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3248 if (target_addr
== 0) {
3249 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3252 /* linux returns EINVAL if addrlen pointer is invalid */
3253 if (get_user_u32(addrlen
, target_addrlen_addr
))
3254 return -TARGET_EINVAL
;
3256 if ((int)addrlen
< 0) {
3257 return -TARGET_EINVAL
;
3260 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3261 return -TARGET_EINVAL
;
3263 addr
= alloca(addrlen
);
3265 ret_addrlen
= addrlen
;
3266 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3267 if (!is_error(ret
)) {
3268 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3269 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3270 ret
= -TARGET_EFAULT
;
3276 /* do_getpeername() Must return target values and target errnos. */
3277 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3278 abi_ulong target_addrlen_addr
)
3280 socklen_t addrlen
, ret_addrlen
;
3284 if (get_user_u32(addrlen
, target_addrlen_addr
))
3285 return -TARGET_EFAULT
;
3287 if ((int)addrlen
< 0) {
3288 return -TARGET_EINVAL
;
3291 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3292 return -TARGET_EFAULT
;
3294 addr
= alloca(addrlen
);
3296 ret_addrlen
= addrlen
;
3297 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3298 if (!is_error(ret
)) {
3299 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3300 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3301 ret
= -TARGET_EFAULT
;
3307 /* do_getsockname() Must return target values and target errnos. */
3308 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3309 abi_ulong target_addrlen_addr
)
3311 socklen_t addrlen
, ret_addrlen
;
3315 if (get_user_u32(addrlen
, target_addrlen_addr
))
3316 return -TARGET_EFAULT
;
3318 if ((int)addrlen
< 0) {
3319 return -TARGET_EINVAL
;
3322 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3323 return -TARGET_EFAULT
;
3325 addr
= alloca(addrlen
);
3327 ret_addrlen
= addrlen
;
3328 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3329 if (!is_error(ret
)) {
3330 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3331 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3332 ret
= -TARGET_EFAULT
;
3338 /* do_socketpair() Must return target values and target errnos. */
3339 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3340 abi_ulong target_tab_addr
)
3345 target_to_host_sock_type(&type
);
3347 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3348 if (!is_error(ret
)) {
3349 if (put_user_s32(tab
[0], target_tab_addr
)
3350 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3351 ret
= -TARGET_EFAULT
;
3356 /* do_sendto() Must return target values and target errnos. */
3357 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3358 abi_ulong target_addr
, socklen_t addrlen
)
3362 void *copy_msg
= NULL
;
3365 if ((int)addrlen
< 0) {
3366 return -TARGET_EINVAL
;
3369 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3371 return -TARGET_EFAULT
;
3372 if (fd_trans_target_to_host_data(fd
)) {
3373 copy_msg
= host_msg
;
3374 host_msg
= g_malloc(len
);
3375 memcpy(host_msg
, copy_msg
, len
);
3376 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3382 addr
= alloca(addrlen
+1);
3383 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3387 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3389 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3394 host_msg
= copy_msg
;
3396 unlock_user(host_msg
, msg
, 0);
3400 /* do_recvfrom() Must return target values and target errnos. */
3401 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3402 abi_ulong target_addr
,
3403 abi_ulong target_addrlen
)
3405 socklen_t addrlen
, ret_addrlen
;
3410 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3412 return -TARGET_EFAULT
;
3414 if (get_user_u32(addrlen
, target_addrlen
)) {
3415 ret
= -TARGET_EFAULT
;
3418 if ((int)addrlen
< 0) {
3419 ret
= -TARGET_EINVAL
;
3422 addr
= alloca(addrlen
);
3423 ret_addrlen
= addrlen
;
3424 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3425 addr
, &ret_addrlen
));
3427 addr
= NULL
; /* To keep compiler quiet. */
3428 addrlen
= 0; /* To keep compiler quiet. */
3429 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3431 if (!is_error(ret
)) {
3432 if (fd_trans_host_to_target_data(fd
)) {
3434 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3435 if (is_error(trans
)) {
3441 host_to_target_sockaddr(target_addr
, addr
,
3442 MIN(addrlen
, ret_addrlen
));
3443 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3444 ret
= -TARGET_EFAULT
;
3448 unlock_user(host_msg
, msg
, len
);
3451 unlock_user(host_msg
, msg
, 0);
3456 #ifdef TARGET_NR_socketcall
3457 /* do_socketcall() must return target values and target errnos. */
3458 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3460 static const unsigned nargs
[] = { /* number of arguments per operation */
3461 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3462 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3463 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3464 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3465 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3466 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3467 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3468 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3469 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3470 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3471 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3472 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3473 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3474 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3475 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3476 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3477 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3478 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3479 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3480 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3482 abi_long a
[6]; /* max 6 args */
3485 /* check the range of the first argument num */
3486 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3487 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3488 return -TARGET_EINVAL
;
3490 /* ensure we have space for args */
3491 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3492 return -TARGET_EINVAL
;
3494 /* collect the arguments in a[] according to nargs[] */
3495 for (i
= 0; i
< nargs
[num
]; ++i
) {
3496 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3497 return -TARGET_EFAULT
;
3500 /* now when we have the args, invoke the appropriate underlying function */
3502 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3503 return do_socket(a
[0], a
[1], a
[2]);
3504 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3505 return do_bind(a
[0], a
[1], a
[2]);
3506 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3507 return do_connect(a
[0], a
[1], a
[2]);
3508 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3509 return get_errno(listen(a
[0], a
[1]));
3510 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3511 return do_accept4(a
[0], a
[1], a
[2], 0);
3512 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3513 return do_getsockname(a
[0], a
[1], a
[2]);
3514 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3515 return do_getpeername(a
[0], a
[1], a
[2]);
3516 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3517 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3518 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3519 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3520 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3521 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3522 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3523 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3524 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3525 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3526 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3527 return get_errno(shutdown(a
[0], a
[1]));
3528 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3529 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3530 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3531 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3532 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3533 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3534 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3535 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3536 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3537 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3538 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3539 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3540 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3541 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3543 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3544 return -TARGET_EINVAL
;
3549 #define N_SHM_REGIONS 32
3551 static struct shm_region
{
3555 } shm_regions
[N_SHM_REGIONS
];
3557 #ifndef TARGET_SEMID64_DS
3558 /* asm-generic version of this struct */
3559 struct target_semid64_ds
3561 struct target_ipc_perm sem_perm
;
3562 abi_ulong sem_otime
;
3563 #if TARGET_ABI_BITS == 32
3564 abi_ulong __unused1
;
3566 abi_ulong sem_ctime
;
3567 #if TARGET_ABI_BITS == 32
3568 abi_ulong __unused2
;
3570 abi_ulong sem_nsems
;
3571 abi_ulong __unused3
;
3572 abi_ulong __unused4
;
3576 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3577 abi_ulong target_addr
)
3579 struct target_ipc_perm
*target_ip
;
3580 struct target_semid64_ds
*target_sd
;
3582 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3583 return -TARGET_EFAULT
;
3584 target_ip
= &(target_sd
->sem_perm
);
3585 host_ip
->__key
= tswap32(target_ip
->__key
);
3586 host_ip
->uid
= tswap32(target_ip
->uid
);
3587 host_ip
->gid
= tswap32(target_ip
->gid
);
3588 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3589 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3590 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3591 host_ip
->mode
= tswap32(target_ip
->mode
);
3593 host_ip
->mode
= tswap16(target_ip
->mode
);
3595 #if defined(TARGET_PPC)
3596 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3598 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3600 unlock_user_struct(target_sd
, target_addr
, 0);
3604 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3605 struct ipc_perm
*host_ip
)
3607 struct target_ipc_perm
*target_ip
;
3608 struct target_semid64_ds
*target_sd
;
3610 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3611 return -TARGET_EFAULT
;
3612 target_ip
= &(target_sd
->sem_perm
);
3613 target_ip
->__key
= tswap32(host_ip
->__key
);
3614 target_ip
->uid
= tswap32(host_ip
->uid
);
3615 target_ip
->gid
= tswap32(host_ip
->gid
);
3616 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3617 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3618 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3619 target_ip
->mode
= tswap32(host_ip
->mode
);
3621 target_ip
->mode
= tswap16(host_ip
->mode
);
3623 #if defined(TARGET_PPC)
3624 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3626 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3628 unlock_user_struct(target_sd
, target_addr
, 1);
3632 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3633 abi_ulong target_addr
)
3635 struct target_semid64_ds
*target_sd
;
3637 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3638 return -TARGET_EFAULT
;
3639 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3640 return -TARGET_EFAULT
;
3641 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3642 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3643 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3644 unlock_user_struct(target_sd
, target_addr
, 0);
3648 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3649 struct semid_ds
*host_sd
)
3651 struct target_semid64_ds
*target_sd
;
3653 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3654 return -TARGET_EFAULT
;
3655 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3656 return -TARGET_EFAULT
;
3657 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3658 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3659 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3660 unlock_user_struct(target_sd
, target_addr
, 1);
3664 struct target_seminfo
{
3677 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3678 struct seminfo
*host_seminfo
)
3680 struct target_seminfo
*target_seminfo
;
3681 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3682 return -TARGET_EFAULT
;
3683 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3684 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3685 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3686 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3687 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3688 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3689 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3690 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3691 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3692 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3693 unlock_user_struct(target_seminfo
, target_addr
, 1);
3699 struct semid_ds
*buf
;
3700 unsigned short *array
;
3701 struct seminfo
*__buf
;
3704 union target_semun
{
3711 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3712 abi_ulong target_addr
)
3715 unsigned short *array
;
3717 struct semid_ds semid_ds
;
3720 semun
.buf
= &semid_ds
;
3722 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3724 return get_errno(ret
);
3726 nsems
= semid_ds
.sem_nsems
;
3728 *host_array
= g_try_new(unsigned short, nsems
);
3730 return -TARGET_ENOMEM
;
3732 array
= lock_user(VERIFY_READ
, target_addr
,
3733 nsems
*sizeof(unsigned short), 1);
3735 g_free(*host_array
);
3736 return -TARGET_EFAULT
;
3739 for(i
=0; i
<nsems
; i
++) {
3740 __get_user((*host_array
)[i
], &array
[i
]);
3742 unlock_user(array
, target_addr
, 0);
3747 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3748 unsigned short **host_array
)
3751 unsigned short *array
;
3753 struct semid_ds semid_ds
;
3756 semun
.buf
= &semid_ds
;
3758 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3760 return get_errno(ret
);
3762 nsems
= semid_ds
.sem_nsems
;
3764 array
= lock_user(VERIFY_WRITE
, target_addr
,
3765 nsems
*sizeof(unsigned short), 0);
3767 return -TARGET_EFAULT
;
3769 for(i
=0; i
<nsems
; i
++) {
3770 __put_user((*host_array
)[i
], &array
[i
]);
3772 g_free(*host_array
);
3773 unlock_user(array
, target_addr
, 1);
3778 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3779 abi_ulong target_arg
)
3781 union target_semun target_su
= { .buf
= target_arg
};
3783 struct semid_ds dsarg
;
3784 unsigned short *array
= NULL
;
3785 struct seminfo seminfo
;
3786 abi_long ret
= -TARGET_EINVAL
;
3793 /* In 64 bit cross-endian situations, we will erroneously pick up
3794 * the wrong half of the union for the "val" element. To rectify
3795 * this, the entire 8-byte structure is byteswapped, followed by
3796 * a swap of the 4 byte val field. In other cases, the data is
3797 * already in proper host byte order. */
3798 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3799 target_su
.buf
= tswapal(target_su
.buf
);
3800 arg
.val
= tswap32(target_su
.val
);
3802 arg
.val
= target_su
.val
;
3804 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3808 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3812 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3813 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3820 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3824 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3825 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3831 arg
.__buf
= &seminfo
;
3832 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3833 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3841 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3848 struct target_sembuf
{
3849 unsigned short sem_num
;
3854 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3855 abi_ulong target_addr
,
3858 struct target_sembuf
*target_sembuf
;
3861 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3862 nsops
*sizeof(struct target_sembuf
), 1);
3864 return -TARGET_EFAULT
;
3866 for(i
=0; i
<nsops
; i
++) {
3867 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3868 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3869 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3872 unlock_user(target_sembuf
, target_addr
, 0);
3877 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3879 struct sembuf sops
[nsops
];
3882 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3883 return -TARGET_EFAULT
;
3885 ret
= -TARGET_ENOSYS
;
3886 #ifdef __NR_semtimedop
3887 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3890 if (ret
== -TARGET_ENOSYS
) {
3891 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
, nsops
, 0, sops
, 0));
3897 struct target_msqid_ds
3899 struct target_ipc_perm msg_perm
;
3900 abi_ulong msg_stime
;
3901 #if TARGET_ABI_BITS == 32
3902 abi_ulong __unused1
;
3904 abi_ulong msg_rtime
;
3905 #if TARGET_ABI_BITS == 32
3906 abi_ulong __unused2
;
3908 abi_ulong msg_ctime
;
3909 #if TARGET_ABI_BITS == 32
3910 abi_ulong __unused3
;
3912 abi_ulong __msg_cbytes
;
3914 abi_ulong msg_qbytes
;
3915 abi_ulong msg_lspid
;
3916 abi_ulong msg_lrpid
;
3917 abi_ulong __unused4
;
3918 abi_ulong __unused5
;
3921 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3922 abi_ulong target_addr
)
3924 struct target_msqid_ds
*target_md
;
3926 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3927 return -TARGET_EFAULT
;
3928 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3929 return -TARGET_EFAULT
;
3930 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3931 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3932 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3933 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3934 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3935 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3936 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3937 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3938 unlock_user_struct(target_md
, target_addr
, 0);
3942 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3943 struct msqid_ds
*host_md
)
3945 struct target_msqid_ds
*target_md
;
3947 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3948 return -TARGET_EFAULT
;
3949 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3950 return -TARGET_EFAULT
;
3951 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3952 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3953 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3954 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3955 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3956 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3957 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3958 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3959 unlock_user_struct(target_md
, target_addr
, 1);
3963 struct target_msginfo
{
3971 unsigned short int msgseg
;
3974 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3975 struct msginfo
*host_msginfo
)
3977 struct target_msginfo
*target_msginfo
;
3978 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3979 return -TARGET_EFAULT
;
3980 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3981 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3982 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3983 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3984 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3985 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3986 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3987 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3988 unlock_user_struct(target_msginfo
, target_addr
, 1);
3992 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3994 struct msqid_ds dsarg
;
3995 struct msginfo msginfo
;
3996 abi_long ret
= -TARGET_EINVAL
;
4004 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4005 return -TARGET_EFAULT
;
4006 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4007 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4008 return -TARGET_EFAULT
;
4011 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4015 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4016 if (host_to_target_msginfo(ptr
, &msginfo
))
4017 return -TARGET_EFAULT
;
4024 struct target_msgbuf
{
4029 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4030 ssize_t msgsz
, int msgflg
)
4032 struct target_msgbuf
*target_mb
;
4033 struct msgbuf
*host_mb
;
4037 return -TARGET_EINVAL
;
4040 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4041 return -TARGET_EFAULT
;
4042 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4044 unlock_user_struct(target_mb
, msgp
, 0);
4045 return -TARGET_ENOMEM
;
4047 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4048 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4049 ret
= -TARGET_ENOSYS
;
4051 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4054 if (ret
== -TARGET_ENOSYS
) {
4055 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4060 unlock_user_struct(target_mb
, msgp
, 0);
4065 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4066 ssize_t msgsz
, abi_long msgtyp
,
4069 struct target_msgbuf
*target_mb
;
4071 struct msgbuf
*host_mb
;
4075 return -TARGET_EINVAL
;
4078 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4079 return -TARGET_EFAULT
;
4081 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4083 ret
= -TARGET_ENOMEM
;
4086 ret
= -TARGET_ENOSYS
;
4088 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4091 if (ret
== -TARGET_ENOSYS
) {
4092 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4093 msgflg
, host_mb
, msgtyp
));
4098 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4099 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4100 if (!target_mtext
) {
4101 ret
= -TARGET_EFAULT
;
4104 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4105 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4108 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4112 unlock_user_struct(target_mb
, msgp
, 1);
4117 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4118 abi_ulong target_addr
)
4120 struct target_shmid_ds
*target_sd
;
4122 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4123 return -TARGET_EFAULT
;
4124 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4125 return -TARGET_EFAULT
;
4126 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4127 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4128 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4129 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4130 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4131 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4132 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4133 unlock_user_struct(target_sd
, target_addr
, 0);
4137 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4138 struct shmid_ds
*host_sd
)
4140 struct target_shmid_ds
*target_sd
;
4142 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4143 return -TARGET_EFAULT
;
4144 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4145 return -TARGET_EFAULT
;
4146 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4147 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4148 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4149 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4150 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4151 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4152 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4153 unlock_user_struct(target_sd
, target_addr
, 1);
4157 struct target_shminfo
{
4165 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4166 struct shminfo
*host_shminfo
)
4168 struct target_shminfo
*target_shminfo
;
4169 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4170 return -TARGET_EFAULT
;
4171 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4172 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4173 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4174 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4175 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4176 unlock_user_struct(target_shminfo
, target_addr
, 1);
4180 struct target_shm_info
{
4185 abi_ulong swap_attempts
;
4186 abi_ulong swap_successes
;
4189 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4190 struct shm_info
*host_shm_info
)
4192 struct target_shm_info
*target_shm_info
;
4193 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4194 return -TARGET_EFAULT
;
4195 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4196 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4197 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4198 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4199 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4200 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4201 unlock_user_struct(target_shm_info
, target_addr
, 1);
4205 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4207 struct shmid_ds dsarg
;
4208 struct shminfo shminfo
;
4209 struct shm_info shm_info
;
4210 abi_long ret
= -TARGET_EINVAL
;
4218 if (target_to_host_shmid_ds(&dsarg
, buf
))
4219 return -TARGET_EFAULT
;
4220 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4221 if (host_to_target_shmid_ds(buf
, &dsarg
))
4222 return -TARGET_EFAULT
;
4225 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4226 if (host_to_target_shminfo(buf
, &shminfo
))
4227 return -TARGET_EFAULT
;
4230 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4231 if (host_to_target_shm_info(buf
, &shm_info
))
4232 return -TARGET_EFAULT
;
4237 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4244 #ifndef TARGET_FORCE_SHMLBA
4245 /* For most architectures, SHMLBA is the same as the page size;
4246 * some architectures have larger values, in which case they should
4247 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4248 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4249 * and defining its own value for SHMLBA.
4251 * The kernel also permits SHMLBA to be set by the architecture to a
4252 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4253 * this means that addresses are rounded to the large size if
4254 * SHM_RND is set but addresses not aligned to that size are not rejected
4255 * as long as they are at least page-aligned. Since the only architecture
4256 * which uses this is ia64 this code doesn't provide for that oddity.
4258 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4260 return TARGET_PAGE_SIZE
;
4264 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4265 int shmid
, abi_ulong shmaddr
, int shmflg
)
4269 struct shmid_ds shm_info
;
4273 /* find out the length of the shared memory segment */
4274 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4275 if (is_error(ret
)) {
4276 /* can't get length, bail out */
4280 shmlba
= target_shmlba(cpu_env
);
4282 if (shmaddr
& (shmlba
- 1)) {
4283 if (shmflg
& SHM_RND
) {
4284 shmaddr
&= ~(shmlba
- 1);
4286 return -TARGET_EINVAL
;
4289 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4290 return -TARGET_EINVAL
;
4296 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4298 abi_ulong mmap_start
;
4300 /* In order to use the host shmat, we need to honor host SHMLBA. */
4301 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4303 if (mmap_start
== -1) {
4305 host_raddr
= (void *)-1;
4307 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4310 if (host_raddr
== (void *)-1) {
4312 return get_errno((long)host_raddr
);
4314 raddr
=h2g((unsigned long)host_raddr
);
4316 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4317 PAGE_VALID
| PAGE_READ
|
4318 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4320 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4321 if (!shm_regions
[i
].in_use
) {
4322 shm_regions
[i
].in_use
= true;
4323 shm_regions
[i
].start
= raddr
;
4324 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4334 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4341 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4342 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4343 shm_regions
[i
].in_use
= false;
4344 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4348 rv
= get_errno(shmdt(g2h(shmaddr
)));
4355 #ifdef TARGET_NR_ipc
4356 /* ??? This only works with linear mappings. */
4357 /* do_ipc() must return target values and target errnos. */
4358 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4359 unsigned int call
, abi_long first
,
4360 abi_long second
, abi_long third
,
4361 abi_long ptr
, abi_long fifth
)
4366 version
= call
>> 16;
4371 ret
= do_semop(first
, ptr
, second
);
4375 ret
= get_errno(semget(first
, second
, third
));
4378 case IPCOP_semctl
: {
4379 /* The semun argument to semctl is passed by value, so dereference the
4382 get_user_ual(atptr
, ptr
);
4383 ret
= do_semctl(first
, second
, third
, atptr
);
4388 ret
= get_errno(msgget(first
, second
));
4392 ret
= do_msgsnd(first
, ptr
, second
, third
);
4396 ret
= do_msgctl(first
, second
, ptr
);
4403 struct target_ipc_kludge
{
4408 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4409 ret
= -TARGET_EFAULT
;
4413 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4415 unlock_user_struct(tmp
, ptr
, 0);
4419 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4428 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4429 if (is_error(raddr
))
4430 return get_errno(raddr
);
4431 if (put_user_ual(raddr
, third
))
4432 return -TARGET_EFAULT
;
4436 ret
= -TARGET_EINVAL
;
4441 ret
= do_shmdt(ptr
);
4445 /* IPC_* flag values are the same on all linux platforms */
4446 ret
= get_errno(shmget(first
, second
, third
));
4449 /* IPC_* and SHM_* command values are the same on all linux platforms */
4451 ret
= do_shmctl(first
, second
, ptr
);
4454 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4456 ret
= -TARGET_ENOSYS
;
4463 /* kernel structure types definitions */
4465 #define STRUCT(name, ...) STRUCT_ ## name,
4466 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4468 #include "syscall_types.h"
4472 #undef STRUCT_SPECIAL
4474 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4475 #define STRUCT_SPECIAL(name)
4476 #include "syscall_types.h"
4478 #undef STRUCT_SPECIAL
4480 typedef struct IOCTLEntry IOCTLEntry
;
4482 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4483 int fd
, int cmd
, abi_long arg
);
4487 unsigned int host_cmd
;
4490 do_ioctl_fn
*do_ioctl
;
4491 const argtype arg_type
[5];
4494 #define IOC_R 0x0001
4495 #define IOC_W 0x0002
4496 #define IOC_RW (IOC_R | IOC_W)
4498 #define MAX_STRUCT_SIZE 4096
4500 #ifdef CONFIG_FIEMAP
4501 /* So fiemap access checks don't overflow on 32 bit systems.
4502 * This is very slightly smaller than the limit imposed by
4503 * the underlying kernel.
4505 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4506 / sizeof(struct fiemap_extent))
4508 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4509 int fd
, int cmd
, abi_long arg
)
4511 /* The parameter for this ioctl is a struct fiemap followed
4512 * by an array of struct fiemap_extent whose size is set
4513 * in fiemap->fm_extent_count. The array is filled in by the
4516 int target_size_in
, target_size_out
;
4518 const argtype
*arg_type
= ie
->arg_type
;
4519 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4522 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4526 assert(arg_type
[0] == TYPE_PTR
);
4527 assert(ie
->access
== IOC_RW
);
4529 target_size_in
= thunk_type_size(arg_type
, 0);
4530 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4532 return -TARGET_EFAULT
;
4534 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4535 unlock_user(argptr
, arg
, 0);
4536 fm
= (struct fiemap
*)buf_temp
;
4537 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4538 return -TARGET_EINVAL
;
4541 outbufsz
= sizeof (*fm
) +
4542 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4544 if (outbufsz
> MAX_STRUCT_SIZE
) {
4545 /* We can't fit all the extents into the fixed size buffer.
4546 * Allocate one that is large enough and use it instead.
4548 fm
= g_try_malloc(outbufsz
);
4550 return -TARGET_ENOMEM
;
4552 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4555 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4556 if (!is_error(ret
)) {
4557 target_size_out
= target_size_in
;
4558 /* An extent_count of 0 means we were only counting the extents
4559 * so there are no structs to copy
4561 if (fm
->fm_extent_count
!= 0) {
4562 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4564 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4566 ret
= -TARGET_EFAULT
;
4568 /* Convert the struct fiemap */
4569 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4570 if (fm
->fm_extent_count
!= 0) {
4571 p
= argptr
+ target_size_in
;
4572 /* ...and then all the struct fiemap_extents */
4573 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4574 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4579 unlock_user(argptr
, arg
, target_size_out
);
4589 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4590 int fd
, int cmd
, abi_long arg
)
4592 const argtype
*arg_type
= ie
->arg_type
;
4596 struct ifconf
*host_ifconf
;
4598 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4599 int target_ifreq_size
;
4604 abi_long target_ifc_buf
;
4608 assert(arg_type
[0] == TYPE_PTR
);
4609 assert(ie
->access
== IOC_RW
);
4612 target_size
= thunk_type_size(arg_type
, 0);
4614 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4616 return -TARGET_EFAULT
;
4617 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4618 unlock_user(argptr
, arg
, 0);
4620 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4621 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4622 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4624 if (target_ifc_buf
!= 0) {
4625 target_ifc_len
= host_ifconf
->ifc_len
;
4626 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4627 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4629 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4630 if (outbufsz
> MAX_STRUCT_SIZE
) {
4632 * We can't fit all the extents into the fixed size buffer.
4633 * Allocate one that is large enough and use it instead.
4635 host_ifconf
= malloc(outbufsz
);
4637 return -TARGET_ENOMEM
;
4639 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4642 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4644 host_ifconf
->ifc_len
= host_ifc_len
;
4646 host_ifc_buf
= NULL
;
4648 host_ifconf
->ifc_buf
= host_ifc_buf
;
4650 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4651 if (!is_error(ret
)) {
4652 /* convert host ifc_len to target ifc_len */
4654 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4655 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4656 host_ifconf
->ifc_len
= target_ifc_len
;
4658 /* restore target ifc_buf */
4660 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4662 /* copy struct ifconf to target user */
4664 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4666 return -TARGET_EFAULT
;
4667 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4668 unlock_user(argptr
, arg
, target_size
);
4670 if (target_ifc_buf
!= 0) {
4671 /* copy ifreq[] to target user */
4672 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4673 for (i
= 0; i
< nb_ifreq
; i
++) {
4674 thunk_convert(argptr
+ i
* target_ifreq_size
,
4675 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4676 ifreq_arg_type
, THUNK_TARGET
);
4678 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4689 #if defined(CONFIG_USBFS)
4690 #if HOST_LONG_BITS > 64
4691 #error USBDEVFS thunks do not support >64 bit hosts yet.
4694 uint64_t target_urb_adr
;
4695 uint64_t target_buf_adr
;
4696 char *target_buf_ptr
;
4697 struct usbdevfs_urb host_urb
;
4700 static GHashTable
*usbdevfs_urb_hashtable(void)
4702 static GHashTable
*urb_hashtable
;
4704 if (!urb_hashtable
) {
4705 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4707 return urb_hashtable
;
4710 static void urb_hashtable_insert(struct live_urb
*urb
)
4712 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4713 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4716 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4718 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4719 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4722 static void urb_hashtable_remove(struct live_urb
*urb
)
4724 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4725 g_hash_table_remove(urb_hashtable
, urb
);
4729 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4730 int fd
, int cmd
, abi_long arg
)
4732 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4733 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4734 struct live_urb
*lurb
;
4738 uintptr_t target_urb_adr
;
4741 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4743 memset(buf_temp
, 0, sizeof(uint64_t));
4744 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4745 if (is_error(ret
)) {
4749 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4750 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4751 if (!lurb
->target_urb_adr
) {
4752 return -TARGET_EFAULT
;
4754 urb_hashtable_remove(lurb
);
4755 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4756 lurb
->host_urb
.buffer_length
);
4757 lurb
->target_buf_ptr
= NULL
;
4759 /* restore the guest buffer pointer */
4760 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4762 /* update the guest urb struct */
4763 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4766 return -TARGET_EFAULT
;
4768 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4769 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4771 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4772 /* write back the urb handle */
4773 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4776 return -TARGET_EFAULT
;
4779 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4780 target_urb_adr
= lurb
->target_urb_adr
;
4781 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4782 unlock_user(argptr
, arg
, target_size
);
4789 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4790 uint8_t *buf_temp
__attribute__((unused
)),
4791 int fd
, int cmd
, abi_long arg
)
4793 struct live_urb
*lurb
;
4795 /* map target address back to host URB with metadata. */
4796 lurb
= urb_hashtable_lookup(arg
);
4798 return -TARGET_EFAULT
;
4800 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4804 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4805 int fd
, int cmd
, abi_long arg
)
4807 const argtype
*arg_type
= ie
->arg_type
;
4812 struct live_urb
*lurb
;
4815 * each submitted URB needs to map to a unique ID for the
4816 * kernel, and that unique ID needs to be a pointer to
4817 * host memory. hence, we need to malloc for each URB.
4818 * isochronous transfers have a variable length struct.
4821 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4823 /* construct host copy of urb and metadata */
4824 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4826 return -TARGET_ENOMEM
;
4829 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4832 return -TARGET_EFAULT
;
4834 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4835 unlock_user(argptr
, arg
, 0);
4837 lurb
->target_urb_adr
= arg
;
4838 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4840 /* buffer space used depends on endpoint type so lock the entire buffer */
4841 /* control type urbs should check the buffer contents for true direction */
4842 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4843 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4844 lurb
->host_urb
.buffer_length
, 1);
4845 if (lurb
->target_buf_ptr
== NULL
) {
4847 return -TARGET_EFAULT
;
4850 /* update buffer pointer in host copy */
4851 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4853 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4854 if (is_error(ret
)) {
4855 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4858 urb_hashtable_insert(lurb
);
4863 #endif /* CONFIG_USBFS */
4865 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4866 int cmd
, abi_long arg
)
4869 struct dm_ioctl
*host_dm
;
4870 abi_long guest_data
;
4871 uint32_t guest_data_size
;
4873 const argtype
*arg_type
= ie
->arg_type
;
4875 void *big_buf
= NULL
;
4879 target_size
= thunk_type_size(arg_type
, 0);
4880 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4882 ret
= -TARGET_EFAULT
;
4885 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4886 unlock_user(argptr
, arg
, 0);
4888 /* buf_temp is too small, so fetch things into a bigger buffer */
4889 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4890 memcpy(big_buf
, buf_temp
, target_size
);
4894 guest_data
= arg
+ host_dm
->data_start
;
4895 if ((guest_data
- arg
) < 0) {
4896 ret
= -TARGET_EINVAL
;
4899 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4900 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4902 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4904 ret
= -TARGET_EFAULT
;
4908 switch (ie
->host_cmd
) {
4910 case DM_LIST_DEVICES
:
4913 case DM_DEV_SUSPEND
:
4916 case DM_TABLE_STATUS
:
4917 case DM_TABLE_CLEAR
:
4919 case DM_LIST_VERSIONS
:
4923 case DM_DEV_SET_GEOMETRY
:
4924 /* data contains only strings */
4925 memcpy(host_data
, argptr
, guest_data_size
);
4928 memcpy(host_data
, argptr
, guest_data_size
);
4929 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4933 void *gspec
= argptr
;
4934 void *cur_data
= host_data
;
4935 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4936 int spec_size
= thunk_type_size(arg_type
, 0);
4939 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4940 struct dm_target_spec
*spec
= cur_data
;
4944 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4945 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4947 spec
->next
= sizeof(*spec
) + slen
;
4948 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4950 cur_data
+= spec
->next
;
4955 ret
= -TARGET_EINVAL
;
4956 unlock_user(argptr
, guest_data
, 0);
4959 unlock_user(argptr
, guest_data
, 0);
4961 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4962 if (!is_error(ret
)) {
4963 guest_data
= arg
+ host_dm
->data_start
;
4964 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4965 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4966 switch (ie
->host_cmd
) {
4971 case DM_DEV_SUSPEND
:
4974 case DM_TABLE_CLEAR
:
4976 case DM_DEV_SET_GEOMETRY
:
4977 /* no return data */
4979 case DM_LIST_DEVICES
:
4981 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4982 uint32_t remaining_data
= guest_data_size
;
4983 void *cur_data
= argptr
;
4984 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4985 int nl_size
= 12; /* can't use thunk_size due to alignment */
4988 uint32_t next
= nl
->next
;
4990 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4992 if (remaining_data
< nl
->next
) {
4993 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4996 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4997 strcpy(cur_data
+ nl_size
, nl
->name
);
4998 cur_data
+= nl
->next
;
4999 remaining_data
-= nl
->next
;
5003 nl
= (void*)nl
+ next
;
5008 case DM_TABLE_STATUS
:
5010 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5011 void *cur_data
= argptr
;
5012 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5013 int spec_size
= thunk_type_size(arg_type
, 0);
5016 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5017 uint32_t next
= spec
->next
;
5018 int slen
= strlen((char*)&spec
[1]) + 1;
5019 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5020 if (guest_data_size
< spec
->next
) {
5021 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5024 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5025 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5026 cur_data
= argptr
+ spec
->next
;
5027 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5033 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5034 int count
= *(uint32_t*)hdata
;
5035 uint64_t *hdev
= hdata
+ 8;
5036 uint64_t *gdev
= argptr
+ 8;
5039 *(uint32_t*)argptr
= tswap32(count
);
5040 for (i
= 0; i
< count
; i
++) {
5041 *gdev
= tswap64(*hdev
);
5047 case DM_LIST_VERSIONS
:
5049 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5050 uint32_t remaining_data
= guest_data_size
;
5051 void *cur_data
= argptr
;
5052 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5053 int vers_size
= thunk_type_size(arg_type
, 0);
5056 uint32_t next
= vers
->next
;
5058 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5060 if (remaining_data
< vers
->next
) {
5061 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5064 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5065 strcpy(cur_data
+ vers_size
, vers
->name
);
5066 cur_data
+= vers
->next
;
5067 remaining_data
-= vers
->next
;
5071 vers
= (void*)vers
+ next
;
5076 unlock_user(argptr
, guest_data
, 0);
5077 ret
= -TARGET_EINVAL
;
5080 unlock_user(argptr
, guest_data
, guest_data_size
);
5082 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5084 ret
= -TARGET_EFAULT
;
5087 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5088 unlock_user(argptr
, arg
, target_size
);
5095 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5096 int cmd
, abi_long arg
)
5100 const argtype
*arg_type
= ie
->arg_type
;
5101 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5104 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5105 struct blkpg_partition host_part
;
5107 /* Read and convert blkpg */
5109 target_size
= thunk_type_size(arg_type
, 0);
5110 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5112 ret
= -TARGET_EFAULT
;
5115 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5116 unlock_user(argptr
, arg
, 0);
5118 switch (host_blkpg
->op
) {
5119 case BLKPG_ADD_PARTITION
:
5120 case BLKPG_DEL_PARTITION
:
5121 /* payload is struct blkpg_partition */
5124 /* Unknown opcode */
5125 ret
= -TARGET_EINVAL
;
5129 /* Read and convert blkpg->data */
5130 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5131 target_size
= thunk_type_size(part_arg_type
, 0);
5132 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5134 ret
= -TARGET_EFAULT
;
5137 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5138 unlock_user(argptr
, arg
, 0);
5140 /* Swizzle the data pointer to our local copy and call! */
5141 host_blkpg
->data
= &host_part
;
5142 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5148 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5149 int fd
, int cmd
, abi_long arg
)
5151 const argtype
*arg_type
= ie
->arg_type
;
5152 const StructEntry
*se
;
5153 const argtype
*field_types
;
5154 const int *dst_offsets
, *src_offsets
;
5157 abi_ulong
*target_rt_dev_ptr
= NULL
;
5158 unsigned long *host_rt_dev_ptr
= NULL
;
5162 assert(ie
->access
== IOC_W
);
5163 assert(*arg_type
== TYPE_PTR
);
5165 assert(*arg_type
== TYPE_STRUCT
);
5166 target_size
= thunk_type_size(arg_type
, 0);
5167 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5169 return -TARGET_EFAULT
;
5172 assert(*arg_type
== (int)STRUCT_rtentry
);
5173 se
= struct_entries
+ *arg_type
++;
5174 assert(se
->convert
[0] == NULL
);
5175 /* convert struct here to be able to catch rt_dev string */
5176 field_types
= se
->field_types
;
5177 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5178 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5179 for (i
= 0; i
< se
->nb_fields
; i
++) {
5180 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5181 assert(*field_types
== TYPE_PTRVOID
);
5182 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5183 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5184 if (*target_rt_dev_ptr
!= 0) {
5185 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5186 tswapal(*target_rt_dev_ptr
));
5187 if (!*host_rt_dev_ptr
) {
5188 unlock_user(argptr
, arg
, 0);
5189 return -TARGET_EFAULT
;
5192 *host_rt_dev_ptr
= 0;
5197 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5198 argptr
+ src_offsets
[i
],
5199 field_types
, THUNK_HOST
);
5201 unlock_user(argptr
, arg
, 0);
5203 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5205 assert(host_rt_dev_ptr
!= NULL
);
5206 assert(target_rt_dev_ptr
!= NULL
);
5207 if (*host_rt_dev_ptr
!= 0) {
5208 unlock_user((void *)*host_rt_dev_ptr
,
5209 *target_rt_dev_ptr
, 0);
5214 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5215 int fd
, int cmd
, abi_long arg
)
5217 int sig
= target_to_host_signal(arg
);
5218 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5221 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5222 int fd
, int cmd
, abi_long arg
)
5227 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5228 if (is_error(ret
)) {
5232 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5233 if (copy_to_user_timeval(arg
, &tv
)) {
5234 return -TARGET_EFAULT
;
5237 if (copy_to_user_timeval64(arg
, &tv
)) {
5238 return -TARGET_EFAULT
;
5245 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5246 int fd
, int cmd
, abi_long arg
)
5251 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5252 if (is_error(ret
)) {
5256 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5257 if (host_to_target_timespec(arg
, &ts
)) {
5258 return -TARGET_EFAULT
;
5261 if (host_to_target_timespec64(arg
, &ts
)) {
5262 return -TARGET_EFAULT
;
5270 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5271 int fd
, int cmd
, abi_long arg
)
5273 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5274 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5278 static IOCTLEntry ioctl_entries
[] = {
5279 #define IOCTL(cmd, access, ...) \
5280 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5281 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5282 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5283 #define IOCTL_IGNORE(cmd) \
5284 { TARGET_ ## cmd, 0, #cmd },
5289 /* ??? Implement proper locking for ioctls. */
5290 /* do_ioctl() Must return target values and target errnos. */
5291 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5293 const IOCTLEntry
*ie
;
5294 const argtype
*arg_type
;
5296 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5302 if (ie
->target_cmd
== 0) {
5304 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5305 return -TARGET_ENOSYS
;
5307 if (ie
->target_cmd
== cmd
)
5311 arg_type
= ie
->arg_type
;
5313 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5314 } else if (!ie
->host_cmd
) {
5315 /* Some architectures define BSD ioctls in their headers
5316 that are not implemented in Linux. */
5317 return -TARGET_ENOSYS
;
5320 switch(arg_type
[0]) {
5323 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5329 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5333 target_size
= thunk_type_size(arg_type
, 0);
5334 switch(ie
->access
) {
5336 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5337 if (!is_error(ret
)) {
5338 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5340 return -TARGET_EFAULT
;
5341 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5342 unlock_user(argptr
, arg
, target_size
);
5346 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5348 return -TARGET_EFAULT
;
5349 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5350 unlock_user(argptr
, arg
, 0);
5351 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5355 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5357 return -TARGET_EFAULT
;
5358 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5359 unlock_user(argptr
, arg
, 0);
5360 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5361 if (!is_error(ret
)) {
5362 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5364 return -TARGET_EFAULT
;
5365 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5366 unlock_user(argptr
, arg
, target_size
);
5372 qemu_log_mask(LOG_UNIMP
,
5373 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5374 (long)cmd
, arg_type
[0]);
5375 ret
= -TARGET_ENOSYS
;
5381 static const bitmask_transtbl iflag_tbl
[] = {
5382 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5383 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5384 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5385 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5386 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5387 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5388 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5389 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5390 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5391 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5392 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5393 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5394 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5395 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5399 static const bitmask_transtbl oflag_tbl
[] = {
5400 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5401 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5402 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5403 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5404 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5405 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5406 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5407 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5408 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5409 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5410 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5411 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5412 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5413 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5414 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5415 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5416 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5417 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5418 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5419 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5420 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5421 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5422 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5423 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5427 static const bitmask_transtbl cflag_tbl
[] = {
5428 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5429 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5430 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5431 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5432 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5433 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5434 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5435 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5436 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5437 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5438 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5439 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5440 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5441 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5442 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5443 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5444 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5445 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5446 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5447 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5448 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5449 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5450 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5451 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5452 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5453 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5454 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5455 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5456 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5457 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5458 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5462 static const bitmask_transtbl lflag_tbl
[] = {
5463 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5464 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5465 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5466 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5467 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5468 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5469 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5470 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5471 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5472 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5473 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5474 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5475 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5476 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5477 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5481 static void target_to_host_termios (void *dst
, const void *src
)
5483 struct host_termios
*host
= dst
;
5484 const struct target_termios
*target
= src
;
5487 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5489 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5491 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5493 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5494 host
->c_line
= target
->c_line
;
5496 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5497 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5498 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5499 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5500 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5501 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5502 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5503 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5504 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5505 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5506 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5507 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5508 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5509 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5510 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5511 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5512 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5513 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5516 static void host_to_target_termios (void *dst
, const void *src
)
5518 struct target_termios
*target
= dst
;
5519 const struct host_termios
*host
= src
;
5522 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5524 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5526 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5528 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5529 target
->c_line
= host
->c_line
;
5531 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5532 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5533 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5534 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5535 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5536 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5537 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5538 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5539 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5540 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5541 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5542 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5543 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5544 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5545 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5546 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5547 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5548 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5551 static const StructEntry struct_termios_def
= {
5552 .convert
= { host_to_target_termios
, target_to_host_termios
},
5553 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5554 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5557 static bitmask_transtbl mmap_flags_tbl
[] = {
5558 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5559 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5560 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5561 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5562 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5563 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5564 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5565 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5566 MAP_DENYWRITE
, MAP_DENYWRITE
},
5567 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5568 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5569 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5570 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5571 MAP_NORESERVE
, MAP_NORESERVE
},
5572 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5573 /* MAP_STACK had been ignored by the kernel for quite some time.
5574 Recognize it for the target insofar as we do not want to pass
5575 it through to the host. */
5576 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5581 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5582 * TARGET_I386 is defined if TARGET_X86_64 is defined
5584 #if defined(TARGET_I386)
5586 /* NOTE: there is really one LDT for all the threads */
5587 static uint8_t *ldt_table
;
5589 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5596 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5597 if (size
> bytecount
)
5599 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5601 return -TARGET_EFAULT
;
5602     /* ??? Should this be byteswapped? */
5603 memcpy(p
, ldt_table
, size
);
5604 unlock_user(p
, ptr
, size
);
5608 /* XXX: add locking support */
5609 static abi_long
write_ldt(CPUX86State
*env
,
5610 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5612 struct target_modify_ldt_ldt_s ldt_info
;
5613 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5614 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5615 int seg_not_present
, useable
, lm
;
5616 uint32_t *lp
, entry_1
, entry_2
;
5618 if (bytecount
!= sizeof(ldt_info
))
5619 return -TARGET_EINVAL
;
5620 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5621 return -TARGET_EFAULT
;
5622 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5623 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5624 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5625 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5626 unlock_user_struct(target_ldt_info
, ptr
, 0);
5628 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5629 return -TARGET_EINVAL
;
5630 seg_32bit
= ldt_info
.flags
& 1;
5631 contents
= (ldt_info
.flags
>> 1) & 3;
5632 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5633 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5634 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5635 useable
= (ldt_info
.flags
>> 6) & 1;
5639 lm
= (ldt_info
.flags
>> 7) & 1;
5641 if (contents
== 3) {
5643 return -TARGET_EINVAL
;
5644 if (seg_not_present
== 0)
5645 return -TARGET_EINVAL
;
5647 /* allocate the LDT */
5649 env
->ldt
.base
= target_mmap(0,
5650 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5651 PROT_READ
|PROT_WRITE
,
5652 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5653 if (env
->ldt
.base
== -1)
5654 return -TARGET_ENOMEM
;
5655 memset(g2h(env
->ldt
.base
), 0,
5656 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5657 env
->ldt
.limit
= 0xffff;
5658 ldt_table
= g2h(env
->ldt
.base
);
5661 /* NOTE: same code as Linux kernel */
5662 /* Allow LDTs to be cleared by the user. */
5663 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5666 read_exec_only
== 1 &&
5668 limit_in_pages
== 0 &&
5669 seg_not_present
== 1 &&
5677 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5678 (ldt_info
.limit
& 0x0ffff);
5679 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5680 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5681 (ldt_info
.limit
& 0xf0000) |
5682 ((read_exec_only
^ 1) << 9) |
5684 ((seg_not_present
^ 1) << 15) |
5686 (limit_in_pages
<< 23) |
5690 entry_2
|= (useable
<< 20);
5692 /* Install the new entry ... */
5694 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5695 lp
[0] = tswap32(entry_1
);
5696 lp
[1] = tswap32(entry_2
);
5700 /* specific and weird i386 syscalls */
5701 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5702 unsigned long bytecount
)
5708 ret
= read_ldt(ptr
, bytecount
);
5711 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5714 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5717 ret
= -TARGET_ENOSYS
;
5723 #if defined(TARGET_ABI32)
5724 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5726 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5727 struct target_modify_ldt_ldt_s ldt_info
;
5728 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5729 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5730 int seg_not_present
, useable
, lm
;
5731 uint32_t *lp
, entry_1
, entry_2
;
5734 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5735 if (!target_ldt_info
)
5736 return -TARGET_EFAULT
;
5737 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5738 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5739 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5740 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5741 if (ldt_info
.entry_number
== -1) {
5742 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5743 if (gdt_table
[i
] == 0) {
5744 ldt_info
.entry_number
= i
;
5745 target_ldt_info
->entry_number
= tswap32(i
);
5750 unlock_user_struct(target_ldt_info
, ptr
, 1);
5752 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5753 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5754 return -TARGET_EINVAL
;
5755 seg_32bit
= ldt_info
.flags
& 1;
5756 contents
= (ldt_info
.flags
>> 1) & 3;
5757 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5758 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5759 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5760 useable
= (ldt_info
.flags
>> 6) & 1;
5764 lm
= (ldt_info
.flags
>> 7) & 1;
5767 if (contents
== 3) {
5768 if (seg_not_present
== 0)
5769 return -TARGET_EINVAL
;
5772 /* NOTE: same code as Linux kernel */
5773 /* Allow LDTs to be cleared by the user. */
5774 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5775 if ((contents
== 0 &&
5776 read_exec_only
== 1 &&
5778 limit_in_pages
== 0 &&
5779 seg_not_present
== 1 &&
5787 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5788 (ldt_info
.limit
& 0x0ffff);
5789 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5790 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5791 (ldt_info
.limit
& 0xf0000) |
5792 ((read_exec_only
^ 1) << 9) |
5794 ((seg_not_present
^ 1) << 15) |
5796 (limit_in_pages
<< 23) |
5801 /* Install the new entry ... */
5803 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5804 lp
[0] = tswap32(entry_1
);
5805 lp
[1] = tswap32(entry_2
);
5809 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5811 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5812 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5813 uint32_t base_addr
, limit
, flags
;
5814 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5815 int seg_not_present
, useable
, lm
;
5816 uint32_t *lp
, entry_1
, entry_2
;
5818 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5819 if (!target_ldt_info
)
5820 return -TARGET_EFAULT
;
5821 idx
= tswap32(target_ldt_info
->entry_number
);
5822 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5823 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5824 unlock_user_struct(target_ldt_info
, ptr
, 1);
5825 return -TARGET_EINVAL
;
5827 lp
= (uint32_t *)(gdt_table
+ idx
);
5828 entry_1
= tswap32(lp
[0]);
5829 entry_2
= tswap32(lp
[1]);
5831 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5832 contents
= (entry_2
>> 10) & 3;
5833 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5834 seg_32bit
= (entry_2
>> 22) & 1;
5835 limit_in_pages
= (entry_2
>> 23) & 1;
5836 useable
= (entry_2
>> 20) & 1;
5840 lm
= (entry_2
>> 21) & 1;
5842 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5843 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5844 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5845 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5846 base_addr
= (entry_1
>> 16) |
5847 (entry_2
& 0xff000000) |
5848 ((entry_2
& 0xff) << 16);
5849 target_ldt_info
->base_addr
= tswapal(base_addr
);
5850 target_ldt_info
->limit
= tswap32(limit
);
5851 target_ldt_info
->flags
= tswap32(flags
);
5852 unlock_user_struct(target_ldt_info
, ptr
, 1);
5856 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5861 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5868 case TARGET_ARCH_SET_GS
:
5869 case TARGET_ARCH_SET_FS
:
5870 if (code
== TARGET_ARCH_SET_GS
)
5874 cpu_x86_load_seg(env
, idx
, 0);
5875 env
->segs
[idx
].base
= addr
;
5877 case TARGET_ARCH_GET_GS
:
5878 case TARGET_ARCH_GET_FS
:
5879 if (code
== TARGET_ARCH_GET_GS
)
5883 val
= env
->segs
[idx
].base
;
5884 if (put_user(val
, addr
, abi_ulong
))
5885 ret
= -TARGET_EFAULT
;
5888 ret
= -TARGET_EINVAL
;
5893 #endif /* defined(TARGET_ABI32 */
5895 #endif /* defined(TARGET_I386) */
5897 #define NEW_STACK_SIZE 0x40000
5900 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5903 pthread_mutex_t mutex
;
5904 pthread_cond_t cond
;
5907 abi_ulong child_tidptr
;
5908 abi_ulong parent_tidptr
;
5912 static void *clone_func(void *arg
)
5914 new_thread_info
*info
= arg
;
5919 rcu_register_thread();
5920 tcg_register_thread();
5924 ts
= (TaskState
*)cpu
->opaque
;
5925 info
->tid
= sys_gettid();
5927 if (info
->child_tidptr
)
5928 put_user_u32(info
->tid
, info
->child_tidptr
);
5929 if (info
->parent_tidptr
)
5930 put_user_u32(info
->tid
, info
->parent_tidptr
);
5931 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
5932 /* Enable signals. */
5933 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5934 /* Signal to the parent that we're ready. */
5935 pthread_mutex_lock(&info
->mutex
);
5936 pthread_cond_broadcast(&info
->cond
);
5937 pthread_mutex_unlock(&info
->mutex
);
5938 /* Wait until the parent has finished initializing the tls state. */
5939 pthread_mutex_lock(&clone_lock
);
5940 pthread_mutex_unlock(&clone_lock
);
5946 /* do_fork() Must return host values and target errnos (unlike most
5947 do_*() functions). */
5948 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5949 abi_ulong parent_tidptr
, target_ulong newtls
,
5950 abi_ulong child_tidptr
)
5952 CPUState
*cpu
= env_cpu(env
);
5956 CPUArchState
*new_env
;
5959 flags
&= ~CLONE_IGNORED_FLAGS
;
5961 /* Emulate vfork() with fork() */
5962 if (flags
& CLONE_VFORK
)
5963 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5965 if (flags
& CLONE_VM
) {
5966 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5967 new_thread_info info
;
5968 pthread_attr_t attr
;
5970 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5971 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5972 return -TARGET_EINVAL
;
5975 ts
= g_new0(TaskState
, 1);
5976 init_task_state(ts
);
5978 /* Grab a mutex so that thread setup appears atomic. */
5979 pthread_mutex_lock(&clone_lock
);
5981 /* we create a new CPU instance. */
5982 new_env
= cpu_copy(env
);
5983 /* Init regs that differ from the parent. */
5984 cpu_clone_regs_child(new_env
, newsp
, flags
);
5985 cpu_clone_regs_parent(env
, flags
);
5986 new_cpu
= env_cpu(new_env
);
5987 new_cpu
->opaque
= ts
;
5988 ts
->bprm
= parent_ts
->bprm
;
5989 ts
->info
= parent_ts
->info
;
5990 ts
->signal_mask
= parent_ts
->signal_mask
;
5992 if (flags
& CLONE_CHILD_CLEARTID
) {
5993 ts
->child_tidptr
= child_tidptr
;
5996 if (flags
& CLONE_SETTLS
) {
5997 cpu_set_tls (new_env
, newtls
);
6000 memset(&info
, 0, sizeof(info
));
6001 pthread_mutex_init(&info
.mutex
, NULL
);
6002 pthread_mutex_lock(&info
.mutex
);
6003 pthread_cond_init(&info
.cond
, NULL
);
6005 if (flags
& CLONE_CHILD_SETTID
) {
6006 info
.child_tidptr
= child_tidptr
;
6008 if (flags
& CLONE_PARENT_SETTID
) {
6009 info
.parent_tidptr
= parent_tidptr
;
6012 ret
= pthread_attr_init(&attr
);
6013 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6014 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6015 /* It is not safe to deliver signals until the child has finished
6016 initializing, so temporarily block all signals. */
6017 sigfillset(&sigmask
);
6018 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6019 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6021 /* If this is our first additional thread, we need to ensure we
6022 * generate code for parallel execution and flush old translations.
6024 if (!parallel_cpus
) {
6025 parallel_cpus
= true;
6029 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6030 /* TODO: Free new CPU state if thread creation failed. */
6032 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6033 pthread_attr_destroy(&attr
);
6035 /* Wait for the child to initialize. */
6036 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6041 pthread_mutex_unlock(&info
.mutex
);
6042 pthread_cond_destroy(&info
.cond
);
6043 pthread_mutex_destroy(&info
.mutex
);
6044 pthread_mutex_unlock(&clone_lock
);
6046 /* if no CLONE_VM, we consider it is a fork */
6047 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6048 return -TARGET_EINVAL
;
6051 /* We can't support custom termination signals */
6052 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6053 return -TARGET_EINVAL
;
6056 if (block_signals()) {
6057 return -TARGET_ERESTARTSYS
;
6063 /* Child Process. */
6064 cpu_clone_regs_child(env
, newsp
, flags
);
6066 /* There is a race condition here. The parent process could
6067 theoretically read the TID in the child process before the child
6068 tid is set. This would require using either ptrace
6069 (not implemented) or having *_tidptr to point at a shared memory
6070 mapping. We can't repeat the spinlock hack used above because
6071 the child process gets its own copy of the lock. */
6072 if (flags
& CLONE_CHILD_SETTID
)
6073 put_user_u32(sys_gettid(), child_tidptr
);
6074 if (flags
& CLONE_PARENT_SETTID
)
6075 put_user_u32(sys_gettid(), parent_tidptr
);
6076 ts
= (TaskState
*)cpu
->opaque
;
6077 if (flags
& CLONE_SETTLS
)
6078 cpu_set_tls (env
, newtls
);
6079 if (flags
& CLONE_CHILD_CLEARTID
)
6080 ts
->child_tidptr
= child_tidptr
;
6082 cpu_clone_regs_parent(env
, flags
);
6089 /* warning : doesn't handle linux specific flags... */
6090 static int target_to_host_fcntl_cmd(int cmd
)
6095 case TARGET_F_DUPFD
:
6096 case TARGET_F_GETFD
:
6097 case TARGET_F_SETFD
:
6098 case TARGET_F_GETFL
:
6099 case TARGET_F_SETFL
:
6102 case TARGET_F_GETLK
:
6105 case TARGET_F_SETLK
:
6108 case TARGET_F_SETLKW
:
6111 case TARGET_F_GETOWN
:
6114 case TARGET_F_SETOWN
:
6117 case TARGET_F_GETSIG
:
6120 case TARGET_F_SETSIG
:
6123 #if TARGET_ABI_BITS == 32
6124 case TARGET_F_GETLK64
:
6127 case TARGET_F_SETLK64
:
6130 case TARGET_F_SETLKW64
:
6134 case TARGET_F_SETLEASE
:
6137 case TARGET_F_GETLEASE
:
6140 #ifdef F_DUPFD_CLOEXEC
6141 case TARGET_F_DUPFD_CLOEXEC
:
6142 ret
= F_DUPFD_CLOEXEC
;
6145 case TARGET_F_NOTIFY
:
6149 case TARGET_F_GETOWN_EX
:
6154 case TARGET_F_SETOWN_EX
:
6159 case TARGET_F_SETPIPE_SZ
:
6162 case TARGET_F_GETPIPE_SZ
:
6167 ret
= -TARGET_EINVAL
;
6171 #if defined(__powerpc64__)
6172 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6173 * is not supported by kernel. The glibc fcntl call actually adjusts
6174 * them to 5, 6 and 7 before making the syscall(). Since we make the
6175 * syscall directly, adjust to what is supported by the kernel.
6177 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6178 ret
-= F_GETLK64
- 5;
6185 #define FLOCK_TRANSTBL \
6187 TRANSTBL_CONVERT(F_RDLCK); \
6188 TRANSTBL_CONVERT(F_WRLCK); \
6189 TRANSTBL_CONVERT(F_UNLCK); \
6190 TRANSTBL_CONVERT(F_EXLCK); \
6191 TRANSTBL_CONVERT(F_SHLCK); \
6194 static int target_to_host_flock(int type
)
6196 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6198 #undef TRANSTBL_CONVERT
6199 return -TARGET_EINVAL
;
6202 static int host_to_target_flock(int type
)
6204 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6206 #undef TRANSTBL_CONVERT
6207 /* if we don't know how to convert the value coming
6208 * from the host we copy to the target field as-is
6213 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6214 abi_ulong target_flock_addr
)
6216 struct target_flock
*target_fl
;
6219 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6220 return -TARGET_EFAULT
;
6223 __get_user(l_type
, &target_fl
->l_type
);
6224 l_type
= target_to_host_flock(l_type
);
6228 fl
->l_type
= l_type
;
6229 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6230 __get_user(fl
->l_start
, &target_fl
->l_start
);
6231 __get_user(fl
->l_len
, &target_fl
->l_len
);
6232 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6233 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6237 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6238 const struct flock64
*fl
)
6240 struct target_flock
*target_fl
;
6243 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6244 return -TARGET_EFAULT
;
6247 l_type
= host_to_target_flock(fl
->l_type
);
6248 __put_user(l_type
, &target_fl
->l_type
);
6249 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6250 __put_user(fl
->l_start
, &target_fl
->l_start
);
6251 __put_user(fl
->l_len
, &target_fl
->l_len
);
6252 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6253 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6257 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6258 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6260 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6261 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6262 abi_ulong target_flock_addr
)
6264 struct target_oabi_flock64
*target_fl
;
6267 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6268 return -TARGET_EFAULT
;
6271 __get_user(l_type
, &target_fl
->l_type
);
6272 l_type
= target_to_host_flock(l_type
);
6276 fl
->l_type
= l_type
;
6277 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6278 __get_user(fl
->l_start
, &target_fl
->l_start
);
6279 __get_user(fl
->l_len
, &target_fl
->l_len
);
6280 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6281 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6285 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6286 const struct flock64
*fl
)
6288 struct target_oabi_flock64
*target_fl
;
6291 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6292 return -TARGET_EFAULT
;
6295 l_type
= host_to_target_flock(fl
->l_type
);
6296 __put_user(l_type
, &target_fl
->l_type
);
6297 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6298 __put_user(fl
->l_start
, &target_fl
->l_start
);
6299 __put_user(fl
->l_len
, &target_fl
->l_len
);
6300 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6301 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6306 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6307 abi_ulong target_flock_addr
)
6309 struct target_flock64
*target_fl
;
6312 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6313 return -TARGET_EFAULT
;
6316 __get_user(l_type
, &target_fl
->l_type
);
6317 l_type
= target_to_host_flock(l_type
);
6321 fl
->l_type
= l_type
;
6322 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6323 __get_user(fl
->l_start
, &target_fl
->l_start
);
6324 __get_user(fl
->l_len
, &target_fl
->l_len
);
6325 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6326 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6330 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6331 const struct flock64
*fl
)
6333 struct target_flock64
*target_fl
;
6336 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6337 return -TARGET_EFAULT
;
6340 l_type
= host_to_target_flock(fl
->l_type
);
6341 __put_user(l_type
, &target_fl
->l_type
);
6342 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6343 __put_user(fl
->l_start
, &target_fl
->l_start
);
6344 __put_user(fl
->l_len
, &target_fl
->l_len
);
6345 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6346 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6350 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6352 struct flock64 fl64
;
6354 struct f_owner_ex fox
;
6355 struct target_f_owner_ex
*target_fox
;
6358 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6360 if (host_cmd
== -TARGET_EINVAL
)
6364 case TARGET_F_GETLK
:
6365 ret
= copy_from_user_flock(&fl64
, arg
);
6369 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6371 ret
= copy_to_user_flock(arg
, &fl64
);
6375 case TARGET_F_SETLK
:
6376 case TARGET_F_SETLKW
:
6377 ret
= copy_from_user_flock(&fl64
, arg
);
6381 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6384 case TARGET_F_GETLK64
:
6385 ret
= copy_from_user_flock64(&fl64
, arg
);
6389 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6391 ret
= copy_to_user_flock64(arg
, &fl64
);
6394 case TARGET_F_SETLK64
:
6395 case TARGET_F_SETLKW64
:
6396 ret
= copy_from_user_flock64(&fl64
, arg
);
6400 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6403 case TARGET_F_GETFL
:
6404 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6406 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6410 case TARGET_F_SETFL
:
6411 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6412 target_to_host_bitmask(arg
,
6417 case TARGET_F_GETOWN_EX
:
6418 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6420 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6421 return -TARGET_EFAULT
;
6422 target_fox
->type
= tswap32(fox
.type
);
6423 target_fox
->pid
= tswap32(fox
.pid
);
6424 unlock_user_struct(target_fox
, arg
, 1);
6430 case TARGET_F_SETOWN_EX
:
6431 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6432 return -TARGET_EFAULT
;
6433 fox
.type
= tswap32(target_fox
->type
);
6434 fox
.pid
= tswap32(target_fox
->pid
);
6435 unlock_user_struct(target_fox
, arg
, 0);
6436 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6440 case TARGET_F_SETOWN
:
6441 case TARGET_F_GETOWN
:
6442 case TARGET_F_SETSIG
:
6443 case TARGET_F_GETSIG
:
6444 case TARGET_F_SETLEASE
:
6445 case TARGET_F_GETLEASE
:
6446 case TARGET_F_SETPIPE_SZ
:
6447 case TARGET_F_GETPIPE_SZ
:
6448 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6452 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/*
 * Helpers for targets with a 16-bit legacy UID/GID ABI: clamp 32-bit
 * host IDs to the 16-bit range, widen 16-bit guest IDs (preserving the
 * -1 "no change" sentinel), and byteswap at the 16-bit width.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID/GID ABI: all conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6524 /* We must do direct syscalls for setting UID/GID, because we want to
6525 * implement the Linux system call semantics of "change only for this thread",
6526 * not the libc/POSIX semantics of "change for all threads in process".
6527 * (See http://ewontfix.com/17/ for more details.)
6528 * We use the 32-bit version of the syscalls if present; if it is not
6529 * then either the host architecture supports 32-bit UIDs natively with
6530 * the standard syscall, or the 16-bit UID is the best we can do.
6532 #ifdef __NR_setuid32
6533 #define __NR_sys_setuid __NR_setuid32
6535 #define __NR_sys_setuid __NR_setuid
6537 #ifdef __NR_setgid32
6538 #define __NR_sys_setgid __NR_setgid32
6540 #define __NR_sys_setgid __NR_setgid
6542 #ifdef __NR_setresuid32
6543 #define __NR_sys_setresuid __NR_setresuid32
6545 #define __NR_sys_setresuid __NR_setresuid
6547 #ifdef __NR_setresgid32
6548 #define __NR_sys_setresgid __NR_setresgid32
6550 #define __NR_sys_setresgid __NR_setresgid
6553 _syscall1(int, sys_setuid
, uid_t
, uid
)
6554 _syscall1(int, sys_setgid
, gid_t
, gid
)
6555 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6556 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6558 void syscall_init(void)
6561 const argtype
*arg_type
;
6565 thunk_init(STRUCT_MAX
);
6567 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6568 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6569 #include "syscall_types.h"
6571 #undef STRUCT_SPECIAL
6573 /* Build target_to_host_errno_table[] table from
6574 * host_to_target_errno_table[]. */
6575 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6576 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6579 /* we patch the ioctl size if necessary. We rely on the fact that
6580 no ioctl has all the bits at '1' in the size field */
6582 while (ie
->target_cmd
!= 0) {
6583 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6584 TARGET_IOC_SIZEMASK
) {
6585 arg_type
= ie
->arg_type
;
6586 if (arg_type
[0] != TYPE_PTR
) {
6587 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6592 size
= thunk_type_size(arg_type
, 0);
6593 ie
->target_cmd
= (ie
->target_cmd
&
6594 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6595 (size
<< TARGET_IOC_SIZESHIFT
);
6598 /* automatic consistency check if same arch */
6599 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6600 (defined(__x86_64__) && defined(TARGET_X86_64))
6601 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6602 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6603 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/*
 * Combine the two 32-bit register halves of a 64-bit file offset,
 * honouring the guest's word order.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * Emulate truncate64: reassemble the 64-bit length from the guest's
 * register pair (skipping the alignment padding register when the ABI
 * requires register pairs to be aligned).
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * Emulate ftruncate64: same register-pair handling as target_truncate64
 * but operating on a file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest struct itimerspec into host representation.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
        tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Write a host struct itimerspec out to guest memory.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}
#endif
6700 #if defined(TARGET_NR_adjtimex) || \
6701 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6702 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6703 abi_long target_addr
)
6705 struct target_timex
*target_tx
;
6707 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6708 return -TARGET_EFAULT
;
6711 __get_user(host_tx
->modes
, &target_tx
->modes
);
6712 __get_user(host_tx
->offset
, &target_tx
->offset
);
6713 __get_user(host_tx
->freq
, &target_tx
->freq
);
6714 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6715 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6716 __get_user(host_tx
->status
, &target_tx
->status
);
6717 __get_user(host_tx
->constant
, &target_tx
->constant
);
6718 __get_user(host_tx
->precision
, &target_tx
->precision
);
6719 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6720 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6721 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6722 __get_user(host_tx
->tick
, &target_tx
->tick
);
6723 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6724 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6725 __get_user(host_tx
->shift
, &target_tx
->shift
);
6726 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6727 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6728 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6729 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6730 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6731 __get_user(host_tx
->tai
, &target_tx
->tai
);
6733 unlock_user_struct(target_tx
, target_addr
, 0);
6737 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6738 struct timex
*host_tx
)
6740 struct target_timex
*target_tx
;
6742 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6743 return -TARGET_EFAULT
;
6746 __put_user(host_tx
->modes
, &target_tx
->modes
);
6747 __put_user(host_tx
->offset
, &target_tx
->offset
);
6748 __put_user(host_tx
->freq
, &target_tx
->freq
);
6749 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6750 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6751 __put_user(host_tx
->status
, &target_tx
->status
);
6752 __put_user(host_tx
->constant
, &target_tx
->constant
);
6753 __put_user(host_tx
->precision
, &target_tx
->precision
);
6754 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6755 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6756 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6757 __put_user(host_tx
->tick
, &target_tx
->tick
);
6758 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6759 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6760 __put_user(host_tx
->shift
, &target_tx
->shift
);
6761 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6762 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6763 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6764 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6765 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6766 __put_user(host_tx
->tai
, &target_tx
->tai
);
6768 unlock_user_struct(target_tx
, target_addr
, 1);
6773 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6774 abi_ulong target_addr
)
6776 struct target_sigevent
*target_sevp
;
6778 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6779 return -TARGET_EFAULT
;
6782 /* This union is awkward on 64 bit systems because it has a 32 bit
6783 * integer and a pointer in it; we follow the conversion approach
6784 * used for handling sigval types in signal.c so the guest should get
6785 * the correct value back even if we did a 64 bit byteswap and it's
6786 * using the 32 bit integer.
6788 host_sevp
->sigev_value
.sival_ptr
=
6789 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6790 host_sevp
->sigev_signo
=
6791 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6792 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6793 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6795 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/*
 * Translate the guest mlockall() flags word into host MCL_* flags.
 * Unknown guest bits are silently dropped.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to a guest 64-bit stat structure,
 * selecting the ARM-EABI layout when the guest CPU runs in EABI mode.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a struct target_statx (already holding host-order values) out
 * to guest memory, byteswapping each field.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
/*
 * Issue the raw futex syscall, selecting between the classic and
 * _time64 syscall numbers depending on the host's time_t width.
 * Unreachable if the host provides neither syscall.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
6957 static int do_safe_futex(int *uaddr
, int op
, int val
,
6958 const struct timespec
*timeout
, int *uaddr2
,
6961 #if HOST_LONG_BITS == 64
6962 #if defined(__NR_futex)
6963 /* always a 64-bit time_t, it doesn't define _time64 version */
6964 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
6966 #else /* HOST_LONG_BITS == 64 */
6967 #if defined(__NR_futex_time64)
6968 if (sizeof(timeout
->tv_sec
) == 8) {
6969 /* _time64 function on 32bit arch */
6970 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
6974 #if defined(__NR_futex)
6975 /* old function on 32bit arch */
6976 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
6978 #endif /* HOST_LONG_BITS == 64 */
6979 return -TARGET_ENOSYS
;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
/*
 * Emulate futex(2) for the guest: converts the timeout (when the
 * operation takes one) and dispatches on the base operation.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/*
 * Emulate futex_time64(2): identical to do_futex except the guest
 * timeout is a 64-bit struct timespec.
 */
static int do_futex_time64(target_ulong uaddr, int op, int val,
                           target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec64(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): copies the guest-sized file_handle in
 * and out around the host call, byteswapping the header fields.
 * Returns the syscall result or a -TARGET_* errno.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): duplicates the guest file_handle,
 * fixes up its header fields for the host, and converts the open flags.
 * Returns the new fd or a -TARGET_* errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Emulate signalfd4(2): validates the guest flags, converts the guest
 * signal mask and flags to host encoding, and registers an fd
 * translator so siginfo read from the fd is converted back.
 * Returns the new fd or a -TARGET_* errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* translate the terminating signal, keep the core-dump bit etc. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8-15 of the status word */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7214 static int open_self_cmdline(void *cpu_env
, int fd
)
7216 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7217 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7220 for (i
= 0; i
< bprm
->argc
; i
++) {
7221 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7223 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7231 static int open_self_maps(void *cpu_env
, int fd
)
7233 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7234 TaskState
*ts
= cpu
->opaque
;
7240 fp
= fopen("/proc/self/maps", "r");
7245 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7246 int fields
, dev_maj
, dev_min
, inode
;
7247 uint64_t min
, max
, offset
;
7248 char flag_r
, flag_w
, flag_x
, flag_p
;
7249 char path
[512] = "";
7250 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7251 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7252 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7254 if ((fields
< 10) || (fields
> 11)) {
7257 if (h2g_valid(min
)) {
7258 int flags
= page_get_flags(h2g(min
));
7259 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
7260 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7263 if (h2g(min
) == ts
->info
->stack_limit
) {
7264 pstrcpy(path
, sizeof(path
), " [stack]");
7266 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7267 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7268 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7269 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7270 path
[0] ? " " : "", path
);
7274 #ifdef TARGET_VSYSCALL_PAGE
7276 * We only support execution from the vsyscall page.
7277 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7279 dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7280 " --xp 00000000 00:00 0 [vsyscall]\n",
7281 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7290 static int open_self_stat(void *cpu_env
, int fd
)
7292 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7293 TaskState
*ts
= cpu
->opaque
;
7294 abi_ulong start_stack
= ts
->info
->start_stack
;
7297 for (i
= 0; i
< 44; i
++) {
7305 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7306 } else if (i
== 1) {
7308 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7309 } else if (i
== 27) {
7312 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7314 /* for the rest, there is MasterCard */
7315 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7319 if (write(fd
, buf
, len
) != len
) {
7327 static int open_self_auxv(void *cpu_env
, int fd
)
7329 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7330 TaskState
*ts
= cpu
->opaque
;
7331 abi_ulong auxv
= ts
->info
->saved_auxv
;
7332 abi_ulong len
= ts
->info
->auxv_len
;
7336 * Auxiliary vector is stored in target process stack.
7337 * read in whole auxv vector and copy it to file
7339 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7343 r
= write(fd
, ptr
, len
);
7350 lseek(fd
, 0, SEEK_SET
);
7351 unlock_user(ptr
, auxv
, len
);
/*
 * Return nonzero iff @filename names @entry under this process's own
 * /proc directory, i.e. "/proc/self/<entry>" or "/proc/<getpid()>/<entry>".
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric pid only matches if it is our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7381 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7382 defined(TARGET_SPARC) || defined(TARGET_M68K)
/* Exact-path comparator used by the fake /proc open table. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7389 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7390 static int open_net_route(void *cpu_env
, int fd
)
7397 fp
= fopen("/proc/net/route", "r");
7404 read
= getline(&line
, &len
, fp
);
7405 dprintf(fd
, "%s", line
);
7409 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7411 uint32_t dest
, gw
, mask
;
7412 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7415 fields
= sscanf(line
,
7416 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7417 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7418 &mask
, &mtu
, &window
, &irtt
);
7422 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7423 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7424 metric
, tswap32(mask
), mtu
, window
, irtt
);
7434 #if defined(TARGET_SPARC)
/* Minimal /proc/cpuinfo for SPARC guests: report a sun4u machine. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
7442 #if defined(TARGET_M68K)
/* Minimal /proc/hardware for m68k guests: report the emulated model. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
7450 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7453 const char *filename
;
7454 int (*fill
)(void *cpu_env
, int fd
);
7455 int (*cmp
)(const char *s1
, const char *s2
);
7457 const struct fake_open
*fake_open
;
7458 static const struct fake_open fakes
[] = {
7459 { "maps", open_self_maps
, is_proc_myself
},
7460 { "stat", open_self_stat
, is_proc_myself
},
7461 { "auxv", open_self_auxv
, is_proc_myself
},
7462 { "cmdline", open_self_cmdline
, is_proc_myself
},
7463 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7464 { "/proc/net/route", open_net_route
, is_proc
},
7466 #if defined(TARGET_SPARC)
7467 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
7469 #if defined(TARGET_M68K)
7470 { "/proc/hardware", open_hardware
, is_proc
},
7472 { NULL
, NULL
, NULL
}
7475 if (is_proc_myself(pathname
, "exe")) {
7476 int execfd
= qemu_getauxval(AT_EXECFD
);
7477 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7480 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7481 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7486 if (fake_open
->filename
) {
7488 char filename
[PATH_MAX
];
7491 /* create temporary file to map stat to */
7492 tmpdir
= getenv("TMPDIR");
7495 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7496 fd
= mkstemp(filename
);
7502 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7508 lseek(fd
, 0, SEEK_SET
);
7513 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7516 #define TIMER_MAGIC 0x0caf0000
7517 #define TIMER_MAGIC_MASK 0xffff0000
7519 /* Convert QEMU provided timer ID back to internal 16bit index format */
7520 static target_timer_t
get_timer_id(abi_long arg
)
7522 target_timer_t timerid
= arg
;
7524 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7525 return -TARGET_EINVAL
;
7530 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7531 return -TARGET_EINVAL
;
7537 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7539 abi_ulong target_addr
,
7542 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7543 unsigned host_bits
= sizeof(*host_mask
) * 8;
7544 abi_ulong
*target_mask
;
7547 assert(host_size
>= target_size
);
7549 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7551 return -TARGET_EFAULT
;
7553 memset(host_mask
, 0, host_size
);
7555 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7556 unsigned bit
= i
* target_bits
;
7559 __get_user(val
, &target_mask
[i
]);
7560 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7561 if (val
& (1UL << j
)) {
7562 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7567 unlock_user(target_mask
, target_addr
, 0);
7571 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7573 abi_ulong target_addr
,
7576 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7577 unsigned host_bits
= sizeof(*host_mask
) * 8;
7578 abi_ulong
*target_mask
;
7581 assert(host_size
>= target_size
);
7583 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7585 return -TARGET_EFAULT
;
7588 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7589 unsigned bit
= i
* target_bits
;
7592 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7593 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7597 __put_user(val
, &target_mask
[i
]);
7600 unlock_user(target_mask
, target_addr
, target_size
);
7604 /* This is an internal helper for do_syscall so that it is easier
7605 * to have a single return point, so that actions, such as logging
7606 * of syscall results, can be performed.
7607 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7609 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7610 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7611 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7614 CPUState
*cpu
= env_cpu(cpu_env
);
7616 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7617 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7618 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7619 || defined(TARGET_NR_statx)
7622 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7623 || defined(TARGET_NR_fstatfs)
7629 case TARGET_NR_exit
:
7630 /* In old applications this may be used to implement _exit(2).
7631 However in threaded applictions it is used for thread termination,
7632 and _exit_group is used for application termination.
7633 Do thread termination if we have more then one thread. */
7635 if (block_signals()) {
7636 return -TARGET_ERESTARTSYS
;
7641 if (CPU_NEXT(first_cpu
)) {
7644 /* Remove the CPU from the list. */
7645 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
7650 if (ts
->child_tidptr
) {
7651 put_user_u32(0, ts
->child_tidptr
);
7652 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7656 object_unref(OBJECT(cpu
));
7658 rcu_unregister_thread();
7663 preexit_cleanup(cpu_env
, arg1
);
7665 return 0; /* avoid warning */
7666 case TARGET_NR_read
:
7667 if (arg2
== 0 && arg3
== 0) {
7668 return get_errno(safe_read(arg1
, 0, 0));
7670 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7671 return -TARGET_EFAULT
;
7672 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7674 fd_trans_host_to_target_data(arg1
)) {
7675 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7677 unlock_user(p
, arg2
, ret
);
7680 case TARGET_NR_write
:
7681 if (arg2
== 0 && arg3
== 0) {
7682 return get_errno(safe_write(arg1
, 0, 0));
7684 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7685 return -TARGET_EFAULT
;
7686 if (fd_trans_target_to_host_data(arg1
)) {
7687 void *copy
= g_malloc(arg3
);
7688 memcpy(copy
, p
, arg3
);
7689 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7691 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7695 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7697 unlock_user(p
, arg2
, 0);
7700 #ifdef TARGET_NR_open
7701 case TARGET_NR_open
:
7702 if (!(p
= lock_user_string(arg1
)))
7703 return -TARGET_EFAULT
;
7704 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7705 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7707 fd_trans_unregister(ret
);
7708 unlock_user(p
, arg1
, 0);
7711 case TARGET_NR_openat
:
7712 if (!(p
= lock_user_string(arg2
)))
7713 return -TARGET_EFAULT
;
7714 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7715 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7717 fd_trans_unregister(ret
);
7718 unlock_user(p
, arg2
, 0);
7720 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7721 case TARGET_NR_name_to_handle_at
:
7722 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7725 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7726 case TARGET_NR_open_by_handle_at
:
7727 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7728 fd_trans_unregister(ret
);
7731 case TARGET_NR_close
:
7732 fd_trans_unregister(arg1
);
7733 return get_errno(close(arg1
));
7736 return do_brk(arg1
);
7737 #ifdef TARGET_NR_fork
7738 case TARGET_NR_fork
:
7739 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7741 #ifdef TARGET_NR_waitpid
7742 case TARGET_NR_waitpid
:
7745 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7746 if (!is_error(ret
) && arg2
&& ret
7747 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7748 return -TARGET_EFAULT
;
7752 #ifdef TARGET_NR_waitid
7753 case TARGET_NR_waitid
:
7757 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7758 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7759 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7760 return -TARGET_EFAULT
;
7761 host_to_target_siginfo(p
, &info
);
7762 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7767 #ifdef TARGET_NR_creat /* not on alpha */
7768 case TARGET_NR_creat
:
7769 if (!(p
= lock_user_string(arg1
)))
7770 return -TARGET_EFAULT
;
7771 ret
= get_errno(creat(p
, arg2
));
7772 fd_trans_unregister(ret
);
7773 unlock_user(p
, arg1
, 0);
7776 #ifdef TARGET_NR_link
7777 case TARGET_NR_link
:
7780 p
= lock_user_string(arg1
);
7781 p2
= lock_user_string(arg2
);
7783 ret
= -TARGET_EFAULT
;
7785 ret
= get_errno(link(p
, p2
));
7786 unlock_user(p2
, arg2
, 0);
7787 unlock_user(p
, arg1
, 0);
7791 #if defined(TARGET_NR_linkat)
7792 case TARGET_NR_linkat
:
7796 return -TARGET_EFAULT
;
7797 p
= lock_user_string(arg2
);
7798 p2
= lock_user_string(arg4
);
7800 ret
= -TARGET_EFAULT
;
7802 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7803 unlock_user(p
, arg2
, 0);
7804 unlock_user(p2
, arg4
, 0);
7808 #ifdef TARGET_NR_unlink
7809 case TARGET_NR_unlink
:
7810 if (!(p
= lock_user_string(arg1
)))
7811 return -TARGET_EFAULT
;
7812 ret
= get_errno(unlink(p
));
7813 unlock_user(p
, arg1
, 0);
7816 #if defined(TARGET_NR_unlinkat)
7817 case TARGET_NR_unlinkat
:
7818 if (!(p
= lock_user_string(arg2
)))
7819 return -TARGET_EFAULT
;
7820 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7821 unlock_user(p
, arg2
, 0);
7824 case TARGET_NR_execve
:
7826 char **argp
, **envp
;
7829 abi_ulong guest_argp
;
7830 abi_ulong guest_envp
;
7837 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7838 if (get_user_ual(addr
, gp
))
7839 return -TARGET_EFAULT
;
7846 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7847 if (get_user_ual(addr
, gp
))
7848 return -TARGET_EFAULT
;
7854 argp
= g_new0(char *, argc
+ 1);
7855 envp
= g_new0(char *, envc
+ 1);
7857 for (gp
= guest_argp
, q
= argp
; gp
;
7858 gp
+= sizeof(abi_ulong
), q
++) {
7859 if (get_user_ual(addr
, gp
))
7863 if (!(*q
= lock_user_string(addr
)))
7865 total_size
+= strlen(*q
) + 1;
7869 for (gp
= guest_envp
, q
= envp
; gp
;
7870 gp
+= sizeof(abi_ulong
), q
++) {
7871 if (get_user_ual(addr
, gp
))
7875 if (!(*q
= lock_user_string(addr
)))
7877 total_size
+= strlen(*q
) + 1;
7881 if (!(p
= lock_user_string(arg1
)))
7883 /* Although execve() is not an interruptible syscall it is
7884 * a special case where we must use the safe_syscall wrapper:
7885 * if we allow a signal to happen before we make the host
7886 * syscall then we will 'lose' it, because at the point of
7887 * execve the process leaves QEMU's control. So we use the
7888 * safe syscall wrapper to ensure that we either take the
7889 * signal as a guest signal, or else it does not happen
7890 * before the execve completes and makes it the other
7891 * program's problem.
7893 ret
= get_errno(safe_execve(p
, argp
, envp
));
7894 unlock_user(p
, arg1
, 0);
7899 ret
= -TARGET_EFAULT
;
7902 for (gp
= guest_argp
, q
= argp
; *q
;
7903 gp
+= sizeof(abi_ulong
), q
++) {
7904 if (get_user_ual(addr
, gp
)
7907 unlock_user(*q
, addr
, 0);
7909 for (gp
= guest_envp
, q
= envp
; *q
;
7910 gp
+= sizeof(abi_ulong
), q
++) {
7911 if (get_user_ual(addr
, gp
)
7914 unlock_user(*q
, addr
, 0);
7921 case TARGET_NR_chdir
:
7922 if (!(p
= lock_user_string(arg1
)))
7923 return -TARGET_EFAULT
;
7924 ret
= get_errno(chdir(p
));
7925 unlock_user(p
, arg1
, 0);
7927 #ifdef TARGET_NR_time
7928 case TARGET_NR_time
:
7931 ret
= get_errno(time(&host_time
));
7934 && put_user_sal(host_time
, arg1
))
7935 return -TARGET_EFAULT
;
7939 #ifdef TARGET_NR_mknod
7940 case TARGET_NR_mknod
:
7941 if (!(p
= lock_user_string(arg1
)))
7942 return -TARGET_EFAULT
;
7943 ret
= get_errno(mknod(p
, arg2
, arg3
));
7944 unlock_user(p
, arg1
, 0);
7947 #if defined(TARGET_NR_mknodat)
7948 case TARGET_NR_mknodat
:
7949 if (!(p
= lock_user_string(arg2
)))
7950 return -TARGET_EFAULT
;
7951 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7952 unlock_user(p
, arg2
, 0);
7955 #ifdef TARGET_NR_chmod
7956 case TARGET_NR_chmod
:
7957 if (!(p
= lock_user_string(arg1
)))
7958 return -TARGET_EFAULT
;
7959 ret
= get_errno(chmod(p
, arg2
));
7960 unlock_user(p
, arg1
, 0);
7963 #ifdef TARGET_NR_lseek
7964 case TARGET_NR_lseek
:
7965 return get_errno(lseek(arg1
, arg2
, arg3
));
7967 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7968 /* Alpha specific */
7969 case TARGET_NR_getxpid
:
7970 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7971 return get_errno(getpid());
7973 #ifdef TARGET_NR_getpid
7974 case TARGET_NR_getpid
:
7975 return get_errno(getpid());
7977 case TARGET_NR_mount
:
7979 /* need to look at the data field */
7983 p
= lock_user_string(arg1
);
7985 return -TARGET_EFAULT
;
7991 p2
= lock_user_string(arg2
);
7994 unlock_user(p
, arg1
, 0);
7996 return -TARGET_EFAULT
;
8000 p3
= lock_user_string(arg3
);
8003 unlock_user(p
, arg1
, 0);
8005 unlock_user(p2
, arg2
, 0);
8006 return -TARGET_EFAULT
;
8012 /* FIXME - arg5 should be locked, but it isn't clear how to
8013 * do that since it's not guaranteed to be a NULL-terminated
8017 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8019 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8021 ret
= get_errno(ret
);
8024 unlock_user(p
, arg1
, 0);
8026 unlock_user(p2
, arg2
, 0);
8028 unlock_user(p3
, arg3
, 0);
8032 #ifdef TARGET_NR_umount
8033 case TARGET_NR_umount
:
8034 if (!(p
= lock_user_string(arg1
)))
8035 return -TARGET_EFAULT
;
8036 ret
= get_errno(umount(p
));
8037 unlock_user(p
, arg1
, 0);
8040 #ifdef TARGET_NR_stime /* not on alpha */
8041 case TARGET_NR_stime
:
8045 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8046 return -TARGET_EFAULT
;
8048 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8051 #ifdef TARGET_NR_alarm /* not on alpha */
8052 case TARGET_NR_alarm
:
8055 #ifdef TARGET_NR_pause /* not on alpha */
8056 case TARGET_NR_pause
:
8057 if (!block_signals()) {
8058 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8060 return -TARGET_EINTR
;
8062 #ifdef TARGET_NR_utime
8063 case TARGET_NR_utime
:
8065 struct utimbuf tbuf
, *host_tbuf
;
8066 struct target_utimbuf
*target_tbuf
;
8068 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8069 return -TARGET_EFAULT
;
8070 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8071 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8072 unlock_user_struct(target_tbuf
, arg2
, 0);
8077 if (!(p
= lock_user_string(arg1
)))
8078 return -TARGET_EFAULT
;
8079 ret
= get_errno(utime(p
, host_tbuf
));
8080 unlock_user(p
, arg1
, 0);
8084 #ifdef TARGET_NR_utimes
8085 case TARGET_NR_utimes
:
8087 struct timeval
*tvp
, tv
[2];
8089 if (copy_from_user_timeval(&tv
[0], arg2
)
8090 || copy_from_user_timeval(&tv
[1],
8091 arg2
+ sizeof(struct target_timeval
)))
8092 return -TARGET_EFAULT
;
8097 if (!(p
= lock_user_string(arg1
)))
8098 return -TARGET_EFAULT
;
8099 ret
= get_errno(utimes(p
, tvp
));
8100 unlock_user(p
, arg1
, 0);
8104 #if defined(TARGET_NR_futimesat)
8105 case TARGET_NR_futimesat
:
8107 struct timeval
*tvp
, tv
[2];
8109 if (copy_from_user_timeval(&tv
[0], arg3
)
8110 || copy_from_user_timeval(&tv
[1],
8111 arg3
+ sizeof(struct target_timeval
)))
8112 return -TARGET_EFAULT
;
8117 if (!(p
= lock_user_string(arg2
))) {
8118 return -TARGET_EFAULT
;
8120 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8121 unlock_user(p
, arg2
, 0);
8125 #ifdef TARGET_NR_access
8126 case TARGET_NR_access
:
8127 if (!(p
= lock_user_string(arg1
))) {
8128 return -TARGET_EFAULT
;
8130 ret
= get_errno(access(path(p
), arg2
));
8131 unlock_user(p
, arg1
, 0);
8134 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8135 case TARGET_NR_faccessat
:
8136 if (!(p
= lock_user_string(arg2
))) {
8137 return -TARGET_EFAULT
;
8139 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8140 unlock_user(p
, arg2
, 0);
8143 #ifdef TARGET_NR_nice /* not on alpha */
8144 case TARGET_NR_nice
:
8145 return get_errno(nice(arg1
));
8147 case TARGET_NR_sync
:
8150 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8151 case TARGET_NR_syncfs
:
8152 return get_errno(syncfs(arg1
));
8154 case TARGET_NR_kill
:
8155 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8156 #ifdef TARGET_NR_rename
8157 case TARGET_NR_rename
:
8160 p
= lock_user_string(arg1
);
8161 p2
= lock_user_string(arg2
);
8163 ret
= -TARGET_EFAULT
;
8165 ret
= get_errno(rename(p
, p2
));
8166 unlock_user(p2
, arg2
, 0);
8167 unlock_user(p
, arg1
, 0);
8171 #if defined(TARGET_NR_renameat)
8172 case TARGET_NR_renameat
:
8175 p
= lock_user_string(arg2
);
8176 p2
= lock_user_string(arg4
);
8178 ret
= -TARGET_EFAULT
;
8180 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8181 unlock_user(p2
, arg4
, 0);
8182 unlock_user(p
, arg2
, 0);
8186 #if defined(TARGET_NR_renameat2)
8187 case TARGET_NR_renameat2
:
8190 p
= lock_user_string(arg2
);
8191 p2
= lock_user_string(arg4
);
8193 ret
= -TARGET_EFAULT
;
8195 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8197 unlock_user(p2
, arg4
, 0);
8198 unlock_user(p
, arg2
, 0);
8202 #ifdef TARGET_NR_mkdir
8203 case TARGET_NR_mkdir
:
8204 if (!(p
= lock_user_string(arg1
)))
8205 return -TARGET_EFAULT
;
8206 ret
= get_errno(mkdir(p
, arg2
));
8207 unlock_user(p
, arg1
, 0);
8210 #if defined(TARGET_NR_mkdirat)
8211 case TARGET_NR_mkdirat
:
8212 if (!(p
= lock_user_string(arg2
)))
8213 return -TARGET_EFAULT
;
8214 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8215 unlock_user(p
, arg2
, 0);
8218 #ifdef TARGET_NR_rmdir
8219 case TARGET_NR_rmdir
:
8220 if (!(p
= lock_user_string(arg1
)))
8221 return -TARGET_EFAULT
;
8222 ret
= get_errno(rmdir(p
));
8223 unlock_user(p
, arg1
, 0);
8227 ret
= get_errno(dup(arg1
));
8229 fd_trans_dup(arg1
, ret
);
8232 #ifdef TARGET_NR_pipe
8233 case TARGET_NR_pipe
:
8234 return do_pipe(cpu_env
, arg1
, 0, 0);
8236 #ifdef TARGET_NR_pipe2
8237 case TARGET_NR_pipe2
:
8238 return do_pipe(cpu_env
, arg1
,
8239 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8241 case TARGET_NR_times
:
8243 struct target_tms
*tmsp
;
8245 ret
= get_errno(times(&tms
));
8247 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8249 return -TARGET_EFAULT
;
8250 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8251 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8252 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8253 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8256 ret
= host_to_target_clock_t(ret
);
8259 case TARGET_NR_acct
:
8261 ret
= get_errno(acct(NULL
));
8263 if (!(p
= lock_user_string(arg1
))) {
8264 return -TARGET_EFAULT
;
8266 ret
= get_errno(acct(path(p
)));
8267 unlock_user(p
, arg1
, 0);
8270 #ifdef TARGET_NR_umount2
8271 case TARGET_NR_umount2
:
8272 if (!(p
= lock_user_string(arg1
)))
8273 return -TARGET_EFAULT
;
8274 ret
= get_errno(umount2(p
, arg2
));
8275 unlock_user(p
, arg1
, 0);
8278 case TARGET_NR_ioctl
:
8279 return do_ioctl(arg1
, arg2
, arg3
);
8280 #ifdef TARGET_NR_fcntl
8281 case TARGET_NR_fcntl
:
8282 return do_fcntl(arg1
, arg2
, arg3
);
8284 case TARGET_NR_setpgid
:
8285 return get_errno(setpgid(arg1
, arg2
));
8286 case TARGET_NR_umask
:
8287 return get_errno(umask(arg1
));
8288 case TARGET_NR_chroot
:
8289 if (!(p
= lock_user_string(arg1
)))
8290 return -TARGET_EFAULT
;
8291 ret
= get_errno(chroot(p
));
8292 unlock_user(p
, arg1
, 0);
8294 #ifdef TARGET_NR_dup2
8295 case TARGET_NR_dup2
:
8296 ret
= get_errno(dup2(arg1
, arg2
));
8298 fd_trans_dup(arg1
, arg2
);
8302 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8303 case TARGET_NR_dup3
:
8307 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8310 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8311 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8313 fd_trans_dup(arg1
, arg2
);
8318 #ifdef TARGET_NR_getppid /* not on alpha */
8319 case TARGET_NR_getppid
:
8320 return get_errno(getppid());
8322 #ifdef TARGET_NR_getpgrp
8323 case TARGET_NR_getpgrp
:
8324 return get_errno(getpgrp());
8326 case TARGET_NR_setsid
:
8327 return get_errno(setsid());
8328 #ifdef TARGET_NR_sigaction
8329 case TARGET_NR_sigaction
:
8331 #if defined(TARGET_ALPHA)
8332 struct target_sigaction act
, oact
, *pact
= 0;
8333 struct target_old_sigaction
*old_act
;
8335 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8336 return -TARGET_EFAULT
;
8337 act
._sa_handler
= old_act
->_sa_handler
;
8338 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8339 act
.sa_flags
= old_act
->sa_flags
;
8340 act
.sa_restorer
= 0;
8341 unlock_user_struct(old_act
, arg2
, 0);
8344 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8345 if (!is_error(ret
) && arg3
) {
8346 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8347 return -TARGET_EFAULT
;
8348 old_act
->_sa_handler
= oact
._sa_handler
;
8349 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8350 old_act
->sa_flags
= oact
.sa_flags
;
8351 unlock_user_struct(old_act
, arg3
, 1);
8353 #elif defined(TARGET_MIPS)
8354 struct target_sigaction act
, oact
, *pact
, *old_act
;
8357 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8358 return -TARGET_EFAULT
;
8359 act
._sa_handler
= old_act
->_sa_handler
;
8360 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8361 act
.sa_flags
= old_act
->sa_flags
;
8362 unlock_user_struct(old_act
, arg2
, 0);
8368 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8370 if (!is_error(ret
) && arg3
) {
8371 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8372 return -TARGET_EFAULT
;
8373 old_act
->_sa_handler
= oact
._sa_handler
;
8374 old_act
->sa_flags
= oact
.sa_flags
;
8375 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8376 old_act
->sa_mask
.sig
[1] = 0;
8377 old_act
->sa_mask
.sig
[2] = 0;
8378 old_act
->sa_mask
.sig
[3] = 0;
8379 unlock_user_struct(old_act
, arg3
, 1);
8382 struct target_old_sigaction
*old_act
;
8383 struct target_sigaction act
, oact
, *pact
;
8385 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8386 return -TARGET_EFAULT
;
8387 act
._sa_handler
= old_act
->_sa_handler
;
8388 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8389 act
.sa_flags
= old_act
->sa_flags
;
8390 act
.sa_restorer
= old_act
->sa_restorer
;
8391 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8392 act
.ka_restorer
= 0;
8394 unlock_user_struct(old_act
, arg2
, 0);
8399 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8400 if (!is_error(ret
) && arg3
) {
8401 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8402 return -TARGET_EFAULT
;
8403 old_act
->_sa_handler
= oact
._sa_handler
;
8404 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8405 old_act
->sa_flags
= oact
.sa_flags
;
8406 old_act
->sa_restorer
= oact
.sa_restorer
;
8407 unlock_user_struct(old_act
, arg3
, 1);
8413 case TARGET_NR_rt_sigaction
:
8415 #if defined(TARGET_ALPHA)
8416 /* For Alpha and SPARC this is a 5 argument syscall, with
8417 * a 'restorer' parameter which must be copied into the
8418 * sa_restorer field of the sigaction struct.
8419 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8420 * and arg5 is the sigsetsize.
8421 * Alpha also has a separate rt_sigaction struct that it uses
8422 * here; SPARC uses the usual sigaction struct.
8424 struct target_rt_sigaction
*rt_act
;
8425 struct target_sigaction act
, oact
, *pact
= 0;
8427 if (arg4
!= sizeof(target_sigset_t
)) {
8428 return -TARGET_EINVAL
;
8431 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8432 return -TARGET_EFAULT
;
8433 act
._sa_handler
= rt_act
->_sa_handler
;
8434 act
.sa_mask
= rt_act
->sa_mask
;
8435 act
.sa_flags
= rt_act
->sa_flags
;
8436 act
.sa_restorer
= arg5
;
8437 unlock_user_struct(rt_act
, arg2
, 0);
8440 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8441 if (!is_error(ret
) && arg3
) {
8442 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8443 return -TARGET_EFAULT
;
8444 rt_act
->_sa_handler
= oact
._sa_handler
;
8445 rt_act
->sa_mask
= oact
.sa_mask
;
8446 rt_act
->sa_flags
= oact
.sa_flags
;
8447 unlock_user_struct(rt_act
, arg3
, 1);
8451 target_ulong restorer
= arg4
;
8452 target_ulong sigsetsize
= arg5
;
8454 target_ulong sigsetsize
= arg4
;
8456 struct target_sigaction
*act
;
8457 struct target_sigaction
*oact
;
8459 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8460 return -TARGET_EINVAL
;
8463 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8464 return -TARGET_EFAULT
;
8466 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8467 act
->ka_restorer
= restorer
;
8473 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8474 ret
= -TARGET_EFAULT
;
8475 goto rt_sigaction_fail
;
8479 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8482 unlock_user_struct(act
, arg2
, 0);
8484 unlock_user_struct(oact
, arg3
, 1);
8488 #ifdef TARGET_NR_sgetmask /* not on alpha */
8489 case TARGET_NR_sgetmask
:
8492 abi_ulong target_set
;
8493 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8495 host_to_target_old_sigset(&target_set
, &cur_set
);
8501 #ifdef TARGET_NR_ssetmask /* not on alpha */
8502 case TARGET_NR_ssetmask
:
8505 abi_ulong target_set
= arg1
;
8506 target_to_host_old_sigset(&set
, &target_set
);
8507 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8509 host_to_target_old_sigset(&target_set
, &oset
);
8515 #ifdef TARGET_NR_sigprocmask
8516 case TARGET_NR_sigprocmask
:
8518 #if defined(TARGET_ALPHA)
8519 sigset_t set
, oldset
;
8524 case TARGET_SIG_BLOCK
:
8527 case TARGET_SIG_UNBLOCK
:
8530 case TARGET_SIG_SETMASK
:
8534 return -TARGET_EINVAL
;
8537 target_to_host_old_sigset(&set
, &mask
);
8539 ret
= do_sigprocmask(how
, &set
, &oldset
);
8540 if (!is_error(ret
)) {
8541 host_to_target_old_sigset(&mask
, &oldset
);
8543 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8546 sigset_t set
, oldset
, *set_ptr
;
8551 case TARGET_SIG_BLOCK
:
8554 case TARGET_SIG_UNBLOCK
:
8557 case TARGET_SIG_SETMASK
:
8561 return -TARGET_EINVAL
;
8563 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8564 return -TARGET_EFAULT
;
8565 target_to_host_old_sigset(&set
, p
);
8566 unlock_user(p
, arg2
, 0);
8572 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8573 if (!is_error(ret
) && arg3
) {
8574 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8575 return -TARGET_EFAULT
;
8576 host_to_target_old_sigset(p
, &oldset
);
8577 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8583 case TARGET_NR_rt_sigprocmask
:
8586 sigset_t set
, oldset
, *set_ptr
;
8588 if (arg4
!= sizeof(target_sigset_t
)) {
8589 return -TARGET_EINVAL
;
8594 case TARGET_SIG_BLOCK
:
8597 case TARGET_SIG_UNBLOCK
:
8600 case TARGET_SIG_SETMASK
:
8604 return -TARGET_EINVAL
;
8606 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8607 return -TARGET_EFAULT
;
8608 target_to_host_sigset(&set
, p
);
8609 unlock_user(p
, arg2
, 0);
8615 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8616 if (!is_error(ret
) && arg3
) {
8617 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8618 return -TARGET_EFAULT
;
8619 host_to_target_sigset(p
, &oldset
);
8620 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8624 #ifdef TARGET_NR_sigpending
8625 case TARGET_NR_sigpending
:
8628 ret
= get_errno(sigpending(&set
));
8629 if (!is_error(ret
)) {
8630 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8631 return -TARGET_EFAULT
;
8632 host_to_target_old_sigset(p
, &set
);
8633 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8638 case TARGET_NR_rt_sigpending
:
8642 /* Yes, this check is >, not != like most. We follow the kernel's
8643 * logic and it does it like this because it implements
8644 * NR_sigpending through the same code path, and in that case
8645 * the old_sigset_t is smaller in size.
8647 if (arg2
> sizeof(target_sigset_t
)) {
8648 return -TARGET_EINVAL
;
8651 ret
= get_errno(sigpending(&set
));
8652 if (!is_error(ret
)) {
8653 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8654 return -TARGET_EFAULT
;
8655 host_to_target_sigset(p
, &set
);
8656 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8660 #ifdef TARGET_NR_sigsuspend
8661 case TARGET_NR_sigsuspend
:
8663 TaskState
*ts
= cpu
->opaque
;
8664 #if defined(TARGET_ALPHA)
8665 abi_ulong mask
= arg1
;
8666 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8668 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8669 return -TARGET_EFAULT
;
8670 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8671 unlock_user(p
, arg1
, 0);
8673 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8675 if (ret
!= -TARGET_ERESTARTSYS
) {
8676 ts
->in_sigsuspend
= 1;
8681 case TARGET_NR_rt_sigsuspend
:
8683 TaskState
*ts
= cpu
->opaque
;
8685 if (arg2
!= sizeof(target_sigset_t
)) {
8686 return -TARGET_EINVAL
;
8688 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8689 return -TARGET_EFAULT
;
8690 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8691 unlock_user(p
, arg1
, 0);
8692 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8694 if (ret
!= -TARGET_ERESTARTSYS
) {
8695 ts
->in_sigsuspend
= 1;
8699 #ifdef TARGET_NR_rt_sigtimedwait
8700 case TARGET_NR_rt_sigtimedwait
:
8703 struct timespec uts
, *puts
;
8706 if (arg4
!= sizeof(target_sigset_t
)) {
8707 return -TARGET_EINVAL
;
8710 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8711 return -TARGET_EFAULT
;
8712 target_to_host_sigset(&set
, p
);
8713 unlock_user(p
, arg1
, 0);
8716 target_to_host_timespec(puts
, arg3
);
8720 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8722 if (!is_error(ret
)) {
8724 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8727 return -TARGET_EFAULT
;
8729 host_to_target_siginfo(p
, &uinfo
);
8730 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8732 ret
= host_to_target_signal(ret
);
8737 case TARGET_NR_rt_sigqueueinfo
:
8741 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8743 return -TARGET_EFAULT
;
8745 target_to_host_siginfo(&uinfo
, p
);
8746 unlock_user(p
, arg3
, 0);
8747 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8750 case TARGET_NR_rt_tgsigqueueinfo
:
8754 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8756 return -TARGET_EFAULT
;
8758 target_to_host_siginfo(&uinfo
, p
);
8759 unlock_user(p
, arg4
, 0);
8760 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8763 #ifdef TARGET_NR_sigreturn
8764 case TARGET_NR_sigreturn
:
8765 if (block_signals()) {
8766 return -TARGET_ERESTARTSYS
;
8768 return do_sigreturn(cpu_env
);
8770 case TARGET_NR_rt_sigreturn
:
8771 if (block_signals()) {
8772 return -TARGET_ERESTARTSYS
;
8774 return do_rt_sigreturn(cpu_env
);
8775 case TARGET_NR_sethostname
:
8776 if (!(p
= lock_user_string(arg1
)))
8777 return -TARGET_EFAULT
;
8778 ret
= get_errno(sethostname(p
, arg2
));
8779 unlock_user(p
, arg1
, 0);
8781 #ifdef TARGET_NR_setrlimit
8782 case TARGET_NR_setrlimit
:
8784 int resource
= target_to_host_resource(arg1
);
8785 struct target_rlimit
*target_rlim
;
8787 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8788 return -TARGET_EFAULT
;
8789 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8790 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8791 unlock_user_struct(target_rlim
, arg2
, 0);
8793 * If we just passed through resource limit settings for memory then
8794 * they would also apply to QEMU's own allocations, and QEMU will
8795 * crash or hang or die if its allocations fail. Ideally we would
8796 * track the guest allocations in QEMU and apply the limits ourselves.
8797 * For now, just tell the guest the call succeeded but don't actually
8800 if (resource
!= RLIMIT_AS
&&
8801 resource
!= RLIMIT_DATA
&&
8802 resource
!= RLIMIT_STACK
) {
8803 return get_errno(setrlimit(resource
, &rlim
));
8809 #ifdef TARGET_NR_getrlimit
8810 case TARGET_NR_getrlimit
:
8812 int resource
= target_to_host_resource(arg1
);
8813 struct target_rlimit
*target_rlim
;
8816 ret
= get_errno(getrlimit(resource
, &rlim
));
8817 if (!is_error(ret
)) {
8818 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8819 return -TARGET_EFAULT
;
8820 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8821 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8822 unlock_user_struct(target_rlim
, arg2
, 1);
8827 case TARGET_NR_getrusage
:
8829 struct rusage rusage
;
8830 ret
= get_errno(getrusage(arg1
, &rusage
));
8831 if (!is_error(ret
)) {
8832 ret
= host_to_target_rusage(arg2
, &rusage
);
8836 #if defined(TARGET_NR_gettimeofday)
8837 case TARGET_NR_gettimeofday
:
8842 ret
= get_errno(gettimeofday(&tv
, &tz
));
8843 if (!is_error(ret
)) {
8844 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
8845 return -TARGET_EFAULT
;
8847 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
8848 return -TARGET_EFAULT
;
8854 #if defined(TARGET_NR_settimeofday)
8855 case TARGET_NR_settimeofday
:
8857 struct timeval tv
, *ptv
= NULL
;
8858 struct timezone tz
, *ptz
= NULL
;
8861 if (copy_from_user_timeval(&tv
, arg1
)) {
8862 return -TARGET_EFAULT
;
8868 if (copy_from_user_timezone(&tz
, arg2
)) {
8869 return -TARGET_EFAULT
;
8874 return get_errno(settimeofday(ptv
, ptz
));
8877 #if defined(TARGET_NR_select)
8878 case TARGET_NR_select
:
8879 #if defined(TARGET_WANT_NI_OLD_SELECT)
8880 /* some architectures used to have old_select here
8881 * but now ENOSYS it.
8883 ret
= -TARGET_ENOSYS
;
8884 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8885 ret
= do_old_select(arg1
);
8887 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8891 #ifdef TARGET_NR_pselect6
8892 case TARGET_NR_pselect6
:
8894 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8895 fd_set rfds
, wfds
, efds
;
8896 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8897 struct timespec ts
, *ts_ptr
;
8900 * The 6th arg is actually two args smashed together,
8901 * so we cannot use the C library.
8909 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8910 target_sigset_t
*target_sigset
;
8918 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8922 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8926 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8932 * This takes a timespec, and not a timeval, so we cannot
8933 * use the do_select() helper ...
8936 if (target_to_host_timespec(&ts
, ts_addr
)) {
8937 return -TARGET_EFAULT
;
8944 /* Extract the two packed args for the sigset */
8947 sig
.size
= SIGSET_T_SIZE
;
8949 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8951 return -TARGET_EFAULT
;
8953 arg_sigset
= tswapal(arg7
[0]);
8954 arg_sigsize
= tswapal(arg7
[1]);
8955 unlock_user(arg7
, arg6
, 0);
8959 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8960 /* Like the kernel, we enforce correct size sigsets */
8961 return -TARGET_EINVAL
;
8963 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8964 sizeof(*target_sigset
), 1);
8965 if (!target_sigset
) {
8966 return -TARGET_EFAULT
;
8968 target_to_host_sigset(&set
, target_sigset
);
8969 unlock_user(target_sigset
, arg_sigset
, 0);
8977 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8980 if (!is_error(ret
)) {
8981 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8982 return -TARGET_EFAULT
;
8983 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8984 return -TARGET_EFAULT
;
8985 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8986 return -TARGET_EFAULT
;
8988 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8989 return -TARGET_EFAULT
;
8994 #ifdef TARGET_NR_symlink
8995 case TARGET_NR_symlink
:
8998 p
= lock_user_string(arg1
);
8999 p2
= lock_user_string(arg2
);
9001 ret
= -TARGET_EFAULT
;
9003 ret
= get_errno(symlink(p
, p2
));
9004 unlock_user(p2
, arg2
, 0);
9005 unlock_user(p
, arg1
, 0);
9009 #if defined(TARGET_NR_symlinkat)
9010 case TARGET_NR_symlinkat
:
9013 p
= lock_user_string(arg1
);
9014 p2
= lock_user_string(arg3
);
9016 ret
= -TARGET_EFAULT
;
9018 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9019 unlock_user(p2
, arg3
, 0);
9020 unlock_user(p
, arg1
, 0);
9024 #ifdef TARGET_NR_readlink
9025 case TARGET_NR_readlink
:
9028 p
= lock_user_string(arg1
);
9029 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9031 ret
= -TARGET_EFAULT
;
9033 /* Short circuit this for the magic exe check. */
9034 ret
= -TARGET_EINVAL
;
9035 } else if (is_proc_myself((const char *)p
, "exe")) {
9036 char real
[PATH_MAX
], *temp
;
9037 temp
= realpath(exec_path
, real
);
9038 /* Return value is # of bytes that we wrote to the buffer. */
9040 ret
= get_errno(-1);
9042 /* Don't worry about sign mismatch as earlier mapping
9043 * logic would have thrown a bad address error. */
9044 ret
= MIN(strlen(real
), arg3
);
9045 /* We cannot NUL terminate the string. */
9046 memcpy(p2
, real
, ret
);
9049 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9051 unlock_user(p2
, arg2
, ret
);
9052 unlock_user(p
, arg1
, 0);
9056 #if defined(TARGET_NR_readlinkat)
9057 case TARGET_NR_readlinkat
:
9060 p
= lock_user_string(arg2
);
9061 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9063 ret
= -TARGET_EFAULT
;
9064 } else if (is_proc_myself((const char *)p
, "exe")) {
9065 char real
[PATH_MAX
], *temp
;
9066 temp
= realpath(exec_path
, real
);
9067 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9068 snprintf((char *)p2
, arg4
, "%s", real
);
9070 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9072 unlock_user(p2
, arg3
, ret
);
9073 unlock_user(p
, arg2
, 0);
9077 #ifdef TARGET_NR_swapon
9078 case TARGET_NR_swapon
:
9079 if (!(p
= lock_user_string(arg1
)))
9080 return -TARGET_EFAULT
;
9081 ret
= get_errno(swapon(p
, arg2
));
9082 unlock_user(p
, arg1
, 0);
9085 case TARGET_NR_reboot
:
9086 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9087 /* arg4 must be ignored in all other cases */
9088 p
= lock_user_string(arg4
);
9090 return -TARGET_EFAULT
;
9092 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9093 unlock_user(p
, arg4
, 0);
9095 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9098 #ifdef TARGET_NR_mmap
9099 case TARGET_NR_mmap
:
9100 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9101 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9102 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9103 || defined(TARGET_S390X)
9106 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9107 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9108 return -TARGET_EFAULT
;
9115 unlock_user(v
, arg1
, 0);
9116 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9117 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9121 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9122 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9128 #ifdef TARGET_NR_mmap2
9129 case TARGET_NR_mmap2
:
9131 #define MMAP_SHIFT 12
9133 ret
= target_mmap(arg1
, arg2
, arg3
,
9134 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9135 arg5
, arg6
<< MMAP_SHIFT
);
9136 return get_errno(ret
);
9138 case TARGET_NR_munmap
:
9139 return get_errno(target_munmap(arg1
, arg2
));
9140 case TARGET_NR_mprotect
:
9142 TaskState
*ts
= cpu
->opaque
;
9143 /* Special hack to detect libc making the stack executable. */
9144 if ((arg3
& PROT_GROWSDOWN
)
9145 && arg1
>= ts
->info
->stack_limit
9146 && arg1
<= ts
->info
->start_stack
) {
9147 arg3
&= ~PROT_GROWSDOWN
;
9148 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9149 arg1
= ts
->info
->stack_limit
;
9152 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9153 #ifdef TARGET_NR_mremap
9154 case TARGET_NR_mremap
:
9155 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9157 /* ??? msync/mlock/munlock are broken for softmmu. */
9158 #ifdef TARGET_NR_msync
9159 case TARGET_NR_msync
:
9160 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9162 #ifdef TARGET_NR_mlock
9163 case TARGET_NR_mlock
:
9164 return get_errno(mlock(g2h(arg1
), arg2
));
9166 #ifdef TARGET_NR_munlock
9167 case TARGET_NR_munlock
:
9168 return get_errno(munlock(g2h(arg1
), arg2
));
9170 #ifdef TARGET_NR_mlockall
9171 case TARGET_NR_mlockall
:
9172 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9174 #ifdef TARGET_NR_munlockall
9175 case TARGET_NR_munlockall
:
9176 return get_errno(munlockall());
9178 #ifdef TARGET_NR_truncate
9179 case TARGET_NR_truncate
:
9180 if (!(p
= lock_user_string(arg1
)))
9181 return -TARGET_EFAULT
;
9182 ret
= get_errno(truncate(p
, arg2
));
9183 unlock_user(p
, arg1
, 0);
9186 #ifdef TARGET_NR_ftruncate
9187 case TARGET_NR_ftruncate
:
9188 return get_errno(ftruncate(arg1
, arg2
));
9190 case TARGET_NR_fchmod
:
9191 return get_errno(fchmod(arg1
, arg2
));
9192 #if defined(TARGET_NR_fchmodat)
9193 case TARGET_NR_fchmodat
:
9194 if (!(p
= lock_user_string(arg2
)))
9195 return -TARGET_EFAULT
;
9196 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9197 unlock_user(p
, arg2
, 0);
9200 case TARGET_NR_getpriority
:
9201 /* Note that negative values are valid for getpriority, so we must
9202 differentiate based on errno settings. */
9204 ret
= getpriority(arg1
, arg2
);
9205 if (ret
== -1 && errno
!= 0) {
9206 return -host_to_target_errno(errno
);
9209 /* Return value is the unbiased priority. Signal no error. */
9210 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9212 /* Return value is a biased priority to avoid negative numbers. */
9216 case TARGET_NR_setpriority
:
9217 return get_errno(setpriority(arg1
, arg2
, arg3
));
9218 #ifdef TARGET_NR_statfs
9219 case TARGET_NR_statfs
:
9220 if (!(p
= lock_user_string(arg1
))) {
9221 return -TARGET_EFAULT
;
9223 ret
= get_errno(statfs(path(p
), &stfs
));
9224 unlock_user(p
, arg1
, 0);
9226 if (!is_error(ret
)) {
9227 struct target_statfs
*target_stfs
;
9229 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9230 return -TARGET_EFAULT
;
9231 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9232 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9233 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9234 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9235 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9236 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9237 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9238 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9239 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9240 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9241 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9242 #ifdef _STATFS_F_FLAGS
9243 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9245 __put_user(0, &target_stfs
->f_flags
);
9247 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9248 unlock_user_struct(target_stfs
, arg2
, 1);
9252 #ifdef TARGET_NR_fstatfs
9253 case TARGET_NR_fstatfs
:
9254 ret
= get_errno(fstatfs(arg1
, &stfs
));
9255 goto convert_statfs
;
9257 #ifdef TARGET_NR_statfs64
9258 case TARGET_NR_statfs64
:
9259 if (!(p
= lock_user_string(arg1
))) {
9260 return -TARGET_EFAULT
;
9262 ret
= get_errno(statfs(path(p
), &stfs
));
9263 unlock_user(p
, arg1
, 0);
9265 if (!is_error(ret
)) {
9266 struct target_statfs64
*target_stfs
;
9268 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9269 return -TARGET_EFAULT
;
9270 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9271 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9272 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9273 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9274 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9275 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9276 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9277 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9278 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9279 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9280 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9281 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9282 unlock_user_struct(target_stfs
, arg3
, 1);
9285 case TARGET_NR_fstatfs64
:
9286 ret
= get_errno(fstatfs(arg1
, &stfs
));
9287 goto convert_statfs64
;
9289 #ifdef TARGET_NR_socketcall
9290 case TARGET_NR_socketcall
:
9291 return do_socketcall(arg1
, arg2
);
9293 #ifdef TARGET_NR_accept
9294 case TARGET_NR_accept
:
9295 return do_accept4(arg1
, arg2
, arg3
, 0);
9297 #ifdef TARGET_NR_accept4
9298 case TARGET_NR_accept4
:
9299 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9301 #ifdef TARGET_NR_bind
9302 case TARGET_NR_bind
:
9303 return do_bind(arg1
, arg2
, arg3
);
9305 #ifdef TARGET_NR_connect
9306 case TARGET_NR_connect
:
9307 return do_connect(arg1
, arg2
, arg3
);
9309 #ifdef TARGET_NR_getpeername
9310 case TARGET_NR_getpeername
:
9311 return do_getpeername(arg1
, arg2
, arg3
);
9313 #ifdef TARGET_NR_getsockname
9314 case TARGET_NR_getsockname
:
9315 return do_getsockname(arg1
, arg2
, arg3
);
9317 #ifdef TARGET_NR_getsockopt
9318 case TARGET_NR_getsockopt
:
9319 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9321 #ifdef TARGET_NR_listen
9322 case TARGET_NR_listen
:
9323 return get_errno(listen(arg1
, arg2
));
9325 #ifdef TARGET_NR_recv
9326 case TARGET_NR_recv
:
9327 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9329 #ifdef TARGET_NR_recvfrom
9330 case TARGET_NR_recvfrom
:
9331 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9333 #ifdef TARGET_NR_recvmsg
9334 case TARGET_NR_recvmsg
:
9335 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9337 #ifdef TARGET_NR_send
9338 case TARGET_NR_send
:
9339 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9341 #ifdef TARGET_NR_sendmsg
9342 case TARGET_NR_sendmsg
:
9343 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9345 #ifdef TARGET_NR_sendmmsg
9346 case TARGET_NR_sendmmsg
:
9347 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9349 #ifdef TARGET_NR_recvmmsg
9350 case TARGET_NR_recvmmsg
:
9351 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9353 #ifdef TARGET_NR_sendto
9354 case TARGET_NR_sendto
:
9355 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9357 #ifdef TARGET_NR_shutdown
9358 case TARGET_NR_shutdown
:
9359 return get_errno(shutdown(arg1
, arg2
));
9361 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9362 case TARGET_NR_getrandom
:
9363 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9365 return -TARGET_EFAULT
;
9367 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9368 unlock_user(p
, arg1
, ret
);
9371 #ifdef TARGET_NR_socket
9372 case TARGET_NR_socket
:
9373 return do_socket(arg1
, arg2
, arg3
);
9375 #ifdef TARGET_NR_socketpair
9376 case TARGET_NR_socketpair
:
9377 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9379 #ifdef TARGET_NR_setsockopt
9380 case TARGET_NR_setsockopt
:
9381 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9383 #if defined(TARGET_NR_syslog)
9384 case TARGET_NR_syslog
:
9389 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9390 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9391 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9392 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9393 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9394 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9395 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9396 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9397 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9398 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9399 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9400 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9403 return -TARGET_EINVAL
;
9408 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9410 return -TARGET_EFAULT
;
9412 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9413 unlock_user(p
, arg2
, arg3
);
9417 return -TARGET_EINVAL
;
9422 case TARGET_NR_setitimer
:
9424 struct itimerval value
, ovalue
, *pvalue
;
9428 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9429 || copy_from_user_timeval(&pvalue
->it_value
,
9430 arg2
+ sizeof(struct target_timeval
)))
9431 return -TARGET_EFAULT
;
9435 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9436 if (!is_error(ret
) && arg3
) {
9437 if (copy_to_user_timeval(arg3
,
9438 &ovalue
.it_interval
)
9439 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9441 return -TARGET_EFAULT
;
9445 case TARGET_NR_getitimer
:
9447 struct itimerval value
;
9449 ret
= get_errno(getitimer(arg1
, &value
));
9450 if (!is_error(ret
) && arg2
) {
9451 if (copy_to_user_timeval(arg2
,
9453 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9455 return -TARGET_EFAULT
;
9459 #ifdef TARGET_NR_stat
9460 case TARGET_NR_stat
:
9461 if (!(p
= lock_user_string(arg1
))) {
9462 return -TARGET_EFAULT
;
9464 ret
= get_errno(stat(path(p
), &st
));
9465 unlock_user(p
, arg1
, 0);
9468 #ifdef TARGET_NR_lstat
9469 case TARGET_NR_lstat
:
9470 if (!(p
= lock_user_string(arg1
))) {
9471 return -TARGET_EFAULT
;
9473 ret
= get_errno(lstat(path(p
), &st
));
9474 unlock_user(p
, arg1
, 0);
9477 #ifdef TARGET_NR_fstat
9478 case TARGET_NR_fstat
:
9480 ret
= get_errno(fstat(arg1
, &st
));
9481 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9484 if (!is_error(ret
)) {
9485 struct target_stat
*target_st
;
9487 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9488 return -TARGET_EFAULT
;
9489 memset(target_st
, 0, sizeof(*target_st
));
9490 __put_user(st
.st_dev
, &target_st
->st_dev
);
9491 __put_user(st
.st_ino
, &target_st
->st_ino
);
9492 __put_user(st
.st_mode
, &target_st
->st_mode
);
9493 __put_user(st
.st_uid
, &target_st
->st_uid
);
9494 __put_user(st
.st_gid
, &target_st
->st_gid
);
9495 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9496 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9497 __put_user(st
.st_size
, &target_st
->st_size
);
9498 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9499 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9500 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9501 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9502 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9503 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9504 defined(TARGET_STAT_HAVE_NSEC)
9505 __put_user(st
.st_atim
.tv_nsec
,
9506 &target_st
->target_st_atime_nsec
);
9507 __put_user(st
.st_mtim
.tv_nsec
,
9508 &target_st
->target_st_mtime_nsec
);
9509 __put_user(st
.st_ctim
.tv_nsec
,
9510 &target_st
->target_st_ctime_nsec
);
9512 unlock_user_struct(target_st
, arg2
, 1);
9517 case TARGET_NR_vhangup
:
9518 return get_errno(vhangup());
9519 #ifdef TARGET_NR_syscall
9520 case TARGET_NR_syscall
:
9521 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9522 arg6
, arg7
, arg8
, 0);
9524 #if defined(TARGET_NR_wait4)
9525 case TARGET_NR_wait4
:
9528 abi_long status_ptr
= arg2
;
9529 struct rusage rusage
, *rusage_ptr
;
9530 abi_ulong target_rusage
= arg4
;
9531 abi_long rusage_err
;
9533 rusage_ptr
= &rusage
;
9536 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9537 if (!is_error(ret
)) {
9538 if (status_ptr
&& ret
) {
9539 status
= host_to_target_waitstatus(status
);
9540 if (put_user_s32(status
, status_ptr
))
9541 return -TARGET_EFAULT
;
9543 if (target_rusage
) {
9544 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9553 #ifdef TARGET_NR_swapoff
9554 case TARGET_NR_swapoff
:
9555 if (!(p
= lock_user_string(arg1
)))
9556 return -TARGET_EFAULT
;
9557 ret
= get_errno(swapoff(p
));
9558 unlock_user(p
, arg1
, 0);
9561 case TARGET_NR_sysinfo
:
9563 struct target_sysinfo
*target_value
;
9564 struct sysinfo value
;
9565 ret
= get_errno(sysinfo(&value
));
9566 if (!is_error(ret
) && arg1
)
9568 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9569 return -TARGET_EFAULT
;
9570 __put_user(value
.uptime
, &target_value
->uptime
);
9571 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9572 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9573 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9574 __put_user(value
.totalram
, &target_value
->totalram
);
9575 __put_user(value
.freeram
, &target_value
->freeram
);
9576 __put_user(value
.sharedram
, &target_value
->sharedram
);
9577 __put_user(value
.bufferram
, &target_value
->bufferram
);
9578 __put_user(value
.totalswap
, &target_value
->totalswap
);
9579 __put_user(value
.freeswap
, &target_value
->freeswap
);
9580 __put_user(value
.procs
, &target_value
->procs
);
9581 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9582 __put_user(value
.freehigh
, &target_value
->freehigh
);
9583 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9584 unlock_user_struct(target_value
, arg1
, 1);
9588 #ifdef TARGET_NR_ipc
9590 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9592 #ifdef TARGET_NR_semget
9593 case TARGET_NR_semget
:
9594 return get_errno(semget(arg1
, arg2
, arg3
));
9596 #ifdef TARGET_NR_semop
9597 case TARGET_NR_semop
:
9598 return do_semop(arg1
, arg2
, arg3
);
9600 #ifdef TARGET_NR_semctl
9601 case TARGET_NR_semctl
:
9602 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9604 #ifdef TARGET_NR_msgctl
9605 case TARGET_NR_msgctl
:
9606 return do_msgctl(arg1
, arg2
, arg3
);
9608 #ifdef TARGET_NR_msgget
9609 case TARGET_NR_msgget
:
9610 return get_errno(msgget(arg1
, arg2
));
9612 #ifdef TARGET_NR_msgrcv
9613 case TARGET_NR_msgrcv
:
9614 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9616 #ifdef TARGET_NR_msgsnd
9617 case TARGET_NR_msgsnd
:
9618 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9620 #ifdef TARGET_NR_shmget
9621 case TARGET_NR_shmget
:
9622 return get_errno(shmget(arg1
, arg2
, arg3
));
9624 #ifdef TARGET_NR_shmctl
9625 case TARGET_NR_shmctl
:
9626 return do_shmctl(arg1
, arg2
, arg3
);
9628 #ifdef TARGET_NR_shmat
9629 case TARGET_NR_shmat
:
9630 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9632 #ifdef TARGET_NR_shmdt
9633 case TARGET_NR_shmdt
:
9634 return do_shmdt(arg1
);
9636 case TARGET_NR_fsync
:
9637 return get_errno(fsync(arg1
));
9638 case TARGET_NR_clone
:
9639 /* Linux manages to have three different orderings for its
9640 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9641 * match the kernel's CONFIG_CLONE_* settings.
9642 * Microblaze is further special in that it uses a sixth
9643 * implicit argument to clone for the TLS pointer.
9645 #if defined(TARGET_MICROBLAZE)
9646 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9647 #elif defined(TARGET_CLONE_BACKWARDS)
9648 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9649 #elif defined(TARGET_CLONE_BACKWARDS2)
9650 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9652 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9655 #ifdef __NR_exit_group
9656 /* new thread calls */
9657 case TARGET_NR_exit_group
:
9658 preexit_cleanup(cpu_env
, arg1
);
9659 return get_errno(exit_group(arg1
));
9661 case TARGET_NR_setdomainname
:
9662 if (!(p
= lock_user_string(arg1
)))
9663 return -TARGET_EFAULT
;
9664 ret
= get_errno(setdomainname(p
, arg2
));
9665 unlock_user(p
, arg1
, 0);
9667 case TARGET_NR_uname
:
9668 /* no need to transcode because we use the linux syscall */
9670 struct new_utsname
* buf
;
9672 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9673 return -TARGET_EFAULT
;
9674 ret
= get_errno(sys_uname(buf
));
9675 if (!is_error(ret
)) {
9676 /* Overwrite the native machine name with whatever is being
9678 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9679 sizeof(buf
->machine
));
9680 /* Allow the user to override the reported release. */
9681 if (qemu_uname_release
&& *qemu_uname_release
) {
9682 g_strlcpy(buf
->release
, qemu_uname_release
,
9683 sizeof(buf
->release
));
9686 unlock_user_struct(buf
, arg1
, 1);
9690 case TARGET_NR_modify_ldt
:
9691 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9692 #if !defined(TARGET_X86_64)
9693 case TARGET_NR_vm86
:
9694 return do_vm86(cpu_env
, arg1
, arg2
);
9697 #if defined(TARGET_NR_adjtimex)
9698 case TARGET_NR_adjtimex
:
9700 struct timex host_buf
;
9702 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9703 return -TARGET_EFAULT
;
9705 ret
= get_errno(adjtimex(&host_buf
));
9706 if (!is_error(ret
)) {
9707 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9708 return -TARGET_EFAULT
;
9714 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9715 case TARGET_NR_clock_adjtime
:
9717 struct timex htx
, *phtx
= &htx
;
9719 if (target_to_host_timex(phtx
, arg2
) != 0) {
9720 return -TARGET_EFAULT
;
9722 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9723 if (!is_error(ret
) && phtx
) {
9724 if (host_to_target_timex(arg2
, phtx
) != 0) {
9725 return -TARGET_EFAULT
;
9731 case TARGET_NR_getpgid
:
9732 return get_errno(getpgid(arg1
));
9733 case TARGET_NR_fchdir
:
9734 return get_errno(fchdir(arg1
));
9735 case TARGET_NR_personality
:
9736 return get_errno(personality(arg1
));
9737 #ifdef TARGET_NR__llseek /* Not on alpha */
9738 case TARGET_NR__llseek
:
9741 #if !defined(__NR_llseek)
9742 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9744 ret
= get_errno(res
);
9749 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9751 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9752 return -TARGET_EFAULT
;
9757 #ifdef TARGET_NR_getdents
9758 case TARGET_NR_getdents
:
9759 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9760 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9762 struct target_dirent
*target_dirp
;
9763 struct linux_dirent
*dirp
;
9764 abi_long count
= arg3
;
9766 dirp
= g_try_malloc(count
);
9768 return -TARGET_ENOMEM
;
9771 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9772 if (!is_error(ret
)) {
9773 struct linux_dirent
*de
;
9774 struct target_dirent
*tde
;
9776 int reclen
, treclen
;
9777 int count1
, tnamelen
;
9781 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9782 return -TARGET_EFAULT
;
9785 reclen
= de
->d_reclen
;
9786 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9787 assert(tnamelen
>= 0);
9788 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9789 assert(count1
+ treclen
<= count
);
9790 tde
->d_reclen
= tswap16(treclen
);
9791 tde
->d_ino
= tswapal(de
->d_ino
);
9792 tde
->d_off
= tswapal(de
->d_off
);
9793 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9794 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9796 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9800 unlock_user(target_dirp
, arg2
, ret
);
9806 struct linux_dirent
*dirp
;
9807 abi_long count
= arg3
;
9809 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9810 return -TARGET_EFAULT
;
9811 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9812 if (!is_error(ret
)) {
9813 struct linux_dirent
*de
;
9818 reclen
= de
->d_reclen
;
9821 de
->d_reclen
= tswap16(reclen
);
9822 tswapls(&de
->d_ino
);
9823 tswapls(&de
->d_off
);
9824 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9828 unlock_user(dirp
, arg2
, ret
);
9832 /* Implement getdents in terms of getdents64 */
9834 struct linux_dirent64
*dirp
;
9835 abi_long count
= arg3
;
9837 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9839 return -TARGET_EFAULT
;
9841 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9842 if (!is_error(ret
)) {
9843 /* Convert the dirent64 structs to target dirent. We do this
9844 * in-place, since we can guarantee that a target_dirent is no
9845 * larger than a dirent64; however this means we have to be
9846 * careful to read everything before writing in the new format.
9848 struct linux_dirent64
*de
;
9849 struct target_dirent
*tde
;
9854 tde
= (struct target_dirent
*)dirp
;
9856 int namelen
, treclen
;
9857 int reclen
= de
->d_reclen
;
9858 uint64_t ino
= de
->d_ino
;
9859 int64_t off
= de
->d_off
;
9860 uint8_t type
= de
->d_type
;
9862 namelen
= strlen(de
->d_name
);
9863 treclen
= offsetof(struct target_dirent
, d_name
)
9865 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9867 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9868 tde
->d_ino
= tswapal(ino
);
9869 tde
->d_off
= tswapal(off
);
9870 tde
->d_reclen
= tswap16(treclen
);
9871 /* The target_dirent type is in what was formerly a padding
9872 * byte at the end of the structure:
9874 *(((char *)tde
) + treclen
- 1) = type
;
9876 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9877 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9883 unlock_user(dirp
, arg2
, ret
);
9887 #endif /* TARGET_NR_getdents */
9888 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9889 case TARGET_NR_getdents64
:
9891 struct linux_dirent64
*dirp
;
9892 abi_long count
= arg3
;
9893 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9894 return -TARGET_EFAULT
;
9895 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9896 if (!is_error(ret
)) {
9897 struct linux_dirent64
*de
;
9902 reclen
= de
->d_reclen
;
9905 de
->d_reclen
= tswap16(reclen
);
9906 tswap64s((uint64_t *)&de
->d_ino
);
9907 tswap64s((uint64_t *)&de
->d_off
);
9908 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9912 unlock_user(dirp
, arg2
, ret
);
9915 #endif /* TARGET_NR_getdents64 */
9916 #if defined(TARGET_NR__newselect)
9917 case TARGET_NR__newselect
:
9918 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9920 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9921 # ifdef TARGET_NR_poll
9922 case TARGET_NR_poll
:
9924 # ifdef TARGET_NR_ppoll
9925 case TARGET_NR_ppoll
:
9928 struct target_pollfd
*target_pfd
;
9929 unsigned int nfds
= arg2
;
9936 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9937 return -TARGET_EINVAL
;
9940 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9941 sizeof(struct target_pollfd
) * nfds
, 1);
9943 return -TARGET_EFAULT
;
9946 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9947 for (i
= 0; i
< nfds
; i
++) {
9948 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9949 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9954 # ifdef TARGET_NR_ppoll
9955 case TARGET_NR_ppoll
:
9957 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9958 target_sigset_t
*target_set
;
9959 sigset_t _set
, *set
= &_set
;
9962 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9963 unlock_user(target_pfd
, arg1
, 0);
9964 return -TARGET_EFAULT
;
9971 if (arg5
!= sizeof(target_sigset_t
)) {
9972 unlock_user(target_pfd
, arg1
, 0);
9973 return -TARGET_EINVAL
;
9976 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9978 unlock_user(target_pfd
, arg1
, 0);
9979 return -TARGET_EFAULT
;
9981 target_to_host_sigset(set
, target_set
);
9986 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9987 set
, SIGSET_T_SIZE
));
9989 if (!is_error(ret
) && arg3
) {
9990 host_to_target_timespec(arg3
, timeout_ts
);
9993 unlock_user(target_set
, arg4
, 0);
9998 # ifdef TARGET_NR_poll
9999 case TARGET_NR_poll
:
10001 struct timespec ts
, *pts
;
10004 /* Convert ms to secs, ns */
10005 ts
.tv_sec
= arg3
/ 1000;
10006 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10009 /* -ve poll() timeout means "infinite" */
10012 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10017 g_assert_not_reached();
10020 if (!is_error(ret
)) {
10021 for(i
= 0; i
< nfds
; i
++) {
10022 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10025 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10029 case TARGET_NR_flock
:
10030 /* NOTE: the flock constant seems to be the same for every
10032 return get_errno(safe_flock(arg1
, arg2
));
10033 case TARGET_NR_readv
:
10035 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10037 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10038 unlock_iovec(vec
, arg2
, arg3
, 1);
10040 ret
= -host_to_target_errno(errno
);
10044 case TARGET_NR_writev
:
10046 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10048 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10049 unlock_iovec(vec
, arg2
, arg3
, 0);
10051 ret
= -host_to_target_errno(errno
);
10055 #if defined(TARGET_NR_preadv)
10056 case TARGET_NR_preadv
:
10058 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10060 unsigned long low
, high
;
10062 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10063 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10064 unlock_iovec(vec
, arg2
, arg3
, 1);
10066 ret
= -host_to_target_errno(errno
);
10071 #if defined(TARGET_NR_pwritev)
10072 case TARGET_NR_pwritev
:
10074 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10076 unsigned long low
, high
;
10078 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10079 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10080 unlock_iovec(vec
, arg2
, arg3
, 0);
10082 ret
= -host_to_target_errno(errno
);
10087 case TARGET_NR_getsid
:
10088 return get_errno(getsid(arg1
));
10089 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10090 case TARGET_NR_fdatasync
:
10091 return get_errno(fdatasync(arg1
));
10093 #ifdef TARGET_NR__sysctl
10094 case TARGET_NR__sysctl
:
10095 /* We don't implement this, but ENOTDIR is always a safe
10097 return -TARGET_ENOTDIR
;
10099 case TARGET_NR_sched_getaffinity
:
10101 unsigned int mask_size
;
10102 unsigned long *mask
;
10105 * sched_getaffinity needs multiples of ulong, so need to take
10106 * care of mismatches between target ulong and host ulong sizes.
10108 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10109 return -TARGET_EINVAL
;
10111 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10113 mask
= alloca(mask_size
);
10114 memset(mask
, 0, mask_size
);
10115 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10117 if (!is_error(ret
)) {
10119 /* More data returned than the caller's buffer will fit.
10120 * This only happens if sizeof(abi_long) < sizeof(long)
10121 * and the caller passed us a buffer holding an odd number
10122 * of abi_longs. If the host kernel is actually using the
10123 * extra 4 bytes then fail EINVAL; otherwise we can just
10124 * ignore them and only copy the interesting part.
10126 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10127 if (numcpus
> arg2
* 8) {
10128 return -TARGET_EINVAL
;
10133 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10134 return -TARGET_EFAULT
;
10139 case TARGET_NR_sched_setaffinity
:
10141 unsigned int mask_size
;
10142 unsigned long *mask
;
10145 * sched_setaffinity needs multiples of ulong, so need to take
10146 * care of mismatches between target ulong and host ulong sizes.
10148 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10149 return -TARGET_EINVAL
;
10151 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10152 mask
= alloca(mask_size
);
10154 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10159 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10161 case TARGET_NR_getcpu
:
10163 unsigned cpu
, node
;
10164 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10165 arg2
? &node
: NULL
,
10167 if (is_error(ret
)) {
10170 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10171 return -TARGET_EFAULT
;
10173 if (arg2
&& put_user_u32(node
, arg2
)) {
10174 return -TARGET_EFAULT
;
10178 case TARGET_NR_sched_setparam
:
10180 struct sched_param
*target_schp
;
10181 struct sched_param schp
;
10184 return -TARGET_EINVAL
;
10186 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10187 return -TARGET_EFAULT
;
10188 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10189 unlock_user_struct(target_schp
, arg2
, 0);
10190 return get_errno(sched_setparam(arg1
, &schp
));
10192 case TARGET_NR_sched_getparam
:
10194 struct sched_param
*target_schp
;
10195 struct sched_param schp
;
10198 return -TARGET_EINVAL
;
10200 ret
= get_errno(sched_getparam(arg1
, &schp
));
10201 if (!is_error(ret
)) {
10202 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10203 return -TARGET_EFAULT
;
10204 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10205 unlock_user_struct(target_schp
, arg2
, 1);
10209 case TARGET_NR_sched_setscheduler
:
10211 struct sched_param
*target_schp
;
10212 struct sched_param schp
;
10214 return -TARGET_EINVAL
;
10216 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10217 return -TARGET_EFAULT
;
10218 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10219 unlock_user_struct(target_schp
, arg3
, 0);
10220 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10222 case TARGET_NR_sched_getscheduler
:
10223 return get_errno(sched_getscheduler(arg1
));
10224 case TARGET_NR_sched_yield
:
10225 return get_errno(sched_yield());
10226 case TARGET_NR_sched_get_priority_max
:
10227 return get_errno(sched_get_priority_max(arg1
));
10228 case TARGET_NR_sched_get_priority_min
:
10229 return get_errno(sched_get_priority_min(arg1
));
10230 #ifdef TARGET_NR_sched_rr_get_interval
10231 case TARGET_NR_sched_rr_get_interval
:
10233 struct timespec ts
;
10234 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10235 if (!is_error(ret
)) {
10236 ret
= host_to_target_timespec(arg2
, &ts
);
10241 #if defined(TARGET_NR_nanosleep)
10242 case TARGET_NR_nanosleep
:
10244 struct timespec req
, rem
;
10245 target_to_host_timespec(&req
, arg1
);
10246 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10247 if (is_error(ret
) && arg2
) {
10248 host_to_target_timespec(arg2
, &rem
);
10253 case TARGET_NR_prctl
:
10255 case PR_GET_PDEATHSIG
:
10258 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10259 if (!is_error(ret
) && arg2
10260 && put_user_ual(deathsig
, arg2
)) {
10261 return -TARGET_EFAULT
;
10268 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10270 return -TARGET_EFAULT
;
10272 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10273 arg3
, arg4
, arg5
));
10274 unlock_user(name
, arg2
, 16);
10279 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10281 return -TARGET_EFAULT
;
10283 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10284 arg3
, arg4
, arg5
));
10285 unlock_user(name
, arg2
, 0);
10290 case TARGET_PR_GET_FP_MODE
:
10292 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10294 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10295 ret
|= TARGET_PR_FP_MODE_FR
;
10297 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10298 ret
|= TARGET_PR_FP_MODE_FRE
;
10302 case TARGET_PR_SET_FP_MODE
:
10304 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10305 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10306 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10307 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10308 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10310 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10311 TARGET_PR_FP_MODE_FRE
;
10313 /* If nothing to change, return right away, successfully. */
10314 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10317 /* Check the value is valid */
10318 if (arg2
& ~known_bits
) {
10319 return -TARGET_EOPNOTSUPP
;
10321 /* Setting FRE without FR is not supported. */
10322 if (new_fre
&& !new_fr
) {
10323 return -TARGET_EOPNOTSUPP
;
10325 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10326 /* FR1 is not supported */
10327 return -TARGET_EOPNOTSUPP
;
10329 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10330 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10331 /* cannot set FR=0 */
10332 return -TARGET_EOPNOTSUPP
;
10334 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10335 /* Cannot set FRE=1 */
10336 return -TARGET_EOPNOTSUPP
;
10340 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10341 for (i
= 0; i
< 32 ; i
+= 2) {
10342 if (!old_fr
&& new_fr
) {
10343 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10344 } else if (old_fr
&& !new_fr
) {
10345 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10350 env
->CP0_Status
|= (1 << CP0St_FR
);
10351 env
->hflags
|= MIPS_HFLAG_F64
;
10353 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10354 env
->hflags
&= ~MIPS_HFLAG_F64
;
10357 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10358 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10359 env
->hflags
|= MIPS_HFLAG_FRE
;
10362 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10363 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10369 #ifdef TARGET_AARCH64
10370 case TARGET_PR_SVE_SET_VL
:
10372 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10373 * PR_SVE_VL_INHERIT. Note the kernel definition
10374 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10375 * even though the current architectural maximum is VQ=16.
10377 ret
= -TARGET_EINVAL
;
10378 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10379 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10380 CPUARMState
*env
= cpu_env
;
10381 ARMCPU
*cpu
= env_archcpu(env
);
10382 uint32_t vq
, old_vq
;
10384 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10385 vq
= MAX(arg2
/ 16, 1);
10386 vq
= MIN(vq
, cpu
->sve_max_vq
);
10389 aarch64_sve_narrow_vq(env
, vq
);
10391 env
->vfp
.zcr_el
[1] = vq
- 1;
10392 arm_rebuild_hflags(env
);
10396 case TARGET_PR_SVE_GET_VL
:
10397 ret
= -TARGET_EINVAL
;
10399 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10400 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10401 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10405 case TARGET_PR_PAC_RESET_KEYS
:
10407 CPUARMState
*env
= cpu_env
;
10408 ARMCPU
*cpu
= env_archcpu(env
);
10410 if (arg3
|| arg4
|| arg5
) {
10411 return -TARGET_EINVAL
;
10413 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10414 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10415 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10416 TARGET_PR_PAC_APGAKEY
);
10422 } else if (arg2
& ~all
) {
10423 return -TARGET_EINVAL
;
10425 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10426 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10427 sizeof(ARMPACKey
), &err
);
10429 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10430 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10431 sizeof(ARMPACKey
), &err
);
10433 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10434 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10435 sizeof(ARMPACKey
), &err
);
10437 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10438 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10439 sizeof(ARMPACKey
), &err
);
10441 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10442 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10443 sizeof(ARMPACKey
), &err
);
10447 * Some unknown failure in the crypto. The best
10448 * we can do is log it and fail the syscall.
10449 * The real syscall cannot fail this way.
10451 qemu_log_mask(LOG_UNIMP
,
10452 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10453 error_get_pretty(err
));
10455 return -TARGET_EIO
;
10460 return -TARGET_EINVAL
;
10461 #endif /* AARCH64 */
10462 case PR_GET_SECCOMP
:
10463 case PR_SET_SECCOMP
:
10464 /* Disable seccomp to prevent the target disabling syscalls we
10466 return -TARGET_EINVAL
;
10468 /* Most prctl options have no pointer arguments */
10469 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10472 #ifdef TARGET_NR_arch_prctl
10473 case TARGET_NR_arch_prctl
:
10474 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10476 #ifdef TARGET_NR_pread64
10477 case TARGET_NR_pread64
:
10478 if (regpairs_aligned(cpu_env
, num
)) {
10482 if (arg2
== 0 && arg3
== 0) {
10483 /* Special-case NULL buffer and zero length, which should succeed */
10486 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10488 return -TARGET_EFAULT
;
10491 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10492 unlock_user(p
, arg2
, ret
);
10494 case TARGET_NR_pwrite64
:
10495 if (regpairs_aligned(cpu_env
, num
)) {
10499 if (arg2
== 0 && arg3
== 0) {
10500 /* Special-case NULL buffer and zero length, which should succeed */
10503 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10505 return -TARGET_EFAULT
;
10508 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10509 unlock_user(p
, arg2
, 0);
10512 case TARGET_NR_getcwd
:
10513 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10514 return -TARGET_EFAULT
;
10515 ret
= get_errno(sys_getcwd1(p
, arg2
));
10516 unlock_user(p
, arg1
, ret
);
10518 case TARGET_NR_capget
:
10519 case TARGET_NR_capset
:
10521 struct target_user_cap_header
*target_header
;
10522 struct target_user_cap_data
*target_data
= NULL
;
10523 struct __user_cap_header_struct header
;
10524 struct __user_cap_data_struct data
[2];
10525 struct __user_cap_data_struct
*dataptr
= NULL
;
10526 int i
, target_datalen
;
10527 int data_items
= 1;
10529 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10530 return -TARGET_EFAULT
;
10532 header
.version
= tswap32(target_header
->version
);
10533 header
.pid
= tswap32(target_header
->pid
);
10535 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10536 /* Version 2 and up takes pointer to two user_data structs */
10540 target_datalen
= sizeof(*target_data
) * data_items
;
10543 if (num
== TARGET_NR_capget
) {
10544 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10546 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10548 if (!target_data
) {
10549 unlock_user_struct(target_header
, arg1
, 0);
10550 return -TARGET_EFAULT
;
10553 if (num
== TARGET_NR_capset
) {
10554 for (i
= 0; i
< data_items
; i
++) {
10555 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10556 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10557 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10564 if (num
== TARGET_NR_capget
) {
10565 ret
= get_errno(capget(&header
, dataptr
));
10567 ret
= get_errno(capset(&header
, dataptr
));
10570 /* The kernel always updates version for both capget and capset */
10571 target_header
->version
= tswap32(header
.version
);
10572 unlock_user_struct(target_header
, arg1
, 1);
10575 if (num
== TARGET_NR_capget
) {
10576 for (i
= 0; i
< data_items
; i
++) {
10577 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10578 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10579 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10581 unlock_user(target_data
, arg2
, target_datalen
);
10583 unlock_user(target_data
, arg2
, 0);
10588 case TARGET_NR_sigaltstack
:
10589 return do_sigaltstack(arg1
, arg2
,
10590 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10592 #ifdef CONFIG_SENDFILE
10593 #ifdef TARGET_NR_sendfile
10594 case TARGET_NR_sendfile
:
10596 off_t
*offp
= NULL
;
10599 ret
= get_user_sal(off
, arg3
);
10600 if (is_error(ret
)) {
10605 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10606 if (!is_error(ret
) && arg3
) {
10607 abi_long ret2
= put_user_sal(off
, arg3
);
10608 if (is_error(ret2
)) {
10615 #ifdef TARGET_NR_sendfile64
10616 case TARGET_NR_sendfile64
:
10618 off_t
*offp
= NULL
;
10621 ret
= get_user_s64(off
, arg3
);
10622 if (is_error(ret
)) {
10627 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10628 if (!is_error(ret
) && arg3
) {
10629 abi_long ret2
= put_user_s64(off
, arg3
);
10630 if (is_error(ret2
)) {
10638 #ifdef TARGET_NR_vfork
10639 case TARGET_NR_vfork
:
10640 return get_errno(do_fork(cpu_env
,
10641 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10644 #ifdef TARGET_NR_ugetrlimit
10645 case TARGET_NR_ugetrlimit
:
10647 struct rlimit rlim
;
10648 int resource
= target_to_host_resource(arg1
);
10649 ret
= get_errno(getrlimit(resource
, &rlim
));
10650 if (!is_error(ret
)) {
10651 struct target_rlimit
*target_rlim
;
10652 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10653 return -TARGET_EFAULT
;
10654 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10655 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10656 unlock_user_struct(target_rlim
, arg2
, 1);
10661 #ifdef TARGET_NR_truncate64
10662 case TARGET_NR_truncate64
:
10663 if (!(p
= lock_user_string(arg1
)))
10664 return -TARGET_EFAULT
;
10665 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10666 unlock_user(p
, arg1
, 0);
10669 #ifdef TARGET_NR_ftruncate64
10670 case TARGET_NR_ftruncate64
:
10671 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10673 #ifdef TARGET_NR_stat64
10674 case TARGET_NR_stat64
:
10675 if (!(p
= lock_user_string(arg1
))) {
10676 return -TARGET_EFAULT
;
10678 ret
= get_errno(stat(path(p
), &st
));
10679 unlock_user(p
, arg1
, 0);
10680 if (!is_error(ret
))
10681 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10684 #ifdef TARGET_NR_lstat64
10685 case TARGET_NR_lstat64
:
10686 if (!(p
= lock_user_string(arg1
))) {
10687 return -TARGET_EFAULT
;
10689 ret
= get_errno(lstat(path(p
), &st
));
10690 unlock_user(p
, arg1
, 0);
10691 if (!is_error(ret
))
10692 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10695 #ifdef TARGET_NR_fstat64
10696 case TARGET_NR_fstat64
:
10697 ret
= get_errno(fstat(arg1
, &st
));
10698 if (!is_error(ret
))
10699 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10702 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10703 #ifdef TARGET_NR_fstatat64
10704 case TARGET_NR_fstatat64
:
10706 #ifdef TARGET_NR_newfstatat
10707 case TARGET_NR_newfstatat
:
10709 if (!(p
= lock_user_string(arg2
))) {
10710 return -TARGET_EFAULT
;
10712 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10713 unlock_user(p
, arg2
, 0);
10714 if (!is_error(ret
))
10715 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10718 #if defined(TARGET_NR_statx)
10719 case TARGET_NR_statx
:
10721 struct target_statx
*target_stx
;
10725 p
= lock_user_string(arg2
);
10727 return -TARGET_EFAULT
;
10729 #if defined(__NR_statx)
10732 * It is assumed that struct statx is architecture independent.
10734 struct target_statx host_stx
;
10737 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10738 if (!is_error(ret
)) {
10739 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10740 unlock_user(p
, arg2
, 0);
10741 return -TARGET_EFAULT
;
10745 if (ret
!= -TARGET_ENOSYS
) {
10746 unlock_user(p
, arg2
, 0);
10751 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10752 unlock_user(p
, arg2
, 0);
10754 if (!is_error(ret
)) {
10755 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10756 return -TARGET_EFAULT
;
10758 memset(target_stx
, 0, sizeof(*target_stx
));
10759 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10760 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10761 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10762 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10763 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10764 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10765 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10766 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10767 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10768 __put_user(st
.st_size
, &target_stx
->stx_size
);
10769 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10770 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10771 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10772 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10773 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10774 unlock_user_struct(target_stx
, arg5
, 1);
10779 #ifdef TARGET_NR_lchown
10780 case TARGET_NR_lchown
:
10781 if (!(p
= lock_user_string(arg1
)))
10782 return -TARGET_EFAULT
;
10783 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10784 unlock_user(p
, arg1
, 0);
10787 #ifdef TARGET_NR_getuid
10788 case TARGET_NR_getuid
:
10789 return get_errno(high2lowuid(getuid()));
10791 #ifdef TARGET_NR_getgid
10792 case TARGET_NR_getgid
:
10793 return get_errno(high2lowgid(getgid()));
10795 #ifdef TARGET_NR_geteuid
10796 case TARGET_NR_geteuid
:
10797 return get_errno(high2lowuid(geteuid()));
10799 #ifdef TARGET_NR_getegid
10800 case TARGET_NR_getegid
:
10801 return get_errno(high2lowgid(getegid()));
10803 case TARGET_NR_setreuid
:
10804 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10805 case TARGET_NR_setregid
:
10806 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10807 case TARGET_NR_getgroups
:
10809 int gidsetsize
= arg1
;
10810 target_id
*target_grouplist
;
10814 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10815 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10816 if (gidsetsize
== 0)
10818 if (!is_error(ret
)) {
10819 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10820 if (!target_grouplist
)
10821 return -TARGET_EFAULT
;
10822 for(i
= 0;i
< ret
; i
++)
10823 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10824 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10828 case TARGET_NR_setgroups
:
10830 int gidsetsize
= arg1
;
10831 target_id
*target_grouplist
;
10832 gid_t
*grouplist
= NULL
;
10835 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10836 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10837 if (!target_grouplist
) {
10838 return -TARGET_EFAULT
;
10840 for (i
= 0; i
< gidsetsize
; i
++) {
10841 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10843 unlock_user(target_grouplist
, arg2
, 0);
10845 return get_errno(setgroups(gidsetsize
, grouplist
));
10847 case TARGET_NR_fchown
:
10848 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10849 #if defined(TARGET_NR_fchownat)
10850 case TARGET_NR_fchownat
:
10851 if (!(p
= lock_user_string(arg2
)))
10852 return -TARGET_EFAULT
;
10853 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10854 low2highgid(arg4
), arg5
));
10855 unlock_user(p
, arg2
, 0);
10858 #ifdef TARGET_NR_setresuid
10859 case TARGET_NR_setresuid
:
10860 return get_errno(sys_setresuid(low2highuid(arg1
),
10862 low2highuid(arg3
)));
10864 #ifdef TARGET_NR_getresuid
10865 case TARGET_NR_getresuid
:
10867 uid_t ruid
, euid
, suid
;
10868 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10869 if (!is_error(ret
)) {
10870 if (put_user_id(high2lowuid(ruid
), arg1
)
10871 || put_user_id(high2lowuid(euid
), arg2
)
10872 || put_user_id(high2lowuid(suid
), arg3
))
10873 return -TARGET_EFAULT
;
10878 #ifdef TARGET_NR_getresgid
10879 case TARGET_NR_setresgid
:
10880 return get_errno(sys_setresgid(low2highgid(arg1
),
10882 low2highgid(arg3
)));
10884 #ifdef TARGET_NR_getresgid
10885 case TARGET_NR_getresgid
:
10887 gid_t rgid
, egid
, sgid
;
10888 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10889 if (!is_error(ret
)) {
10890 if (put_user_id(high2lowgid(rgid
), arg1
)
10891 || put_user_id(high2lowgid(egid
), arg2
)
10892 || put_user_id(high2lowgid(sgid
), arg3
))
10893 return -TARGET_EFAULT
;
10898 #ifdef TARGET_NR_chown
10899 case TARGET_NR_chown
:
10900 if (!(p
= lock_user_string(arg1
)))
10901 return -TARGET_EFAULT
;
10902 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10903 unlock_user(p
, arg1
, 0);
10906 case TARGET_NR_setuid
:
10907 return get_errno(sys_setuid(low2highuid(arg1
)));
10908 case TARGET_NR_setgid
:
10909 return get_errno(sys_setgid(low2highgid(arg1
)));
10910 case TARGET_NR_setfsuid
:
10911 return get_errno(setfsuid(arg1
));
10912 case TARGET_NR_setfsgid
:
10913 return get_errno(setfsgid(arg1
));
10915 #ifdef TARGET_NR_lchown32
10916 case TARGET_NR_lchown32
:
10917 if (!(p
= lock_user_string(arg1
)))
10918 return -TARGET_EFAULT
;
10919 ret
= get_errno(lchown(p
, arg2
, arg3
));
10920 unlock_user(p
, arg1
, 0);
10923 #ifdef TARGET_NR_getuid32
10924 case TARGET_NR_getuid32
:
10925 return get_errno(getuid());
10928 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10929 /* Alpha specific */
10930 case TARGET_NR_getxuid
:
10934 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10936 return get_errno(getuid());
10938 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10939 /* Alpha specific */
10940 case TARGET_NR_getxgid
:
10944 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10946 return get_errno(getgid());
10948 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10949 /* Alpha specific */
10950 case TARGET_NR_osf_getsysinfo
:
10951 ret
= -TARGET_EOPNOTSUPP
;
10953 case TARGET_GSI_IEEE_FP_CONTROL
:
10955 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10956 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
10958 swcr
&= ~SWCR_STATUS_MASK
;
10959 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10961 if (put_user_u64 (swcr
, arg2
))
10962 return -TARGET_EFAULT
;
10967 /* case GSI_IEEE_STATE_AT_SIGNAL:
10968 -- Not implemented in linux kernel.
10970 -- Retrieves current unaligned access state; not much used.
10971 case GSI_PROC_TYPE:
10972 -- Retrieves implver information; surely not used.
10973 case GSI_GET_HWRPB:
10974 -- Grabs a copy of the HWRPB; surely not used.
10979 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10980 /* Alpha specific */
10981 case TARGET_NR_osf_setsysinfo
:
10982 ret
= -TARGET_EOPNOTSUPP
;
10984 case TARGET_SSI_IEEE_FP_CONTROL
:
10986 uint64_t swcr
, fpcr
;
10988 if (get_user_u64 (swcr
, arg2
)) {
10989 return -TARGET_EFAULT
;
10993 * The kernel calls swcr_update_status to update the
10994 * status bits from the fpcr at every point that it
10995 * could be queried. Therefore, we store the status
10996 * bits only in FPCR.
10998 ((CPUAlphaState
*)cpu_env
)->swcr
10999 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11001 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11002 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11003 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11004 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11009 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11011 uint64_t exc
, fpcr
, fex
;
11013 if (get_user_u64(exc
, arg2
)) {
11014 return -TARGET_EFAULT
;
11016 exc
&= SWCR_STATUS_MASK
;
11017 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11019 /* Old exceptions are not signaled. */
11020 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11022 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11023 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11025 /* Update the hardware fpcr. */
11026 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11027 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11030 int si_code
= TARGET_FPE_FLTUNK
;
11031 target_siginfo_t info
;
11033 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11034 si_code
= TARGET_FPE_FLTUND
;
11036 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11037 si_code
= TARGET_FPE_FLTRES
;
11039 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11040 si_code
= TARGET_FPE_FLTUND
;
11042 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11043 si_code
= TARGET_FPE_FLTOVF
;
11045 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11046 si_code
= TARGET_FPE_FLTDIV
;
11048 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11049 si_code
= TARGET_FPE_FLTINV
;
11052 info
.si_signo
= SIGFPE
;
11054 info
.si_code
= si_code
;
11055 info
._sifields
._sigfault
._addr
11056 = ((CPUArchState
*)cpu_env
)->pc
;
11057 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11058 QEMU_SI_FAULT
, &info
);
11064 /* case SSI_NVPAIRS:
11065 -- Used with SSIN_UACPROC to enable unaligned accesses.
11066 case SSI_IEEE_STATE_AT_SIGNAL:
11067 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11068 -- Not implemented in linux kernel
11073 #ifdef TARGET_NR_osf_sigprocmask
11074 /* Alpha specific. */
11075 case TARGET_NR_osf_sigprocmask
:
11079 sigset_t set
, oldset
;
11082 case TARGET_SIG_BLOCK
:
11085 case TARGET_SIG_UNBLOCK
:
11088 case TARGET_SIG_SETMASK
:
11092 return -TARGET_EINVAL
;
11095 target_to_host_old_sigset(&set
, &mask
);
11096 ret
= do_sigprocmask(how
, &set
, &oldset
);
11098 host_to_target_old_sigset(&mask
, &oldset
);
11105 #ifdef TARGET_NR_getgid32
11106 case TARGET_NR_getgid32
:
11107 return get_errno(getgid());
11109 #ifdef TARGET_NR_geteuid32
11110 case TARGET_NR_geteuid32
:
11111 return get_errno(geteuid());
11113 #ifdef TARGET_NR_getegid32
11114 case TARGET_NR_getegid32
:
11115 return get_errno(getegid());
11117 #ifdef TARGET_NR_setreuid32
11118 case TARGET_NR_setreuid32
:
11119 return get_errno(setreuid(arg1
, arg2
));
11121 #ifdef TARGET_NR_setregid32
11122 case TARGET_NR_setregid32
:
11123 return get_errno(setregid(arg1
, arg2
));
11125 #ifdef TARGET_NR_getgroups32
11126 case TARGET_NR_getgroups32
:
11128 int gidsetsize
= arg1
;
11129 uint32_t *target_grouplist
;
11133 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11134 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11135 if (gidsetsize
== 0)
11137 if (!is_error(ret
)) {
11138 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11139 if (!target_grouplist
) {
11140 return -TARGET_EFAULT
;
11142 for(i
= 0;i
< ret
; i
++)
11143 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11144 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11149 #ifdef TARGET_NR_setgroups32
11150 case TARGET_NR_setgroups32
:
11152 int gidsetsize
= arg1
;
11153 uint32_t *target_grouplist
;
11157 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11158 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11159 if (!target_grouplist
) {
11160 return -TARGET_EFAULT
;
11162 for(i
= 0;i
< gidsetsize
; i
++)
11163 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11164 unlock_user(target_grouplist
, arg2
, 0);
11165 return get_errno(setgroups(gidsetsize
, grouplist
));
11168 #ifdef TARGET_NR_fchown32
11169 case TARGET_NR_fchown32
:
11170 return get_errno(fchown(arg1
, arg2
, arg3
));
11172 #ifdef TARGET_NR_setresuid32
11173 case TARGET_NR_setresuid32
:
11174 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11176 #ifdef TARGET_NR_getresuid32
11177 case TARGET_NR_getresuid32
:
11179 uid_t ruid
, euid
, suid
;
11180 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11181 if (!is_error(ret
)) {
11182 if (put_user_u32(ruid
, arg1
)
11183 || put_user_u32(euid
, arg2
)
11184 || put_user_u32(suid
, arg3
))
11185 return -TARGET_EFAULT
;
11190 #ifdef TARGET_NR_setresgid32
11191 case TARGET_NR_setresgid32
:
11192 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11194 #ifdef TARGET_NR_getresgid32
11195 case TARGET_NR_getresgid32
:
11197 gid_t rgid
, egid
, sgid
;
11198 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11199 if (!is_error(ret
)) {
11200 if (put_user_u32(rgid
, arg1
)
11201 || put_user_u32(egid
, arg2
)
11202 || put_user_u32(sgid
, arg3
))
11203 return -TARGET_EFAULT
;
11208 #ifdef TARGET_NR_chown32
11209 case TARGET_NR_chown32
:
11210 if (!(p
= lock_user_string(arg1
)))
11211 return -TARGET_EFAULT
;
11212 ret
= get_errno(chown(p
, arg2
, arg3
));
11213 unlock_user(p
, arg1
, 0);
11216 #ifdef TARGET_NR_setuid32
11217 case TARGET_NR_setuid32
:
11218 return get_errno(sys_setuid(arg1
));
11220 #ifdef TARGET_NR_setgid32
11221 case TARGET_NR_setgid32
:
11222 return get_errno(sys_setgid(arg1
));
11224 #ifdef TARGET_NR_setfsuid32
11225 case TARGET_NR_setfsuid32
:
11226 return get_errno(setfsuid(arg1
));
11228 #ifdef TARGET_NR_setfsgid32
11229 case TARGET_NR_setfsgid32
:
11230 return get_errno(setfsgid(arg1
));
11232 #ifdef TARGET_NR_mincore
11233 case TARGET_NR_mincore
:
11235 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11237 return -TARGET_ENOMEM
;
11239 p
= lock_user_string(arg3
);
11241 ret
= -TARGET_EFAULT
;
11243 ret
= get_errno(mincore(a
, arg2
, p
));
11244 unlock_user(p
, arg3
, ret
);
11246 unlock_user(a
, arg1
, 0);
11250 #ifdef TARGET_NR_arm_fadvise64_64
11251 case TARGET_NR_arm_fadvise64_64
:
11252 /* arm_fadvise64_64 looks like fadvise64_64 but
11253 * with different argument order: fd, advice, offset, len
11254 * rather than the usual fd, offset, len, advice.
11255 * Note that offset and len are both 64-bit so appear as
11256 * pairs of 32-bit registers.
11258 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11259 target_offset64(arg5
, arg6
), arg2
);
11260 return -host_to_target_errno(ret
);
11263 #if TARGET_ABI_BITS == 32
11265 #ifdef TARGET_NR_fadvise64_64
11266 case TARGET_NR_fadvise64_64
:
11267 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11268 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11276 /* 6 args: fd, offset (high, low), len (high, low), advice */
11277 if (regpairs_aligned(cpu_env
, num
)) {
11278 /* offset is in (3,4), len in (5,6) and advice in 7 */
11286 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11287 target_offset64(arg4
, arg5
), arg6
);
11288 return -host_to_target_errno(ret
);
11291 #ifdef TARGET_NR_fadvise64
11292 case TARGET_NR_fadvise64
:
11293 /* 5 args: fd, offset (high, low), len, advice */
11294 if (regpairs_aligned(cpu_env
, num
)) {
11295 /* offset is in (3,4), len in 5 and advice in 6 */
11301 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11302 return -host_to_target_errno(ret
);
11305 #else /* not a 32-bit ABI */
11306 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11307 #ifdef TARGET_NR_fadvise64_64
11308 case TARGET_NR_fadvise64_64
:
11310 #ifdef TARGET_NR_fadvise64
11311 case TARGET_NR_fadvise64
:
11313 #ifdef TARGET_S390X
11315 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11316 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11317 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11318 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11322 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11324 #endif /* end of 64-bit ABI fadvise handling */
11326 #ifdef TARGET_NR_madvise
11327 case TARGET_NR_madvise
:
11328 /* A straight passthrough may not be safe because qemu sometimes
11329 turns private file-backed mappings into anonymous mappings.
11330 This will break MADV_DONTNEED.
11331 This is a hint, so ignoring and returning success is ok. */
11334 #if TARGET_ABI_BITS == 32
11335 case TARGET_NR_fcntl64
:
11339 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11340 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11343 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11344 copyfrom
= copy_from_user_oabi_flock64
;
11345 copyto
= copy_to_user_oabi_flock64
;
11349 cmd
= target_to_host_fcntl_cmd(arg2
);
11350 if (cmd
== -TARGET_EINVAL
) {
11355 case TARGET_F_GETLK64
:
11356 ret
= copyfrom(&fl
, arg3
);
11360 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11362 ret
= copyto(arg3
, &fl
);
11366 case TARGET_F_SETLK64
:
11367 case TARGET_F_SETLKW64
:
11368 ret
= copyfrom(&fl
, arg3
);
11372 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11375 ret
= do_fcntl(arg1
, arg2
, arg3
);
11381 #ifdef TARGET_NR_cacheflush
11382 case TARGET_NR_cacheflush
:
11383 /* self-modifying code is handled automatically, so nothing needed */
11386 #ifdef TARGET_NR_getpagesize
11387 case TARGET_NR_getpagesize
:
11388 return TARGET_PAGE_SIZE
;
11390 case TARGET_NR_gettid
:
11391 return get_errno(sys_gettid());
11392 #ifdef TARGET_NR_readahead
11393 case TARGET_NR_readahead
:
11394 #if TARGET_ABI_BITS == 32
11395 if (regpairs_aligned(cpu_env
, num
)) {
11400 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11402 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11407 #ifdef TARGET_NR_setxattr
11408 case TARGET_NR_listxattr
:
11409 case TARGET_NR_llistxattr
:
11413 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11415 return -TARGET_EFAULT
;
11418 p
= lock_user_string(arg1
);
11420 if (num
== TARGET_NR_listxattr
) {
11421 ret
= get_errno(listxattr(p
, b
, arg3
));
11423 ret
= get_errno(llistxattr(p
, b
, arg3
));
11426 ret
= -TARGET_EFAULT
;
11428 unlock_user(p
, arg1
, 0);
11429 unlock_user(b
, arg2
, arg3
);
11432 case TARGET_NR_flistxattr
:
11436 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11438 return -TARGET_EFAULT
;
11441 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11442 unlock_user(b
, arg2
, arg3
);
11445 case TARGET_NR_setxattr
:
11446 case TARGET_NR_lsetxattr
:
11448 void *p
, *n
, *v
= 0;
11450 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11452 return -TARGET_EFAULT
;
11455 p
= lock_user_string(arg1
);
11456 n
= lock_user_string(arg2
);
11458 if (num
== TARGET_NR_setxattr
) {
11459 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11461 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11464 ret
= -TARGET_EFAULT
;
11466 unlock_user(p
, arg1
, 0);
11467 unlock_user(n
, arg2
, 0);
11468 unlock_user(v
, arg3
, 0);
11471 case TARGET_NR_fsetxattr
:
11475 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11477 return -TARGET_EFAULT
;
11480 n
= lock_user_string(arg2
);
11482 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11484 ret
= -TARGET_EFAULT
;
11486 unlock_user(n
, arg2
, 0);
11487 unlock_user(v
, arg3
, 0);
11490 case TARGET_NR_getxattr
:
11491 case TARGET_NR_lgetxattr
:
11493 void *p
, *n
, *v
= 0;
11495 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11497 return -TARGET_EFAULT
;
11500 p
= lock_user_string(arg1
);
11501 n
= lock_user_string(arg2
);
11503 if (num
== TARGET_NR_getxattr
) {
11504 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11506 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11509 ret
= -TARGET_EFAULT
;
11511 unlock_user(p
, arg1
, 0);
11512 unlock_user(n
, arg2
, 0);
11513 unlock_user(v
, arg3
, arg4
);
11516 case TARGET_NR_fgetxattr
:
11520 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11522 return -TARGET_EFAULT
;
11525 n
= lock_user_string(arg2
);
11527 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11529 ret
= -TARGET_EFAULT
;
11531 unlock_user(n
, arg2
, 0);
11532 unlock_user(v
, arg3
, arg4
);
11535 case TARGET_NR_removexattr
:
11536 case TARGET_NR_lremovexattr
:
11539 p
= lock_user_string(arg1
);
11540 n
= lock_user_string(arg2
);
11542 if (num
== TARGET_NR_removexattr
) {
11543 ret
= get_errno(removexattr(p
, n
));
11545 ret
= get_errno(lremovexattr(p
, n
));
11548 ret
= -TARGET_EFAULT
;
11550 unlock_user(p
, arg1
, 0);
11551 unlock_user(n
, arg2
, 0);
11554 case TARGET_NR_fremovexattr
:
11557 n
= lock_user_string(arg2
);
11559 ret
= get_errno(fremovexattr(arg1
, n
));
11561 ret
= -TARGET_EFAULT
;
11563 unlock_user(n
, arg2
, 0);
11567 #endif /* CONFIG_ATTR */
11568 #ifdef TARGET_NR_set_thread_area
11569 case TARGET_NR_set_thread_area
:
11570 #if defined(TARGET_MIPS)
11571 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11573 #elif defined(TARGET_CRIS)
11575 ret
= -TARGET_EINVAL
;
11577 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11581 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11582 return do_set_thread_area(cpu_env
, arg1
);
11583 #elif defined(TARGET_M68K)
11585 TaskState
*ts
= cpu
->opaque
;
11586 ts
->tp_value
= arg1
;
11590 return -TARGET_ENOSYS
;
11593 #ifdef TARGET_NR_get_thread_area
11594 case TARGET_NR_get_thread_area
:
11595 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11596 return do_get_thread_area(cpu_env
, arg1
);
11597 #elif defined(TARGET_M68K)
11599 TaskState
*ts
= cpu
->opaque
;
11600 return ts
->tp_value
;
11603 return -TARGET_ENOSYS
;
11606 #ifdef TARGET_NR_getdomainname
11607 case TARGET_NR_getdomainname
:
11608 return -TARGET_ENOSYS
;
11611 #ifdef TARGET_NR_clock_settime
11612 case TARGET_NR_clock_settime
:
11614 struct timespec ts
;
11616 ret
= target_to_host_timespec(&ts
, arg2
);
11617 if (!is_error(ret
)) {
11618 ret
= get_errno(clock_settime(arg1
, &ts
));
11623 #ifdef TARGET_NR_clock_settime64
11624 case TARGET_NR_clock_settime64
:
11626 struct timespec ts
;
11628 ret
= target_to_host_timespec64(&ts
, arg2
);
11629 if (!is_error(ret
)) {
11630 ret
= get_errno(clock_settime(arg1
, &ts
));
11635 #ifdef TARGET_NR_clock_gettime
11636 case TARGET_NR_clock_gettime
:
11638 struct timespec ts
;
11639 ret
= get_errno(clock_gettime(arg1
, &ts
));
11640 if (!is_error(ret
)) {
11641 ret
= host_to_target_timespec(arg2
, &ts
);
11646 #ifdef TARGET_NR_clock_gettime64
11647 case TARGET_NR_clock_gettime64
:
11649 struct timespec ts
;
11650 ret
= get_errno(clock_gettime(arg1
, &ts
));
11651 if (!is_error(ret
)) {
11652 ret
= host_to_target_timespec64(arg2
, &ts
);
11657 #ifdef TARGET_NR_clock_getres
11658 case TARGET_NR_clock_getres
:
11660 struct timespec ts
;
11661 ret
= get_errno(clock_getres(arg1
, &ts
));
11662 if (!is_error(ret
)) {
11663 host_to_target_timespec(arg2
, &ts
);
11668 #ifdef TARGET_NR_clock_nanosleep
11669 case TARGET_NR_clock_nanosleep
:
11671 struct timespec ts
;
11672 target_to_host_timespec(&ts
, arg3
);
11673 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11674 &ts
, arg4
? &ts
: NULL
));
11676 host_to_target_timespec(arg4
, &ts
);
11678 #if defined(TARGET_PPC)
11679 /* clock_nanosleep is odd in that it returns positive errno values.
11680 * On PPC, CR0 bit 3 should be set in such a situation. */
11681 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11682 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11689 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11690 case TARGET_NR_set_tid_address
:
11691 return get_errno(set_tid_address((int *)g2h(arg1
)));
11694 case TARGET_NR_tkill
:
11695 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11697 case TARGET_NR_tgkill
:
11698 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11699 target_to_host_signal(arg3
)));
11701 #ifdef TARGET_NR_set_robust_list
11702 case TARGET_NR_set_robust_list
:
11703 case TARGET_NR_get_robust_list
:
11704 /* The ABI for supporting robust futexes has userspace pass
11705 * the kernel a pointer to a linked list which is updated by
11706 * userspace after the syscall; the list is walked by the kernel
11707 * when the thread exits. Since the linked list in QEMU guest
11708 * memory isn't a valid linked list for the host and we have
11709 * no way to reliably intercept the thread-death event, we can't
11710 * support these. Silently return ENOSYS so that guest userspace
11711 * falls back to a non-robust futex implementation (which should
11712 * be OK except in the corner case of the guest crashing while
11713 * holding a mutex that is shared with another process via
11716 return -TARGET_ENOSYS
;
11719 #if defined(TARGET_NR_utimensat)
11720 case TARGET_NR_utimensat
:
11722 struct timespec
*tsp
, ts
[2];
11726 target_to_host_timespec(ts
, arg3
);
11727 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11731 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11733 if (!(p
= lock_user_string(arg2
))) {
11734 return -TARGET_EFAULT
;
11736 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11737 unlock_user(p
, arg2
, 0);
11742 #ifdef TARGET_NR_futex
11743 case TARGET_NR_futex
:
11744 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11746 #ifdef TARGET_NR_futex_time64
11747 case TARGET_NR_futex_time64
:
11748 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11750 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11751 case TARGET_NR_inotify_init
:
11752 ret
= get_errno(sys_inotify_init());
11754 fd_trans_register(ret
, &target_inotify_trans
);
11758 #ifdef CONFIG_INOTIFY1
11759 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11760 case TARGET_NR_inotify_init1
:
11761 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11762 fcntl_flags_tbl
)));
11764 fd_trans_register(ret
, &target_inotify_trans
);
11769 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11770 case TARGET_NR_inotify_add_watch
:
11771 p
= lock_user_string(arg2
);
11772 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11773 unlock_user(p
, arg2
, 0);
11776 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11777 case TARGET_NR_inotify_rm_watch
:
11778 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11781 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11782 case TARGET_NR_mq_open
:
11784 struct mq_attr posix_mq_attr
;
11785 struct mq_attr
*pposix_mq_attr
;
11788 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11789 pposix_mq_attr
= NULL
;
11791 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11792 return -TARGET_EFAULT
;
11794 pposix_mq_attr
= &posix_mq_attr
;
11796 p
= lock_user_string(arg1
- 1);
11798 return -TARGET_EFAULT
;
11800 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11801 unlock_user (p
, arg1
, 0);
11805 case TARGET_NR_mq_unlink
:
11806 p
= lock_user_string(arg1
- 1);
11808 return -TARGET_EFAULT
;
11810 ret
= get_errno(mq_unlink(p
));
11811 unlock_user (p
, arg1
, 0);
11814 #ifdef TARGET_NR_mq_timedsend
11815 case TARGET_NR_mq_timedsend
:
11817 struct timespec ts
;
11819 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11821 target_to_host_timespec(&ts
, arg5
);
11822 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11823 host_to_target_timespec(arg5
, &ts
);
11825 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11827 unlock_user (p
, arg2
, arg3
);
11832 #ifdef TARGET_NR_mq_timedreceive
11833 case TARGET_NR_mq_timedreceive
:
11835 struct timespec ts
;
11838 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11840 target_to_host_timespec(&ts
, arg5
);
11841 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11843 host_to_target_timespec(arg5
, &ts
);
11845 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11848 unlock_user (p
, arg2
, arg3
);
11850 put_user_u32(prio
, arg4
);
11855 /* Not implemented for now... */
11856 /* case TARGET_NR_mq_notify: */
11859 case TARGET_NR_mq_getsetattr
:
11861 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11864 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11865 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11866 &posix_mq_attr_out
));
11867 } else if (arg3
!= 0) {
11868 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11870 if (ret
== 0 && arg3
!= 0) {
11871 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11877 #ifdef CONFIG_SPLICE
11878 #ifdef TARGET_NR_tee
11879 case TARGET_NR_tee
:
11881 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11885 #ifdef TARGET_NR_splice
11886 case TARGET_NR_splice
:
11888 loff_t loff_in
, loff_out
;
11889 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11891 if (get_user_u64(loff_in
, arg2
)) {
11892 return -TARGET_EFAULT
;
11894 ploff_in
= &loff_in
;
11897 if (get_user_u64(loff_out
, arg4
)) {
11898 return -TARGET_EFAULT
;
11900 ploff_out
= &loff_out
;
11902 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11904 if (put_user_u64(loff_in
, arg2
)) {
11905 return -TARGET_EFAULT
;
11909 if (put_user_u64(loff_out
, arg4
)) {
11910 return -TARGET_EFAULT
;
11916 #ifdef TARGET_NR_vmsplice
11917 case TARGET_NR_vmsplice
:
11919 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11921 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11922 unlock_iovec(vec
, arg2
, arg3
, 0);
11924 ret
= -host_to_target_errno(errno
);
11929 #endif /* CONFIG_SPLICE */
11930 #ifdef CONFIG_EVENTFD
11931 #if defined(TARGET_NR_eventfd)
11932 case TARGET_NR_eventfd
:
11933 ret
= get_errno(eventfd(arg1
, 0));
11935 fd_trans_register(ret
, &target_eventfd_trans
);
11939 #if defined(TARGET_NR_eventfd2)
11940 case TARGET_NR_eventfd2
:
11942 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11943 if (arg2
& TARGET_O_NONBLOCK
) {
11944 host_flags
|= O_NONBLOCK
;
11946 if (arg2
& TARGET_O_CLOEXEC
) {
11947 host_flags
|= O_CLOEXEC
;
11949 ret
= get_errno(eventfd(arg1
, host_flags
));
11951 fd_trans_register(ret
, &target_eventfd_trans
);
11956 #endif /* CONFIG_EVENTFD */
11957 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11958 case TARGET_NR_fallocate
:
11959 #if TARGET_ABI_BITS == 32
11960 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11961 target_offset64(arg5
, arg6
)));
11963 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11967 #if defined(CONFIG_SYNC_FILE_RANGE)
11968 #if defined(TARGET_NR_sync_file_range)
11969 case TARGET_NR_sync_file_range
:
11970 #if TARGET_ABI_BITS == 32
11971 #if defined(TARGET_MIPS)
11972 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11973 target_offset64(arg5
, arg6
), arg7
));
11975 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11976 target_offset64(arg4
, arg5
), arg6
));
11977 #endif /* !TARGET_MIPS */
11979 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11983 #if defined(TARGET_NR_sync_file_range2) || \
11984 defined(TARGET_NR_arm_sync_file_range)
11985 #if defined(TARGET_NR_sync_file_range2)
11986 case TARGET_NR_sync_file_range2
:
11988 #if defined(TARGET_NR_arm_sync_file_range)
11989 case TARGET_NR_arm_sync_file_range
:
11991 /* This is like sync_file_range but the arguments are reordered */
11992 #if TARGET_ABI_BITS == 32
11993 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11994 target_offset64(arg5
, arg6
), arg2
));
11996 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12001 #if defined(TARGET_NR_signalfd4)
12002 case TARGET_NR_signalfd4
:
12003 return do_signalfd4(arg1
, arg2
, arg4
);
12005 #if defined(TARGET_NR_signalfd)
12006 case TARGET_NR_signalfd
:
12007 return do_signalfd4(arg1
, arg2
, 0);
12009 #if defined(CONFIG_EPOLL)
12010 #if defined(TARGET_NR_epoll_create)
12011 case TARGET_NR_epoll_create
:
12012 return get_errno(epoll_create(arg1
));
12014 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12015 case TARGET_NR_epoll_create1
:
12016 return get_errno(epoll_create1(arg1
));
12018 #if defined(TARGET_NR_epoll_ctl)
12019 case TARGET_NR_epoll_ctl
:
12021 struct epoll_event ep
;
12022 struct epoll_event
*epp
= 0;
12024 struct target_epoll_event
*target_ep
;
12025 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12026 return -TARGET_EFAULT
;
12028 ep
.events
= tswap32(target_ep
->events
);
12029 /* The epoll_data_t union is just opaque data to the kernel,
12030 * so we transfer all 64 bits across and need not worry what
12031 * actual data type it is.
12033 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12034 unlock_user_struct(target_ep
, arg4
, 0);
12037 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12041 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12042 #if defined(TARGET_NR_epoll_wait)
12043 case TARGET_NR_epoll_wait
:
12045 #if defined(TARGET_NR_epoll_pwait)
12046 case TARGET_NR_epoll_pwait
:
12049 struct target_epoll_event
*target_ep
;
12050 struct epoll_event
*ep
;
12052 int maxevents
= arg3
;
12053 int timeout
= arg4
;
12055 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12056 return -TARGET_EINVAL
;
12059 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12060 maxevents
* sizeof(struct target_epoll_event
), 1);
12062 return -TARGET_EFAULT
;
12065 ep
= g_try_new(struct epoll_event
, maxevents
);
12067 unlock_user(target_ep
, arg2
, 0);
12068 return -TARGET_ENOMEM
;
12072 #if defined(TARGET_NR_epoll_pwait)
12073 case TARGET_NR_epoll_pwait
:
12075 target_sigset_t
*target_set
;
12076 sigset_t _set
, *set
= &_set
;
12079 if (arg6
!= sizeof(target_sigset_t
)) {
12080 ret
= -TARGET_EINVAL
;
12084 target_set
= lock_user(VERIFY_READ
, arg5
,
12085 sizeof(target_sigset_t
), 1);
12087 ret
= -TARGET_EFAULT
;
12090 target_to_host_sigset(set
, target_set
);
12091 unlock_user(target_set
, arg5
, 0);
12096 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12097 set
, SIGSET_T_SIZE
));
12101 #if defined(TARGET_NR_epoll_wait)
12102 case TARGET_NR_epoll_wait
:
12103 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12108 ret
= -TARGET_ENOSYS
;
12110 if (!is_error(ret
)) {
12112 for (i
= 0; i
< ret
; i
++) {
12113 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12114 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12116 unlock_user(target_ep
, arg2
,
12117 ret
* sizeof(struct target_epoll_event
));
12119 unlock_user(target_ep
, arg2
, 0);
12126 #ifdef TARGET_NR_prlimit64
12127 case TARGET_NR_prlimit64
:
12129 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12130 struct target_rlimit64
*target_rnew
, *target_rold
;
12131 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12132 int resource
= target_to_host_resource(arg2
);
12134 if (arg3
&& (resource
!= RLIMIT_AS
&&
12135 resource
!= RLIMIT_DATA
&&
12136 resource
!= RLIMIT_STACK
)) {
12137 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12138 return -TARGET_EFAULT
;
12140 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12141 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12142 unlock_user_struct(target_rnew
, arg3
, 0);
12146 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12147 if (!is_error(ret
) && arg4
) {
12148 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12149 return -TARGET_EFAULT
;
12151 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12152 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12153 unlock_user_struct(target_rold
, arg4
, 1);
12158 #ifdef TARGET_NR_gethostname
12159 case TARGET_NR_gethostname
:
12161 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12163 ret
= get_errno(gethostname(name
, arg2
));
12164 unlock_user(name
, arg1
, arg2
);
12166 ret
= -TARGET_EFAULT
;
12171 #ifdef TARGET_NR_atomic_cmpxchg_32
12172 case TARGET_NR_atomic_cmpxchg_32
:
12174 /* should use start_exclusive from main.c */
12175 abi_ulong mem_value
;
12176 if (get_user_u32(mem_value
, arg6
)) {
12177 target_siginfo_t info
;
12178 info
.si_signo
= SIGSEGV
;
12180 info
.si_code
= TARGET_SEGV_MAPERR
;
12181 info
._sifields
._sigfault
._addr
= arg6
;
12182 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12183 QEMU_SI_FAULT
, &info
);
12187 if (mem_value
== arg2
)
12188 put_user_u32(arg1
, arg6
);
12192 #ifdef TARGET_NR_atomic_barrier
12193 case TARGET_NR_atomic_barrier
:
12194 /* Like the kernel implementation and the
12195 qemu arm barrier, no-op this? */
12199 #ifdef TARGET_NR_timer_create
12200 case TARGET_NR_timer_create
:
12202 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12204 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12207 int timer_index
= next_free_host_timer();
12209 if (timer_index
< 0) {
12210 ret
= -TARGET_EAGAIN
;
12212 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12215 phost_sevp
= &host_sevp
;
12216 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12222 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12226 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12227 return -TARGET_EFAULT
;
12235 #ifdef TARGET_NR_timer_settime
12236 case TARGET_NR_timer_settime
:
12238 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12239 * struct itimerspec * old_value */
12240 target_timer_t timerid
= get_timer_id(arg1
);
12244 } else if (arg3
== 0) {
12245 ret
= -TARGET_EINVAL
;
12247 timer_t htimer
= g_posix_timers
[timerid
];
12248 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12250 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12251 return -TARGET_EFAULT
;
12254 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12255 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12256 return -TARGET_EFAULT
;
12263 #ifdef TARGET_NR_timer_gettime
12264 case TARGET_NR_timer_gettime
:
12266 /* args: timer_t timerid, struct itimerspec *curr_value */
12267 target_timer_t timerid
= get_timer_id(arg1
);
12271 } else if (!arg2
) {
12272 ret
= -TARGET_EFAULT
;
12274 timer_t htimer
= g_posix_timers
[timerid
];
12275 struct itimerspec hspec
;
12276 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12278 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12279 ret
= -TARGET_EFAULT
;
12286 #ifdef TARGET_NR_timer_getoverrun
12287 case TARGET_NR_timer_getoverrun
:
12289 /* args: timer_t timerid */
12290 target_timer_t timerid
= get_timer_id(arg1
);
12295 timer_t htimer
= g_posix_timers
[timerid
];
12296 ret
= get_errno(timer_getoverrun(htimer
));
12302 #ifdef TARGET_NR_timer_delete
12303 case TARGET_NR_timer_delete
:
12305 /* args: timer_t timerid */
12306 target_timer_t timerid
= get_timer_id(arg1
);
12311 timer_t htimer
= g_posix_timers
[timerid
];
12312 ret
= get_errno(timer_delete(htimer
));
12313 g_posix_timers
[timerid
] = 0;
12319 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12320 case TARGET_NR_timerfd_create
:
12321 return get_errno(timerfd_create(arg1
,
12322 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12325 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12326 case TARGET_NR_timerfd_gettime
:
12328 struct itimerspec its_curr
;
12330 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12332 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12333 return -TARGET_EFAULT
;
12339 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12340 case TARGET_NR_timerfd_settime
:
12342 struct itimerspec its_new
, its_old
, *p_new
;
12345 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12346 return -TARGET_EFAULT
;
12353 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12355 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12356 return -TARGET_EFAULT
;
12362 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12363 case TARGET_NR_ioprio_get
:
12364 return get_errno(ioprio_get(arg1
, arg2
));
12367 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12368 case TARGET_NR_ioprio_set
:
12369 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12372 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12373 case TARGET_NR_setns
:
12374 return get_errno(setns(arg1
, arg2
));
12376 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12377 case TARGET_NR_unshare
:
12378 return get_errno(unshare(arg1
));
12380 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12381 case TARGET_NR_kcmp
:
12382 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12384 #ifdef TARGET_NR_swapcontext
12385 case TARGET_NR_swapcontext
:
12386 /* PowerPC specific. */
12387 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12389 #ifdef TARGET_NR_memfd_create
12390 case TARGET_NR_memfd_create
:
12391 p
= lock_user_string(arg1
);
12393 return -TARGET_EFAULT
;
12395 ret
= get_errno(memfd_create(p
, arg2
));
12396 fd_trans_unregister(ret
);
12397 unlock_user(p
, arg1
, 0);
12400 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12401 case TARGET_NR_membarrier
:
12402 return get_errno(membarrier(arg1
, arg2
));
12406 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12407 return -TARGET_ENOSYS
;
12412 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12413 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12414 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12417 CPUState
*cpu
= env_cpu(cpu_env
);
12420 #ifdef DEBUG_ERESTARTSYS
12421 /* Debug-only code for exercising the syscall-restart code paths
12422 * in the per-architecture cpu main loops: restart every syscall
12423 * the guest makes once before letting it through.
12429 return -TARGET_ERESTARTSYS
;
12434 record_syscall_start(cpu
, num
, arg1
,
12435 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
12437 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12438 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12441 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12442 arg5
, arg6
, arg7
, arg8
);
12444 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12445 print_syscall_ret(num
, ret
);
12448 record_syscall_return(cpu
, num
, ret
);