/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
119 #include "linux_loop.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
/* Provide CLONE_IO for old host headers that lack it. */
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any clone flag bit outside these masks is unsupported and rejected. */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

/* Defined locally instead of including <linux/msdos_fs.h> to avoid
 * header conflicts. */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* The _syscallN macros each define a static wrapper function that
 * invokes the raw host syscall via syscall(2); N is the number of
 * arguments passed through. */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Aliases so the sys_* wrapper names generated by _syscallN map onto
 * the real host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

/* These 64-bit hosts only provide lseek; map _llseek onto it. */
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* Direct gettid wrapper (glibc historically did not export one). */
#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
/* Queue a signal (with full siginfo) to a process / to a thread in a
 * thread group. */
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
/* exit_group terminates every thread in the process, unlike plain exit. */
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
/* Raw futex wrappers; the time64 variant takes a 64-bit timespec on
 * 32-bit hosts. */
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
/* CPU affinity, getcpu, reboot and capability syscalls are called
 * directly so we control the exact kernel ABI (not the glibc wrappers). */
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg)
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
/* Optional syscalls: only emitted when both the target wants them and
 * the host kernel headers define the syscall number. */
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
/* Translation table between target and host open(2)/fcntl(2) flag bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits }.
 * NOTE(review): the #if/#endif guards and the { 0, 0, 0, 0 } terminator
 * were reconstructed from context -- confirm against upstream. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
/* getcwd without glibc's path-building fallback. */
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel lacks utimensat: report ENOSYS to the caller.
 * NOTE(review): fallback body reconstructed -- confirm against upstream. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Fallback: plain renameat handles only the flags == 0 case. */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the host inotify API. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
/* NOTE(review): member names/types reconstructed (kernel struct rlimit64
 * uses two __u64 fields) -- confirm against upstream. */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Return the index of a free slot in g_posix_timers and mark it used,
 * or -1 if all slots are taken. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* Reserve the slot with a placeholder before the real
             * timer_t is stored. */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* Upper bound on errno values handled by the translation tables below. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 * Entries missing here translate to the identical numeric value.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]           = TARGET_EAGAIN,
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [EDQUOT]           = TARGET_EDQUOT,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
/* The remaining errnos are not present in all host headers, so each is
 * guarded individually. */
#ifdef ENOKEY
    [ENOKEY]           = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]           = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]          = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]        = TARGET_EHWPOISON,
#endif
};
/* Translate a host errno value to the target's numbering; values with
 * no table entry pass through unchanged. */
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}
/* Translate a target errno value back to the host's numbering; values
 * with no table entry pass through unchanged. */
static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}
/* Convert a host syscall result into the target convention: on failure
 * (-1) return the negated target errno, otherwise the value itself. */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
/* Return a human-readable description of a target errno value, or NULL
 * when the value is out of range. */
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
/* The safe_syscallN macros define safe_<name> wrappers that go through
 * safe_syscall(), which guarantees correct -TARGET_ERESTARTSYS handling
 * when a guest signal arrives during a blocking host syscall. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Restart-safe wrappers for basic blocking I/O and process syscalls. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
/* Restart-safe futex, signal and vector-I/O wrappers. */
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
/* Restart-safe socket and sleep wrappers. */
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
/* Restart-safe SysV IPC and POSIX mqueue wrappers.
 * NOTE(review): the surrounding __NR_ipc / per-arch guards were
 * reconstructed from context -- confirm against upstream. */
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
/* Convert a host socket type value (base type plus SOCK_CLOEXEC /
 * SOCK_NONBLOCK modifier bits) into the target's encoding. */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        /* Other base types share the same numbering on host and target. */
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
/* Guest heap state: current break, its initial value, and the highest
 * host page already reserved for the heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the initial guest program break (called at loader setup). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
/* Enable the first definition to trace brk handling. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the original break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
/* Unpack a guest fd_set (an array of abi_ulong words at target_fds_addr,
 * covering n descriptors) into a host fd_set.
 * Returns 0 or -TARGET_EFAULT if the guest memory cannot be locked. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
/* Like copy_from_user_fdset(), but a NULL guest pointer yields a NULL
 * host fd_set pointer (as select/pselect allow). */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
/* Pack a host fd_set back into the guest's abi_ulong-array layout.
 * Returns 0 or -TARGET_EFAULT if the guest memory cannot be locked. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
/* Host clock tick rate (USER_HZ); Alpha kernels use 1024, others 100. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from the host's HZ to the target's. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Byte-swap and copy a host struct rusage out to guest memory at
 * target_addr. Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1075 #ifdef TARGET_NR_setrlimit
1076 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1078 abi_ulong target_rlim_swap
;
1081 target_rlim_swap
= tswapal(target_rlim
);
1082 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1083 return RLIM_INFINITY
;
1085 result
= target_rlim_swap
;
1086 if (target_rlim_swap
!= (rlim_t
)result
)
1087 return RLIM_INFINITY
;
1093 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1094 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1096 abi_ulong target_rlim_swap
;
1099 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1100 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1102 target_rlim_swap
= rlim
;
1103 result
= tswapal(target_rlim_swap
);
1109 static inline int target_to_host_resource(int code
)
1112 case TARGET_RLIMIT_AS
:
1114 case TARGET_RLIMIT_CORE
:
1116 case TARGET_RLIMIT_CPU
:
1118 case TARGET_RLIMIT_DATA
:
1120 case TARGET_RLIMIT_FSIZE
:
1121 return RLIMIT_FSIZE
;
1122 case TARGET_RLIMIT_LOCKS
:
1123 return RLIMIT_LOCKS
;
1124 case TARGET_RLIMIT_MEMLOCK
:
1125 return RLIMIT_MEMLOCK
;
1126 case TARGET_RLIMIT_MSGQUEUE
:
1127 return RLIMIT_MSGQUEUE
;
1128 case TARGET_RLIMIT_NICE
:
1130 case TARGET_RLIMIT_NOFILE
:
1131 return RLIMIT_NOFILE
;
1132 case TARGET_RLIMIT_NPROC
:
1133 return RLIMIT_NPROC
;
1134 case TARGET_RLIMIT_RSS
:
1136 case TARGET_RLIMIT_RTPRIO
:
1137 return RLIMIT_RTPRIO
;
1138 case TARGET_RLIMIT_SIGPENDING
:
1139 return RLIMIT_SIGPENDING
;
1140 case TARGET_RLIMIT_STACK
:
1141 return RLIMIT_STACK
;
1147 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1148 abi_ulong target_tv_addr
)
1150 struct target_timeval
*target_tv
;
1152 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1153 return -TARGET_EFAULT
;
1156 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1157 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1159 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1164 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1165 const struct timeval
*tv
)
1167 struct target_timeval
*target_tv
;
1169 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1170 return -TARGET_EFAULT
;
1173 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1174 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1176 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1181 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1182 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1183 abi_ulong target_tv_addr
)
1185 struct target__kernel_sock_timeval
*target_tv
;
1187 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1188 return -TARGET_EFAULT
;
1191 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1192 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1194 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1200 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1201 const struct timeval
*tv
)
1203 struct target__kernel_sock_timeval
*target_tv
;
1205 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1206 return -TARGET_EFAULT
;
1209 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1210 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1212 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1217 #if defined(TARGET_NR_futex) || \
1218 defined(TARGET_NR_rt_sigtimedwait) || \
1219 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1220 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1221 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1222 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1223 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1224 defined(TARGET_NR_timer_settime) || \
1225 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1226 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1227 abi_ulong target_addr
)
1229 struct target_timespec
*target_ts
;
1231 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1232 return -TARGET_EFAULT
;
1234 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1235 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1236 unlock_user_struct(target_ts
, target_addr
, 0);
1241 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1242 defined(TARGET_NR_timer_settime64) || \
1243 defined(TARGET_NR_mq_timedsend_time64) || \
1244 defined(TARGET_NR_mq_timedreceive_time64) || \
1245 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1246 defined(TARGET_NR_clock_nanosleep_time64) || \
1247 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1248 defined(TARGET_NR_utimensat) || \
1249 defined(TARGET_NR_utimensat_time64) || \
1250 defined(TARGET_NR_semtimedop_time64)
1251 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1252 abi_ulong target_addr
)
1254 struct target__kernel_timespec
*target_ts
;
1256 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1257 return -TARGET_EFAULT
;
1259 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1260 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1261 /* in 32bit mode, this drops the padding */
1262 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1263 unlock_user_struct(target_ts
, target_addr
, 0);
1268 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1269 struct timespec
*host_ts
)
1271 struct target_timespec
*target_ts
;
1273 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1274 return -TARGET_EFAULT
;
1276 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1277 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1278 unlock_user_struct(target_ts
, target_addr
, 1);
1282 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1283 struct timespec
*host_ts
)
1285 struct target__kernel_timespec
*target_ts
;
1287 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1288 return -TARGET_EFAULT
;
1290 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1291 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1292 unlock_user_struct(target_ts
, target_addr
, 1);
1296 #if defined(TARGET_NR_gettimeofday)
1297 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1298 struct timezone
*tz
)
1300 struct target_timezone
*target_tz
;
1302 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1303 return -TARGET_EFAULT
;
1306 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1307 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1309 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1315 #if defined(TARGET_NR_settimeofday)
1316 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1317 abi_ulong target_tz_addr
)
1319 struct target_timezone
*target_tz
;
1321 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1322 return -TARGET_EFAULT
;
1325 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1326 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1328 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1334 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1337 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1338 abi_ulong target_mq_attr_addr
)
1340 struct target_mq_attr
*target_mq_attr
;
1342 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1343 target_mq_attr_addr
, 1))
1344 return -TARGET_EFAULT
;
1346 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1347 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1348 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1349 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1351 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1356 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1357 const struct mq_attr
*attr
)
1359 struct target_mq_attr
*target_mq_attr
;
1361 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1362 target_mq_attr_addr
, 0))
1363 return -TARGET_EFAULT
;
1365 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1366 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1367 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1368 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1370 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1376 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1377 /* do_select() must return target values and target errnos. */
1378 static abi_long
do_select(int n
,
1379 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1380 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1382 fd_set rfds
, wfds
, efds
;
1383 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1385 struct timespec ts
, *ts_ptr
;
1388 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1392 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1396 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1401 if (target_tv_addr
) {
1402 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1403 return -TARGET_EFAULT
;
1404 ts
.tv_sec
= tv
.tv_sec
;
1405 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1411 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1414 if (!is_error(ret
)) {
1415 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1416 return -TARGET_EFAULT
;
1417 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1418 return -TARGET_EFAULT
;
1419 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1420 return -TARGET_EFAULT
;
1422 if (target_tv_addr
) {
1423 tv
.tv_sec
= ts
.tv_sec
;
1424 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1425 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1426 return -TARGET_EFAULT
;
1434 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1435 static abi_long
do_old_select(abi_ulong arg1
)
1437 struct target_sel_arg_struct
*sel
;
1438 abi_ulong inp
, outp
, exp
, tvp
;
1441 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1442 return -TARGET_EFAULT
;
1445 nsel
= tswapal(sel
->n
);
1446 inp
= tswapal(sel
->inp
);
1447 outp
= tswapal(sel
->outp
);
1448 exp
= tswapal(sel
->exp
);
1449 tvp
= tswapal(sel
->tvp
);
1451 unlock_user_struct(sel
, arg1
, 0);
1453 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1458 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1461 return pipe2(host_pipe
, flags
);
1467 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1468 int flags
, int is_pipe2
)
1472 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1475 return get_errno(ret
);
1477 /* Several targets have special calling conventions for the original
1478 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1480 #if defined(TARGET_ALPHA)
1481 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1482 return host_pipe
[0];
1483 #elif defined(TARGET_MIPS)
1484 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1485 return host_pipe
[0];
1486 #elif defined(TARGET_SH4)
1487 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1488 return host_pipe
[0];
1489 #elif defined(TARGET_SPARC)
1490 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1491 return host_pipe
[0];
1495 if (put_user_s32(host_pipe
[0], pipedes
)
1496 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1497 return -TARGET_EFAULT
;
1498 return get_errno(ret
);
1501 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1502 abi_ulong target_addr
,
1505 struct target_ip_mreqn
*target_smreqn
;
1507 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1509 return -TARGET_EFAULT
;
1510 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1511 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1512 if (len
== sizeof(struct target_ip_mreqn
))
1513 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1514 unlock_user(target_smreqn
, target_addr
, 0);
1519 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1520 abi_ulong target_addr
,
1523 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1524 sa_family_t sa_family
;
1525 struct target_sockaddr
*target_saddr
;
1527 if (fd_trans_target_to_host_addr(fd
)) {
1528 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1531 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1533 return -TARGET_EFAULT
;
1535 sa_family
= tswap16(target_saddr
->sa_family
);
1537 /* Oops. The caller might send a incomplete sun_path; sun_path
1538 * must be terminated by \0 (see the manual page), but
1539 * unfortunately it is quite common to specify sockaddr_un
1540 * length as "strlen(x->sun_path)" while it should be
1541 * "strlen(...) + 1". We'll fix that here if needed.
1542 * Linux kernel has a similar feature.
1545 if (sa_family
== AF_UNIX
) {
1546 if (len
< unix_maxlen
&& len
> 0) {
1547 char *cp
= (char*)target_saddr
;
1549 if ( cp
[len
-1] && !cp
[len
] )
1552 if (len
> unix_maxlen
)
1556 memcpy(addr
, target_saddr
, len
);
1557 addr
->sa_family
= sa_family
;
1558 if (sa_family
== AF_NETLINK
) {
1559 struct sockaddr_nl
*nladdr
;
1561 nladdr
= (struct sockaddr_nl
*)addr
;
1562 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1563 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1564 } else if (sa_family
== AF_PACKET
) {
1565 struct target_sockaddr_ll
*lladdr
;
1567 lladdr
= (struct target_sockaddr_ll
*)addr
;
1568 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1569 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1571 unlock_user(target_saddr
, target_addr
, 0);
1576 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1577 struct sockaddr
*addr
,
1580 struct target_sockaddr
*target_saddr
;
1587 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1589 return -TARGET_EFAULT
;
1590 memcpy(target_saddr
, addr
, len
);
1591 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1592 sizeof(target_saddr
->sa_family
)) {
1593 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1595 if (addr
->sa_family
== AF_NETLINK
&&
1596 len
>= sizeof(struct target_sockaddr_nl
)) {
1597 struct target_sockaddr_nl
*target_nl
=
1598 (struct target_sockaddr_nl
*)target_saddr
;
1599 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1600 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1601 } else if (addr
->sa_family
== AF_PACKET
) {
1602 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1603 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1604 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1605 } else if (addr
->sa_family
== AF_INET6
&&
1606 len
>= sizeof(struct target_sockaddr_in6
)) {
1607 struct target_sockaddr_in6
*target_in6
=
1608 (struct target_sockaddr_in6
*)target_saddr
;
1609 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1611 unlock_user(target_saddr
, target_addr
, len
);
1616 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1617 struct target_msghdr
*target_msgh
)
1619 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1620 abi_long msg_controllen
;
1621 abi_ulong target_cmsg_addr
;
1622 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1623 socklen_t space
= 0;
1625 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1626 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1628 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1629 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1630 target_cmsg_start
= target_cmsg
;
1632 return -TARGET_EFAULT
;
1634 while (cmsg
&& target_cmsg
) {
1635 void *data
= CMSG_DATA(cmsg
);
1636 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1638 int len
= tswapal(target_cmsg
->cmsg_len
)
1639 - sizeof(struct target_cmsghdr
);
1641 space
+= CMSG_SPACE(len
);
1642 if (space
> msgh
->msg_controllen
) {
1643 space
-= CMSG_SPACE(len
);
1644 /* This is a QEMU bug, since we allocated the payload
1645 * area ourselves (unlike overflow in host-to-target
1646 * conversion, which is just the guest giving us a buffer
1647 * that's too small). It can't happen for the payload types
1648 * we currently support; if it becomes an issue in future
1649 * we would need to improve our allocation strategy to
1650 * something more intelligent than "twice the size of the
1651 * target buffer we're reading from".
1653 qemu_log_mask(LOG_UNIMP
,
1654 ("Unsupported ancillary data %d/%d: "
1655 "unhandled msg size\n"),
1656 tswap32(target_cmsg
->cmsg_level
),
1657 tswap32(target_cmsg
->cmsg_type
));
1661 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1662 cmsg
->cmsg_level
= SOL_SOCKET
;
1664 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1666 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1667 cmsg
->cmsg_len
= CMSG_LEN(len
);
1669 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1670 int *fd
= (int *)data
;
1671 int *target_fd
= (int *)target_data
;
1672 int i
, numfds
= len
/ sizeof(int);
1674 for (i
= 0; i
< numfds
; i
++) {
1675 __get_user(fd
[i
], target_fd
+ i
);
1677 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1678 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1679 struct ucred
*cred
= (struct ucred
*)data
;
1680 struct target_ucred
*target_cred
=
1681 (struct target_ucred
*)target_data
;
1683 __get_user(cred
->pid
, &target_cred
->pid
);
1684 __get_user(cred
->uid
, &target_cred
->uid
);
1685 __get_user(cred
->gid
, &target_cred
->gid
);
1687 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1688 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1689 memcpy(data
, target_data
, len
);
1692 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1693 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1696 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1698 msgh
->msg_controllen
= space
;
1702 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1703 struct msghdr
*msgh
)
1705 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1706 abi_long msg_controllen
;
1707 abi_ulong target_cmsg_addr
;
1708 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1709 socklen_t space
= 0;
1711 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1712 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1714 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1715 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1716 target_cmsg_start
= target_cmsg
;
1718 return -TARGET_EFAULT
;
1720 while (cmsg
&& target_cmsg
) {
1721 void *data
= CMSG_DATA(cmsg
);
1722 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1724 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1725 int tgt_len
, tgt_space
;
1727 /* We never copy a half-header but may copy half-data;
1728 * this is Linux's behaviour in put_cmsg(). Note that
1729 * truncation here is a guest problem (which we report
1730 * to the guest via the CTRUNC bit), unlike truncation
1731 * in target_to_host_cmsg, which is a QEMU bug.
1733 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1734 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1738 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1739 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1741 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1743 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1745 /* Payload types which need a different size of payload on
1746 * the target must adjust tgt_len here.
1749 switch (cmsg
->cmsg_level
) {
1751 switch (cmsg
->cmsg_type
) {
1753 tgt_len
= sizeof(struct target_timeval
);
1763 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1764 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1765 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1768 /* We must now copy-and-convert len bytes of payload
1769 * into tgt_len bytes of destination space. Bear in mind
1770 * that in both source and destination we may be dealing
1771 * with a truncated value!
1773 switch (cmsg
->cmsg_level
) {
1775 switch (cmsg
->cmsg_type
) {
1778 int *fd
= (int *)data
;
1779 int *target_fd
= (int *)target_data
;
1780 int i
, numfds
= tgt_len
/ sizeof(int);
1782 for (i
= 0; i
< numfds
; i
++) {
1783 __put_user(fd
[i
], target_fd
+ i
);
1789 struct timeval
*tv
= (struct timeval
*)data
;
1790 struct target_timeval
*target_tv
=
1791 (struct target_timeval
*)target_data
;
1793 if (len
!= sizeof(struct timeval
) ||
1794 tgt_len
!= sizeof(struct target_timeval
)) {
1798 /* copy struct timeval to target */
1799 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1800 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1803 case SCM_CREDENTIALS
:
1805 struct ucred
*cred
= (struct ucred
*)data
;
1806 struct target_ucred
*target_cred
=
1807 (struct target_ucred
*)target_data
;
1809 __put_user(cred
->pid
, &target_cred
->pid
);
1810 __put_user(cred
->uid
, &target_cred
->uid
);
1811 __put_user(cred
->gid
, &target_cred
->gid
);
1820 switch (cmsg
->cmsg_type
) {
1823 uint32_t *v
= (uint32_t *)data
;
1824 uint32_t *t_int
= (uint32_t *)target_data
;
1826 if (len
!= sizeof(uint32_t) ||
1827 tgt_len
!= sizeof(uint32_t)) {
1830 __put_user(*v
, t_int
);
1836 struct sock_extended_err ee
;
1837 struct sockaddr_in offender
;
1839 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1840 struct errhdr_t
*target_errh
=
1841 (struct errhdr_t
*)target_data
;
1843 if (len
!= sizeof(struct errhdr_t
) ||
1844 tgt_len
!= sizeof(struct errhdr_t
)) {
1847 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1848 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1849 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1850 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1851 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1852 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1853 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1854 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1855 (void *) &errh
->offender
, sizeof(errh
->offender
));
1864 switch (cmsg
->cmsg_type
) {
1867 uint32_t *v
= (uint32_t *)data
;
1868 uint32_t *t_int
= (uint32_t *)target_data
;
1870 if (len
!= sizeof(uint32_t) ||
1871 tgt_len
!= sizeof(uint32_t)) {
1874 __put_user(*v
, t_int
);
1880 struct sock_extended_err ee
;
1881 struct sockaddr_in6 offender
;
1883 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1884 struct errhdr6_t
*target_errh
=
1885 (struct errhdr6_t
*)target_data
;
1887 if (len
!= sizeof(struct errhdr6_t
) ||
1888 tgt_len
!= sizeof(struct errhdr6_t
)) {
1891 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1892 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1893 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1894 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1895 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1896 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1897 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1898 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1899 (void *) &errh
->offender
, sizeof(errh
->offender
));
1909 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1910 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1911 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1912 if (tgt_len
> len
) {
1913 memset(target_data
+ len
, 0, tgt_len
- len
);
1917 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1918 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1919 if (msg_controllen
< tgt_space
) {
1920 tgt_space
= msg_controllen
;
1922 msg_controllen
-= tgt_space
;
1924 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1925 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1928 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1930 target_msgh
->msg_controllen
= tswapal(space
);
1934 /* do_setsockopt() Must return target values and target errnos. */
1935 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1936 abi_ulong optval_addr
, socklen_t optlen
)
1940 struct ip_mreqn
*ip_mreq
;
1941 struct ip_mreq_source
*ip_mreq_source
;
1945 /* TCP options all take an 'int' value. */
1946 if (optlen
< sizeof(uint32_t))
1947 return -TARGET_EINVAL
;
1949 if (get_user_u32(val
, optval_addr
))
1950 return -TARGET_EFAULT
;
1951 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1958 case IP_ROUTER_ALERT
:
1962 case IP_MTU_DISCOVER
:
1969 case IP_MULTICAST_TTL
:
1970 case IP_MULTICAST_LOOP
:
1972 if (optlen
>= sizeof(uint32_t)) {
1973 if (get_user_u32(val
, optval_addr
))
1974 return -TARGET_EFAULT
;
1975 } else if (optlen
>= 1) {
1976 if (get_user_u8(val
, optval_addr
))
1977 return -TARGET_EFAULT
;
1979 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1981 case IP_ADD_MEMBERSHIP
:
1982 case IP_DROP_MEMBERSHIP
:
1983 if (optlen
< sizeof (struct target_ip_mreq
) ||
1984 optlen
> sizeof (struct target_ip_mreqn
))
1985 return -TARGET_EINVAL
;
1987 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1988 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1989 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1992 case IP_BLOCK_SOURCE
:
1993 case IP_UNBLOCK_SOURCE
:
1994 case IP_ADD_SOURCE_MEMBERSHIP
:
1995 case IP_DROP_SOURCE_MEMBERSHIP
:
1996 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1997 return -TARGET_EINVAL
;
1999 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2000 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2001 unlock_user (ip_mreq_source
, optval_addr
, 0);
2010 case IPV6_MTU_DISCOVER
:
2013 case IPV6_RECVPKTINFO
:
2014 case IPV6_UNICAST_HOPS
:
2015 case IPV6_MULTICAST_HOPS
:
2016 case IPV6_MULTICAST_LOOP
:
2018 case IPV6_RECVHOPLIMIT
:
2019 case IPV6_2292HOPLIMIT
:
2022 case IPV6_2292PKTINFO
:
2023 case IPV6_RECVTCLASS
:
2024 case IPV6_RECVRTHDR
:
2025 case IPV6_2292RTHDR
:
2026 case IPV6_RECVHOPOPTS
:
2027 case IPV6_2292HOPOPTS
:
2028 case IPV6_RECVDSTOPTS
:
2029 case IPV6_2292DSTOPTS
:
2031 #ifdef IPV6_RECVPATHMTU
2032 case IPV6_RECVPATHMTU
:
2034 #ifdef IPV6_TRANSPARENT
2035 case IPV6_TRANSPARENT
:
2037 #ifdef IPV6_FREEBIND
2040 #ifdef IPV6_RECVORIGDSTADDR
2041 case IPV6_RECVORIGDSTADDR
:
2044 if (optlen
< sizeof(uint32_t)) {
2045 return -TARGET_EINVAL
;
2047 if (get_user_u32(val
, optval_addr
)) {
2048 return -TARGET_EFAULT
;
2050 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2051 &val
, sizeof(val
)));
2055 struct in6_pktinfo pki
;
2057 if (optlen
< sizeof(pki
)) {
2058 return -TARGET_EINVAL
;
2061 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2062 return -TARGET_EFAULT
;
2065 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2067 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2068 &pki
, sizeof(pki
)));
2071 case IPV6_ADD_MEMBERSHIP
:
2072 case IPV6_DROP_MEMBERSHIP
:
2074 struct ipv6_mreq ipv6mreq
;
2076 if (optlen
< sizeof(ipv6mreq
)) {
2077 return -TARGET_EINVAL
;
2080 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2081 return -TARGET_EFAULT
;
2084 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2086 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2087 &ipv6mreq
, sizeof(ipv6mreq
)));
2098 struct icmp6_filter icmp6f
;
2100 if (optlen
> sizeof(icmp6f
)) {
2101 optlen
= sizeof(icmp6f
);
2104 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2105 return -TARGET_EFAULT
;
2108 for (val
= 0; val
< 8; val
++) {
2109 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2112 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2124 /* those take an u32 value */
2125 if (optlen
< sizeof(uint32_t)) {
2126 return -TARGET_EINVAL
;
2129 if (get_user_u32(val
, optval_addr
)) {
2130 return -TARGET_EFAULT
;
2132 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2133 &val
, sizeof(val
)));
2140 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2145 char *alg_key
= g_malloc(optlen
);
2148 return -TARGET_ENOMEM
;
2150 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2152 return -TARGET_EFAULT
;
2154 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2159 case ALG_SET_AEAD_AUTHSIZE
:
2161 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2170 case TARGET_SOL_SOCKET
:
2172 case TARGET_SO_RCVTIMEO
:
2176 optname
= SO_RCVTIMEO
;
2179 if (optlen
!= sizeof(struct target_timeval
)) {
2180 return -TARGET_EINVAL
;
2183 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2184 return -TARGET_EFAULT
;
2187 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2191 case TARGET_SO_SNDTIMEO
:
2192 optname
= SO_SNDTIMEO
;
2194 case TARGET_SO_ATTACH_FILTER
:
2196 struct target_sock_fprog
*tfprog
;
2197 struct target_sock_filter
*tfilter
;
2198 struct sock_fprog fprog
;
2199 struct sock_filter
*filter
;
2202 if (optlen
!= sizeof(*tfprog
)) {
2203 return -TARGET_EINVAL
;
2205 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2206 return -TARGET_EFAULT
;
2208 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2209 tswapal(tfprog
->filter
), 0)) {
2210 unlock_user_struct(tfprog
, optval_addr
, 1);
2211 return -TARGET_EFAULT
;
2214 fprog
.len
= tswap16(tfprog
->len
);
2215 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2216 if (filter
== NULL
) {
2217 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2218 unlock_user_struct(tfprog
, optval_addr
, 1);
2219 return -TARGET_ENOMEM
;
2221 for (i
= 0; i
< fprog
.len
; i
++) {
2222 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2223 filter
[i
].jt
= tfilter
[i
].jt
;
2224 filter
[i
].jf
= tfilter
[i
].jf
;
2225 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2227 fprog
.filter
= filter
;
2229 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2230 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2233 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2234 unlock_user_struct(tfprog
, optval_addr
, 1);
2237 case TARGET_SO_BINDTODEVICE
:
2239 char *dev_ifname
, *addr_ifname
;
2241 if (optlen
> IFNAMSIZ
- 1) {
2242 optlen
= IFNAMSIZ
- 1;
2244 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2246 return -TARGET_EFAULT
;
2248 optname
= SO_BINDTODEVICE
;
2249 addr_ifname
= alloca(IFNAMSIZ
);
2250 memcpy(addr_ifname
, dev_ifname
, optlen
);
2251 addr_ifname
[optlen
] = 0;
2252 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2253 addr_ifname
, optlen
));
2254 unlock_user (dev_ifname
, optval_addr
, 0);
2257 case TARGET_SO_LINGER
:
2260 struct target_linger
*tlg
;
2262 if (optlen
!= sizeof(struct target_linger
)) {
2263 return -TARGET_EINVAL
;
2265 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2266 return -TARGET_EFAULT
;
2268 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2269 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2270 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2272 unlock_user_struct(tlg
, optval_addr
, 0);
2275 /* Options with 'int' argument. */
2276 case TARGET_SO_DEBUG
:
2279 case TARGET_SO_REUSEADDR
:
2280 optname
= SO_REUSEADDR
;
2283 case TARGET_SO_REUSEPORT
:
2284 optname
= SO_REUSEPORT
;
2287 case TARGET_SO_TYPE
:
2290 case TARGET_SO_ERROR
:
2293 case TARGET_SO_DONTROUTE
:
2294 optname
= SO_DONTROUTE
;
2296 case TARGET_SO_BROADCAST
:
2297 optname
= SO_BROADCAST
;
2299 case TARGET_SO_SNDBUF
:
2300 optname
= SO_SNDBUF
;
2302 case TARGET_SO_SNDBUFFORCE
:
2303 optname
= SO_SNDBUFFORCE
;
2305 case TARGET_SO_RCVBUF
:
2306 optname
= SO_RCVBUF
;
2308 case TARGET_SO_RCVBUFFORCE
:
2309 optname
= SO_RCVBUFFORCE
;
2311 case TARGET_SO_KEEPALIVE
:
2312 optname
= SO_KEEPALIVE
;
2314 case TARGET_SO_OOBINLINE
:
2315 optname
= SO_OOBINLINE
;
2317 case TARGET_SO_NO_CHECK
:
2318 optname
= SO_NO_CHECK
;
2320 case TARGET_SO_PRIORITY
:
2321 optname
= SO_PRIORITY
;
2324 case TARGET_SO_BSDCOMPAT
:
2325 optname
= SO_BSDCOMPAT
;
2328 case TARGET_SO_PASSCRED
:
2329 optname
= SO_PASSCRED
;
2331 case TARGET_SO_PASSSEC
:
2332 optname
= SO_PASSSEC
;
2334 case TARGET_SO_TIMESTAMP
:
2335 optname
= SO_TIMESTAMP
;
2337 case TARGET_SO_RCVLOWAT
:
2338 optname
= SO_RCVLOWAT
;
2343 if (optlen
< sizeof(uint32_t))
2344 return -TARGET_EINVAL
;
2346 if (get_user_u32(val
, optval_addr
))
2347 return -TARGET_EFAULT
;
2348 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2353 case NETLINK_PKTINFO
:
2354 case NETLINK_ADD_MEMBERSHIP
:
2355 case NETLINK_DROP_MEMBERSHIP
:
2356 case NETLINK_BROADCAST_ERROR
:
2357 case NETLINK_NO_ENOBUFS
:
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2359 case NETLINK_LISTEN_ALL_NSID
:
2360 case NETLINK_CAP_ACK
:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2363 case NETLINK_EXT_ACK
:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2366 case NETLINK_GET_STRICT_CHK
:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2373 if (optlen
< sizeof(uint32_t)) {
2374 return -TARGET_EINVAL
;
2376 if (get_user_u32(val
, optval_addr
)) {
2377 return -TARGET_EFAULT
;
2379 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2382 #endif /* SOL_NETLINK */
2385 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2387 ret
= -TARGET_ENOPROTOOPT
;
2392 /* do_getsockopt() Must return target values and target errnos. */
2393 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2394 abi_ulong optval_addr
, abi_ulong optlen
)
2401 case TARGET_SOL_SOCKET
:
2404 /* These don't just return a single integer */
2405 case TARGET_SO_PEERNAME
:
2407 case TARGET_SO_RCVTIMEO
: {
2411 optname
= SO_RCVTIMEO
;
2414 if (get_user_u32(len
, optlen
)) {
2415 return -TARGET_EFAULT
;
2418 return -TARGET_EINVAL
;
2422 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2427 if (len
> sizeof(struct target_timeval
)) {
2428 len
= sizeof(struct target_timeval
);
2430 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2431 return -TARGET_EFAULT
;
2433 if (put_user_u32(len
, optlen
)) {
2434 return -TARGET_EFAULT
;
2438 case TARGET_SO_SNDTIMEO
:
2439 optname
= SO_SNDTIMEO
;
2441 case TARGET_SO_PEERCRED
: {
2444 struct target_ucred
*tcr
;
2446 if (get_user_u32(len
, optlen
)) {
2447 return -TARGET_EFAULT
;
2450 return -TARGET_EINVAL
;
2454 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2462 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2463 return -TARGET_EFAULT
;
2465 __put_user(cr
.pid
, &tcr
->pid
);
2466 __put_user(cr
.uid
, &tcr
->uid
);
2467 __put_user(cr
.gid
, &tcr
->gid
);
2468 unlock_user_struct(tcr
, optval_addr
, 1);
2469 if (put_user_u32(len
, optlen
)) {
2470 return -TARGET_EFAULT
;
2474 case TARGET_SO_PEERSEC
: {
2477 if (get_user_u32(len
, optlen
)) {
2478 return -TARGET_EFAULT
;
2481 return -TARGET_EINVAL
;
2483 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2485 return -TARGET_EFAULT
;
2488 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2490 if (put_user_u32(lv
, optlen
)) {
2491 ret
= -TARGET_EFAULT
;
2493 unlock_user(name
, optval_addr
, lv
);
2496 case TARGET_SO_LINGER
:
2500 struct target_linger
*tlg
;
2502 if (get_user_u32(len
, optlen
)) {
2503 return -TARGET_EFAULT
;
2506 return -TARGET_EINVAL
;
2510 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2518 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2519 return -TARGET_EFAULT
;
2521 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2522 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2523 unlock_user_struct(tlg
, optval_addr
, 1);
2524 if (put_user_u32(len
, optlen
)) {
2525 return -TARGET_EFAULT
;
2529 /* Options with 'int' argument. */
2530 case TARGET_SO_DEBUG
:
2533 case TARGET_SO_REUSEADDR
:
2534 optname
= SO_REUSEADDR
;
2537 case TARGET_SO_REUSEPORT
:
2538 optname
= SO_REUSEPORT
;
2541 case TARGET_SO_TYPE
:
2544 case TARGET_SO_ERROR
:
2547 case TARGET_SO_DONTROUTE
:
2548 optname
= SO_DONTROUTE
;
2550 case TARGET_SO_BROADCAST
:
2551 optname
= SO_BROADCAST
;
2553 case TARGET_SO_SNDBUF
:
2554 optname
= SO_SNDBUF
;
2556 case TARGET_SO_RCVBUF
:
2557 optname
= SO_RCVBUF
;
2559 case TARGET_SO_KEEPALIVE
:
2560 optname
= SO_KEEPALIVE
;
2562 case TARGET_SO_OOBINLINE
:
2563 optname
= SO_OOBINLINE
;
2565 case TARGET_SO_NO_CHECK
:
2566 optname
= SO_NO_CHECK
;
2568 case TARGET_SO_PRIORITY
:
2569 optname
= SO_PRIORITY
;
2572 case TARGET_SO_BSDCOMPAT
:
2573 optname
= SO_BSDCOMPAT
;
2576 case TARGET_SO_PASSCRED
:
2577 optname
= SO_PASSCRED
;
2579 case TARGET_SO_TIMESTAMP
:
2580 optname
= SO_TIMESTAMP
;
2582 case TARGET_SO_RCVLOWAT
:
2583 optname
= SO_RCVLOWAT
;
2585 case TARGET_SO_ACCEPTCONN
:
2586 optname
= SO_ACCEPTCONN
;
2593 /* TCP options all take an 'int' value. */
2595 if (get_user_u32(len
, optlen
))
2596 return -TARGET_EFAULT
;
2598 return -TARGET_EINVAL
;
2600 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2603 if (optname
== SO_TYPE
) {
2604 val
= host_to_target_sock_type(val
);
2609 if (put_user_u32(val
, optval_addr
))
2610 return -TARGET_EFAULT
;
2612 if (put_user_u8(val
, optval_addr
))
2613 return -TARGET_EFAULT
;
2615 if (put_user_u32(len
, optlen
))
2616 return -TARGET_EFAULT
;
2623 case IP_ROUTER_ALERT
:
2627 case IP_MTU_DISCOVER
:
2633 case IP_MULTICAST_TTL
:
2634 case IP_MULTICAST_LOOP
:
2635 if (get_user_u32(len
, optlen
))
2636 return -TARGET_EFAULT
;
2638 return -TARGET_EINVAL
;
2640 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2643 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2645 if (put_user_u32(len
, optlen
)
2646 || put_user_u8(val
, optval_addr
))
2647 return -TARGET_EFAULT
;
2649 if (len
> sizeof(int))
2651 if (put_user_u32(len
, optlen
)
2652 || put_user_u32(val
, optval_addr
))
2653 return -TARGET_EFAULT
;
2657 ret
= -TARGET_ENOPROTOOPT
;
2663 case IPV6_MTU_DISCOVER
:
2666 case IPV6_RECVPKTINFO
:
2667 case IPV6_UNICAST_HOPS
:
2668 case IPV6_MULTICAST_HOPS
:
2669 case IPV6_MULTICAST_LOOP
:
2671 case IPV6_RECVHOPLIMIT
:
2672 case IPV6_2292HOPLIMIT
:
2675 case IPV6_2292PKTINFO
:
2676 case IPV6_RECVTCLASS
:
2677 case IPV6_RECVRTHDR
:
2678 case IPV6_2292RTHDR
:
2679 case IPV6_RECVHOPOPTS
:
2680 case IPV6_2292HOPOPTS
:
2681 case IPV6_RECVDSTOPTS
:
2682 case IPV6_2292DSTOPTS
:
2684 #ifdef IPV6_RECVPATHMTU
2685 case IPV6_RECVPATHMTU
:
2687 #ifdef IPV6_TRANSPARENT
2688 case IPV6_TRANSPARENT
:
2690 #ifdef IPV6_FREEBIND
2693 #ifdef IPV6_RECVORIGDSTADDR
2694 case IPV6_RECVORIGDSTADDR
:
2696 if (get_user_u32(len
, optlen
))
2697 return -TARGET_EFAULT
;
2699 return -TARGET_EINVAL
;
2701 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2704 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2706 if (put_user_u32(len
, optlen
)
2707 || put_user_u8(val
, optval_addr
))
2708 return -TARGET_EFAULT
;
2710 if (len
> sizeof(int))
2712 if (put_user_u32(len
, optlen
)
2713 || put_user_u32(val
, optval_addr
))
2714 return -TARGET_EFAULT
;
2718 ret
= -TARGET_ENOPROTOOPT
;
2725 case NETLINK_PKTINFO
:
2726 case NETLINK_BROADCAST_ERROR
:
2727 case NETLINK_NO_ENOBUFS
:
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2729 case NETLINK_LISTEN_ALL_NSID
:
2730 case NETLINK_CAP_ACK
:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2733 case NETLINK_EXT_ACK
:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2736 case NETLINK_GET_STRICT_CHK
:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2738 if (get_user_u32(len
, optlen
)) {
2739 return -TARGET_EFAULT
;
2741 if (len
!= sizeof(val
)) {
2742 return -TARGET_EINVAL
;
2745 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2749 if (put_user_u32(lv
, optlen
)
2750 || put_user_u32(val
, optval_addr
)) {
2751 return -TARGET_EFAULT
;
2754 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2755 case NETLINK_LIST_MEMBERSHIPS
:
2759 if (get_user_u32(len
, optlen
)) {
2760 return -TARGET_EFAULT
;
2763 return -TARGET_EINVAL
;
2765 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2767 return -TARGET_EFAULT
;
2770 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2772 unlock_user(results
, optval_addr
, 0);
2775 /* swap host endianess to target endianess. */
2776 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2777 results
[i
] = tswap32(results
[i
]);
2779 if (put_user_u32(lv
, optlen
)) {
2780 return -TARGET_EFAULT
;
2782 unlock_user(results
, optval_addr
, 0);
2785 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2790 #endif /* SOL_NETLINK */
2793 qemu_log_mask(LOG_UNIMP
,
2794 "getsockopt level=%d optname=%d not yet supported\n",
2796 ret
= -TARGET_EOPNOTSUPP
;
2802 /* Convert target low/high pair representing file offset into the host
2803 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2804 * as the kernel doesn't handle them either.
2806 static void target_to_host_low_high(abi_ulong tlow
,
2808 unsigned long *hlow
,
2809 unsigned long *hhigh
)
2811 uint64_t off
= tlow
|
2812 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2813 TARGET_LONG_BITS
/ 2;
2816 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2819 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2820 abi_ulong count
, int copy
)
2822 struct target_iovec
*target_vec
;
2824 abi_ulong total_len
, max_len
;
2827 bool bad_address
= false;
2833 if (count
> IOV_MAX
) {
2838 vec
= g_try_new0(struct iovec
, count
);
2844 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2845 count
* sizeof(struct target_iovec
), 1);
2846 if (target_vec
== NULL
) {
2851 /* ??? If host page size > target page size, this will result in a
2852 value larger than what we can actually support. */
2853 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2856 for (i
= 0; i
< count
; i
++) {
2857 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2858 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2863 } else if (len
== 0) {
2864 /* Zero length pointer is ignored. */
2865 vec
[i
].iov_base
= 0;
2867 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2868 /* If the first buffer pointer is bad, this is a fault. But
2869 * subsequent bad buffers will result in a partial write; this
2870 * is realized by filling the vector with null pointers and
2872 if (!vec
[i
].iov_base
) {
2883 if (len
> max_len
- total_len
) {
2884 len
= max_len
- total_len
;
2887 vec
[i
].iov_len
= len
;
2891 unlock_user(target_vec
, target_addr
, 0);
2896 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2897 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2900 unlock_user(target_vec
, target_addr
, 0);
2907 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2908 abi_ulong count
, int copy
)
2910 struct target_iovec
*target_vec
;
2913 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2914 count
* sizeof(struct target_iovec
), 1);
2916 for (i
= 0; i
< count
; i
++) {
2917 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2918 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2922 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2924 unlock_user(target_vec
, target_addr
, 0);
2930 static inline int target_to_host_sock_type(int *type
)
2933 int target_type
= *type
;
2935 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2936 case TARGET_SOCK_DGRAM
:
2937 host_type
= SOCK_DGRAM
;
2939 case TARGET_SOCK_STREAM
:
2940 host_type
= SOCK_STREAM
;
2943 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2946 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2947 #if defined(SOCK_CLOEXEC)
2948 host_type
|= SOCK_CLOEXEC
;
2950 return -TARGET_EINVAL
;
2953 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2954 #if defined(SOCK_NONBLOCK)
2955 host_type
|= SOCK_NONBLOCK
;
2956 #elif !defined(O_NONBLOCK)
2957 return -TARGET_EINVAL
;
2964 /* Try to emulate socket type flags after socket creation. */
2965 static int sock_flags_fixup(int fd
, int target_type
)
2967 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2968 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2969 int flags
= fcntl(fd
, F_GETFL
);
2970 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2972 return -TARGET_EINVAL
;
2979 /* do_socket() Must return target values and target errnos. */
2980 static abi_long
do_socket(int domain
, int type
, int protocol
)
2982 int target_type
= type
;
2985 ret
= target_to_host_sock_type(&type
);
2990 if (domain
== PF_NETLINK
&& !(
2991 #ifdef CONFIG_RTNETLINK
2992 protocol
== NETLINK_ROUTE
||
2994 protocol
== NETLINK_KOBJECT_UEVENT
||
2995 protocol
== NETLINK_AUDIT
)) {
2996 return -TARGET_EPROTONOSUPPORT
;
2999 if (domain
== AF_PACKET
||
3000 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3001 protocol
= tswap16(protocol
);
3004 ret
= get_errno(socket(domain
, type
, protocol
));
3006 ret
= sock_flags_fixup(ret
, target_type
);
3007 if (type
== SOCK_PACKET
) {
3008 /* Manage an obsolete case :
3009 * if socket type is SOCK_PACKET, bind by name
3011 fd_trans_register(ret
, &target_packet_trans
);
3012 } else if (domain
== PF_NETLINK
) {
3014 #ifdef CONFIG_RTNETLINK
3016 fd_trans_register(ret
, &target_netlink_route_trans
);
3019 case NETLINK_KOBJECT_UEVENT
:
3020 /* nothing to do: messages are strings */
3023 fd_trans_register(ret
, &target_netlink_audit_trans
);
3026 g_assert_not_reached();
3033 /* do_bind() Must return target values and target errnos. */
3034 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3040 if ((int)addrlen
< 0) {
3041 return -TARGET_EINVAL
;
3044 addr
= alloca(addrlen
+1);
3046 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3050 return get_errno(bind(sockfd
, addr
, addrlen
));
3053 /* do_connect() Must return target values and target errnos. */
3054 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3060 if ((int)addrlen
< 0) {
3061 return -TARGET_EINVAL
;
3064 addr
= alloca(addrlen
+1);
3066 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3070 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3073 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3074 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3075 int flags
, int send
)
3081 abi_ulong target_vec
;
3083 if (msgp
->msg_name
) {
3084 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3085 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3086 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3087 tswapal(msgp
->msg_name
),
3089 if (ret
== -TARGET_EFAULT
) {
3090 /* For connected sockets msg_name and msg_namelen must
3091 * be ignored, so returning EFAULT immediately is wrong.
3092 * Instead, pass a bad msg_name to the host kernel, and
3093 * let it decide whether to return EFAULT or not.
3095 msg
.msg_name
= (void *)-1;
3100 msg
.msg_name
= NULL
;
3101 msg
.msg_namelen
= 0;
3103 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3104 msg
.msg_control
= alloca(msg
.msg_controllen
);
3105 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3107 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3109 count
= tswapal(msgp
->msg_iovlen
);
3110 target_vec
= tswapal(msgp
->msg_iov
);
3112 if (count
> IOV_MAX
) {
3113 /* sendrcvmsg returns a different errno for this condition than
3114 * readv/writev, so we must catch it here before lock_iovec() does.
3116 ret
= -TARGET_EMSGSIZE
;
3120 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3121 target_vec
, count
, send
);
3123 ret
= -host_to_target_errno(errno
);
3126 msg
.msg_iovlen
= count
;
3130 if (fd_trans_target_to_host_data(fd
)) {
3133 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3134 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3135 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3136 msg
.msg_iov
->iov_len
);
3138 msg
.msg_iov
->iov_base
= host_msg
;
3139 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3143 ret
= target_to_host_cmsg(&msg
, msgp
);
3145 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3149 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3150 if (!is_error(ret
)) {
3152 if (fd_trans_host_to_target_data(fd
)) {
3153 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3154 MIN(msg
.msg_iov
->iov_len
, len
));
3156 ret
= host_to_target_cmsg(msgp
, &msg
);
3158 if (!is_error(ret
)) {
3159 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3160 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3161 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3162 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3163 msg
.msg_name
, msg
.msg_namelen
);
3175 unlock_iovec(vec
, target_vec
, count
, !send
);
3180 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3181 int flags
, int send
)
3184 struct target_msghdr
*msgp
;
3186 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3190 return -TARGET_EFAULT
;
3192 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3193 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3197 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3198 * so it might not have this *mmsg-specific flag either.
3200 #ifndef MSG_WAITFORONE
3201 #define MSG_WAITFORONE 0x10000
3204 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3205 unsigned int vlen
, unsigned int flags
,
3208 struct target_mmsghdr
*mmsgp
;
3212 if (vlen
> UIO_MAXIOV
) {
3216 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3218 return -TARGET_EFAULT
;
3221 for (i
= 0; i
< vlen
; i
++) {
3222 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3223 if (is_error(ret
)) {
3226 mmsgp
[i
].msg_len
= tswap32(ret
);
3227 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3228 if (flags
& MSG_WAITFORONE
) {
3229 flags
|= MSG_DONTWAIT
;
3233 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3235 /* Return number of datagrams sent if we sent any at all;
3236 * otherwise return the error.
3244 /* do_accept4() Must return target values and target errnos. */
3245 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3246 abi_ulong target_addrlen_addr
, int flags
)
3248 socklen_t addrlen
, ret_addrlen
;
3253 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3255 if (target_addr
== 0) {
3256 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3259 /* linux returns EINVAL if addrlen pointer is invalid */
3260 if (get_user_u32(addrlen
, target_addrlen_addr
))
3261 return -TARGET_EINVAL
;
3263 if ((int)addrlen
< 0) {
3264 return -TARGET_EINVAL
;
3267 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3268 return -TARGET_EINVAL
;
3270 addr
= alloca(addrlen
);
3272 ret_addrlen
= addrlen
;
3273 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3274 if (!is_error(ret
)) {
3275 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3276 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3277 ret
= -TARGET_EFAULT
;
3283 /* do_getpeername() Must return target values and target errnos. */
3284 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3285 abi_ulong target_addrlen_addr
)
3287 socklen_t addrlen
, ret_addrlen
;
3291 if (get_user_u32(addrlen
, target_addrlen_addr
))
3292 return -TARGET_EFAULT
;
3294 if ((int)addrlen
< 0) {
3295 return -TARGET_EINVAL
;
3298 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3299 return -TARGET_EFAULT
;
3301 addr
= alloca(addrlen
);
3303 ret_addrlen
= addrlen
;
3304 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3305 if (!is_error(ret
)) {
3306 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3307 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3308 ret
= -TARGET_EFAULT
;
3314 /* do_getsockname() Must return target values and target errnos. */
3315 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3316 abi_ulong target_addrlen_addr
)
3318 socklen_t addrlen
, ret_addrlen
;
3322 if (get_user_u32(addrlen
, target_addrlen_addr
))
3323 return -TARGET_EFAULT
;
3325 if ((int)addrlen
< 0) {
3326 return -TARGET_EINVAL
;
3329 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3330 return -TARGET_EFAULT
;
3332 addr
= alloca(addrlen
);
3334 ret_addrlen
= addrlen
;
3335 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3336 if (!is_error(ret
)) {
3337 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3338 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3339 ret
= -TARGET_EFAULT
;
3345 /* do_socketpair() Must return target values and target errnos. */
3346 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3347 abi_ulong target_tab_addr
)
3352 target_to_host_sock_type(&type
);
3354 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3355 if (!is_error(ret
)) {
3356 if (put_user_s32(tab
[0], target_tab_addr
)
3357 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3358 ret
= -TARGET_EFAULT
;
3363 /* do_sendto() Must return target values and target errnos. */
3364 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3365 abi_ulong target_addr
, socklen_t addrlen
)
3369 void *copy_msg
= NULL
;
3372 if ((int)addrlen
< 0) {
3373 return -TARGET_EINVAL
;
3376 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3378 return -TARGET_EFAULT
;
3379 if (fd_trans_target_to_host_data(fd
)) {
3380 copy_msg
= host_msg
;
3381 host_msg
= g_malloc(len
);
3382 memcpy(host_msg
, copy_msg
, len
);
3383 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3389 addr
= alloca(addrlen
+1);
3390 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3394 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3396 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3401 host_msg
= copy_msg
;
3403 unlock_user(host_msg
, msg
, 0);
3407 /* do_recvfrom() Must return target values and target errnos. */
3408 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3409 abi_ulong target_addr
,
3410 abi_ulong target_addrlen
)
3412 socklen_t addrlen
, ret_addrlen
;
3417 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3419 return -TARGET_EFAULT
;
3421 if (get_user_u32(addrlen
, target_addrlen
)) {
3422 ret
= -TARGET_EFAULT
;
3425 if ((int)addrlen
< 0) {
3426 ret
= -TARGET_EINVAL
;
3429 addr
= alloca(addrlen
);
3430 ret_addrlen
= addrlen
;
3431 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3432 addr
, &ret_addrlen
));
3434 addr
= NULL
; /* To keep compiler quiet. */
3435 addrlen
= 0; /* To keep compiler quiet. */
3436 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3438 if (!is_error(ret
)) {
3439 if (fd_trans_host_to_target_data(fd
)) {
3441 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3442 if (is_error(trans
)) {
3448 host_to_target_sockaddr(target_addr
, addr
,
3449 MIN(addrlen
, ret_addrlen
));
3450 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3451 ret
= -TARGET_EFAULT
;
3455 unlock_user(host_msg
, msg
, len
);
3458 unlock_user(host_msg
, msg
, 0);
3463 #ifdef TARGET_NR_socketcall
3464 /* do_socketcall() must return target values and target errnos. */
3465 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3467 static const unsigned nargs
[] = { /* number of arguments per operation */
3468 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3469 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3470 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3471 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3472 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3473 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3474 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3475 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3476 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3477 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3478 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3479 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3480 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3481 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3482 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3483 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3484 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3485 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3486 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3487 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3489 abi_long a
[6]; /* max 6 args */
3492 /* check the range of the first argument num */
3493 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3494 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3495 return -TARGET_EINVAL
;
3497 /* ensure we have space for args */
3498 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3499 return -TARGET_EINVAL
;
3501 /* collect the arguments in a[] according to nargs[] */
3502 for (i
= 0; i
< nargs
[num
]; ++i
) {
3503 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3504 return -TARGET_EFAULT
;
3507 /* now when we have the args, invoke the appropriate underlying function */
3509 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3510 return do_socket(a
[0], a
[1], a
[2]);
3511 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3512 return do_bind(a
[0], a
[1], a
[2]);
3513 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3514 return do_connect(a
[0], a
[1], a
[2]);
3515 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3516 return get_errno(listen(a
[0], a
[1]));
3517 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3518 return do_accept4(a
[0], a
[1], a
[2], 0);
3519 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3520 return do_getsockname(a
[0], a
[1], a
[2]);
3521 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3522 return do_getpeername(a
[0], a
[1], a
[2]);
3523 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3524 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3525 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3526 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3527 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3528 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3529 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3530 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3531 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3532 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3533 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3534 return get_errno(shutdown(a
[0], a
[1]));
3535 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3536 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3537 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3538 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3539 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3540 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3541 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3542 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3543 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3544 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3545 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3546 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3547 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3548 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3550 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3551 return -TARGET_EINVAL
;
3556 #define N_SHM_REGIONS 32
3558 static struct shm_region
{
3562 } shm_regions
[N_SHM_REGIONS
];
3564 #ifndef TARGET_SEMID64_DS
3565 /* asm-generic version of this struct */
3566 struct target_semid64_ds
3568 struct target_ipc_perm sem_perm
;
3569 abi_ulong sem_otime
;
3570 #if TARGET_ABI_BITS == 32
3571 abi_ulong __unused1
;
3573 abi_ulong sem_ctime
;
3574 #if TARGET_ABI_BITS == 32
3575 abi_ulong __unused2
;
3577 abi_ulong sem_nsems
;
3578 abi_ulong __unused3
;
3579 abi_ulong __unused4
;
3583 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3584 abi_ulong target_addr
)
3586 struct target_ipc_perm
*target_ip
;
3587 struct target_semid64_ds
*target_sd
;
3589 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3590 return -TARGET_EFAULT
;
3591 target_ip
= &(target_sd
->sem_perm
);
3592 host_ip
->__key
= tswap32(target_ip
->__key
);
3593 host_ip
->uid
= tswap32(target_ip
->uid
);
3594 host_ip
->gid
= tswap32(target_ip
->gid
);
3595 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3596 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3597 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3598 host_ip
->mode
= tswap32(target_ip
->mode
);
3600 host_ip
->mode
= tswap16(target_ip
->mode
);
3602 #if defined(TARGET_PPC)
3603 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3605 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3607 unlock_user_struct(target_sd
, target_addr
, 0);
3611 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3612 struct ipc_perm
*host_ip
)
3614 struct target_ipc_perm
*target_ip
;
3615 struct target_semid64_ds
*target_sd
;
3617 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3618 return -TARGET_EFAULT
;
3619 target_ip
= &(target_sd
->sem_perm
);
3620 target_ip
->__key
= tswap32(host_ip
->__key
);
3621 target_ip
->uid
= tswap32(host_ip
->uid
);
3622 target_ip
->gid
= tswap32(host_ip
->gid
);
3623 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3624 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3625 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3626 target_ip
->mode
= tswap32(host_ip
->mode
);
3628 target_ip
->mode
= tswap16(host_ip
->mode
);
3630 #if defined(TARGET_PPC)
3631 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3633 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3635 unlock_user_struct(target_sd
, target_addr
, 1);
3639 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3640 abi_ulong target_addr
)
3642 struct target_semid64_ds
*target_sd
;
3644 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3645 return -TARGET_EFAULT
;
3646 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3647 return -TARGET_EFAULT
;
3648 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3649 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3650 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3651 unlock_user_struct(target_sd
, target_addr
, 0);
3655 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3656 struct semid_ds
*host_sd
)
3658 struct target_semid64_ds
*target_sd
;
3660 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3661 return -TARGET_EFAULT
;
3662 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3663 return -TARGET_EFAULT
;
3664 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3665 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3666 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3667 unlock_user_struct(target_sd
, target_addr
, 1);
/*
 * Guest-layout mirror of the kernel's struct seminfo (semctl IPC_INFO /
 * SEM_INFO).  Field list matches the __put_user copies in
 * host_to_target_seminfo below.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3684 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3685 struct seminfo
*host_seminfo
)
3687 struct target_seminfo
*target_seminfo
;
3688 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3689 return -TARGET_EFAULT
;
3690 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3691 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3692 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3693 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3694 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3695 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3696 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3697 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3698 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3699 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3700 unlock_user_struct(target_seminfo
, target_addr
, 1);
3706 struct semid_ds
*buf
;
3707 unsigned short *array
;
3708 struct seminfo
*__buf
;
3711 union target_semun
{
3718 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3719 abi_ulong target_addr
)
3722 unsigned short *array
;
3724 struct semid_ds semid_ds
;
3727 semun
.buf
= &semid_ds
;
3729 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3731 return get_errno(ret
);
3733 nsems
= semid_ds
.sem_nsems
;
3735 *host_array
= g_try_new(unsigned short, nsems
);
3737 return -TARGET_ENOMEM
;
3739 array
= lock_user(VERIFY_READ
, target_addr
,
3740 nsems
*sizeof(unsigned short), 1);
3742 g_free(*host_array
);
3743 return -TARGET_EFAULT
;
3746 for(i
=0; i
<nsems
; i
++) {
3747 __get_user((*host_array
)[i
], &array
[i
]);
3749 unlock_user(array
, target_addr
, 0);
3754 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3755 unsigned short **host_array
)
3758 unsigned short *array
;
3760 struct semid_ds semid_ds
;
3763 semun
.buf
= &semid_ds
;
3765 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3767 return get_errno(ret
);
3769 nsems
= semid_ds
.sem_nsems
;
3771 array
= lock_user(VERIFY_WRITE
, target_addr
,
3772 nsems
*sizeof(unsigned short), 0);
3774 return -TARGET_EFAULT
;
3776 for(i
=0; i
<nsems
; i
++) {
3777 __put_user((*host_array
)[i
], &array
[i
]);
3779 g_free(*host_array
);
3780 unlock_user(array
, target_addr
, 1);
3785 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3786 abi_ulong target_arg
)
3788 union target_semun target_su
= { .buf
= target_arg
};
3790 struct semid_ds dsarg
;
3791 unsigned short *array
= NULL
;
3792 struct seminfo seminfo
;
3793 abi_long ret
= -TARGET_EINVAL
;
3800 /* In 64 bit cross-endian situations, we will erroneously pick up
3801 * the wrong half of the union for the "val" element. To rectify
3802 * this, the entire 8-byte structure is byteswapped, followed by
3803 * a swap of the 4 byte val field. In other cases, the data is
3804 * already in proper host byte order. */
3805 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3806 target_su
.buf
= tswapal(target_su
.buf
);
3807 arg
.val
= tswap32(target_su
.val
);
3809 arg
.val
= target_su
.val
;
3811 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3815 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3819 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3820 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3827 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3831 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3832 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3838 arg
.__buf
= &seminfo
;
3839 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3840 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3848 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout mirror of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3861 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3862 abi_ulong target_addr
,
3865 struct target_sembuf
*target_sembuf
;
3868 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3869 nsops
*sizeof(struct target_sembuf
), 1);
3871 return -TARGET_EFAULT
;
3873 for(i
=0; i
<nsops
; i
++) {
3874 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3875 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3876 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3879 unlock_user(target_sembuf
, target_addr
, 0);
3884 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3885 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
3888 * This macro is required to handle the s390 variants, which passes the
3889 * arguments in a different order than default.
3892 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3893 (__nsops), (__timeout), (__sops)
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896 (__nsops), 0, (__sops), (__timeout)
3899 static inline abi_long
do_semtimedop(int semid
,
3902 abi_long timeout
, bool time64
)
3904 struct sembuf
*sops
;
3905 struct timespec ts
, *pts
= NULL
;
3911 if (target_to_host_timespec64(pts
, timeout
)) {
3912 return -TARGET_EFAULT
;
3915 if (target_to_host_timespec(pts
, timeout
)) {
3916 return -TARGET_EFAULT
;
3921 if (nsops
> TARGET_SEMOPM
) {
3922 return -TARGET_E2BIG
;
3925 sops
= g_new(struct sembuf
, nsops
);
3927 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
3929 return -TARGET_EFAULT
;
3932 ret
= -TARGET_ENOSYS
;
3933 #ifdef __NR_semtimedop
3934 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
3937 if (ret
== -TARGET_ENOSYS
) {
3938 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
3939 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
3947 struct target_msqid_ds
3949 struct target_ipc_perm msg_perm
;
3950 abi_ulong msg_stime
;
3951 #if TARGET_ABI_BITS == 32
3952 abi_ulong __unused1
;
3954 abi_ulong msg_rtime
;
3955 #if TARGET_ABI_BITS == 32
3956 abi_ulong __unused2
;
3958 abi_ulong msg_ctime
;
3959 #if TARGET_ABI_BITS == 32
3960 abi_ulong __unused3
;
3962 abi_ulong __msg_cbytes
;
3964 abi_ulong msg_qbytes
;
3965 abi_ulong msg_lspid
;
3966 abi_ulong msg_lrpid
;
3967 abi_ulong __unused4
;
3968 abi_ulong __unused5
;
3971 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3972 abi_ulong target_addr
)
3974 struct target_msqid_ds
*target_md
;
3976 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3977 return -TARGET_EFAULT
;
3978 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3979 return -TARGET_EFAULT
;
3980 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3981 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3982 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3983 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3984 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3985 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3986 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3987 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3988 unlock_user_struct(target_md
, target_addr
, 0);
3992 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3993 struct msqid_ds
*host_md
)
3995 struct target_msqid_ds
*target_md
;
3997 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3998 return -TARGET_EFAULT
;
3999 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4000 return -TARGET_EFAULT
;
4001 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4002 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4003 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4004 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4005 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4006 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4007 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4008 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4009 unlock_user_struct(target_md
, target_addr
, 1);
/*
 * Guest-layout mirror of struct msginfo (msgctl IPC_INFO / MSG_INFO).
 * Field list matches the __put_user copies in host_to_target_msginfo.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4024 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4025 struct msginfo
*host_msginfo
)
4027 struct target_msginfo
*target_msginfo
;
4028 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4029 return -TARGET_EFAULT
;
4030 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4031 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4032 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4033 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4034 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4035 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4036 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4037 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4038 unlock_user_struct(target_msginfo
, target_addr
, 1);
4042 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4044 struct msqid_ds dsarg
;
4045 struct msginfo msginfo
;
4046 abi_long ret
= -TARGET_EINVAL
;
4054 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4055 return -TARGET_EFAULT
;
4056 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4057 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4058 return -TARGET_EFAULT
;
4061 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4065 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4066 if (host_to_target_msginfo(ptr
, &msginfo
))
4067 return -TARGET_EFAULT
;
4074 struct target_msgbuf
{
4079 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4080 ssize_t msgsz
, int msgflg
)
4082 struct target_msgbuf
*target_mb
;
4083 struct msgbuf
*host_mb
;
4087 return -TARGET_EINVAL
;
4090 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4091 return -TARGET_EFAULT
;
4092 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4094 unlock_user_struct(target_mb
, msgp
, 0);
4095 return -TARGET_ENOMEM
;
4097 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4098 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4099 ret
= -TARGET_ENOSYS
;
4101 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4104 if (ret
== -TARGET_ENOSYS
) {
4106 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4109 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4115 unlock_user_struct(target_mb
, msgp
, 0);
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4134 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4135 ssize_t msgsz
, abi_long msgtyp
,
4138 struct target_msgbuf
*target_mb
;
4140 struct msgbuf
*host_mb
;
4144 return -TARGET_EINVAL
;
4147 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4148 return -TARGET_EFAULT
;
4150 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4152 ret
= -TARGET_ENOMEM
;
4155 ret
= -TARGET_ENOSYS
;
4157 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4160 if (ret
== -TARGET_ENOSYS
) {
4161 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4162 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4167 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4168 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4169 if (!target_mtext
) {
4170 ret
= -TARGET_EFAULT
;
4173 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4174 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4177 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4181 unlock_user_struct(target_mb
, msgp
, 1);
4186 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4187 abi_ulong target_addr
)
4189 struct target_shmid_ds
*target_sd
;
4191 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4192 return -TARGET_EFAULT
;
4193 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4194 return -TARGET_EFAULT
;
4195 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4196 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4197 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4198 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4199 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4200 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4201 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4202 unlock_user_struct(target_sd
, target_addr
, 0);
4206 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4207 struct shmid_ds
*host_sd
)
4209 struct target_shmid_ds
*target_sd
;
4211 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4212 return -TARGET_EFAULT
;
4213 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4214 return -TARGET_EFAULT
;
4215 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4216 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4217 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4218 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4219 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4220 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4221 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4222 unlock_user_struct(target_sd
, target_addr
, 1);
4226 struct target_shminfo
{
4234 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4235 struct shminfo
*host_shminfo
)
4237 struct target_shminfo
*target_shminfo
;
4238 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4239 return -TARGET_EFAULT
;
4240 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4241 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4242 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4243 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4244 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4245 unlock_user_struct(target_shminfo
, target_addr
, 1);
4249 struct target_shm_info
{
4254 abi_ulong swap_attempts
;
4255 abi_ulong swap_successes
;
4258 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4259 struct shm_info
*host_shm_info
)
4261 struct target_shm_info
*target_shm_info
;
4262 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4263 return -TARGET_EFAULT
;
4264 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4265 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4266 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4267 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4268 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4269 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4270 unlock_user_struct(target_shm_info
, target_addr
, 1);
4274 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4276 struct shmid_ds dsarg
;
4277 struct shminfo shminfo
;
4278 struct shm_info shm_info
;
4279 abi_long ret
= -TARGET_EINVAL
;
4287 if (target_to_host_shmid_ds(&dsarg
, buf
))
4288 return -TARGET_EFAULT
;
4289 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4290 if (host_to_target_shmid_ds(buf
, &dsarg
))
4291 return -TARGET_EFAULT
;
4294 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4295 if (host_to_target_shminfo(buf
, &shminfo
))
4296 return -TARGET_EFAULT
;
4299 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4300 if (host_to_target_shm_info(buf
, &shm_info
))
4301 return -TARGET_EFAULT
;
4306 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4313 #ifndef TARGET_FORCE_SHMLBA
4314 /* For most architectures, SHMLBA is the same as the page size;
4315 * some architectures have larger values, in which case they should
4316 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4317 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4318 * and defining its own value for SHMLBA.
4320 * The kernel also permits SHMLBA to be set by the architecture to a
4321 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4322 * this means that addresses are rounded to the large size if
4323 * SHM_RND is set but addresses not aligned to that size are not rejected
4324 * as long as they are at least page-aligned. Since the only architecture
4325 * which uses this is ia64 this code doesn't provide for that oddity.
4327 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4329 return TARGET_PAGE_SIZE
;
4333 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4334 int shmid
, abi_ulong shmaddr
, int shmflg
)
4338 struct shmid_ds shm_info
;
4342 /* find out the length of the shared memory segment */
4343 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4344 if (is_error(ret
)) {
4345 /* can't get length, bail out */
4349 shmlba
= target_shmlba(cpu_env
);
4351 if (shmaddr
& (shmlba
- 1)) {
4352 if (shmflg
& SHM_RND
) {
4353 shmaddr
&= ~(shmlba
- 1);
4355 return -TARGET_EINVAL
;
4358 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4359 return -TARGET_EINVAL
;
4365 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4367 abi_ulong mmap_start
;
4369 /* In order to use the host shmat, we need to honor host SHMLBA. */
4370 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4372 if (mmap_start
== -1) {
4374 host_raddr
= (void *)-1;
4376 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4379 if (host_raddr
== (void *)-1) {
4381 return get_errno((long)host_raddr
);
4383 raddr
=h2g((unsigned long)host_raddr
);
4385 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4386 PAGE_VALID
| PAGE_READ
|
4387 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4389 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4390 if (!shm_regions
[i
].in_use
) {
4391 shm_regions
[i
].in_use
= true;
4392 shm_regions
[i
].start
= raddr
;
4393 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4403 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4410 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4411 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4412 shm_regions
[i
].in_use
= false;
4413 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4417 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* high 16 bits of call encode the calling-convention version */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third,
                            TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth,
                            TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* version 0 passes msgp/msgtyp indirectly via a kludge */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4545 /* kernel structure types definitions */
4547 #define STRUCT(name, ...) STRUCT_ ## name,
4548 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4550 #include "syscall_types.h"
4554 #undef STRUCT_SPECIAL
4556 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4557 #define STRUCT_SPECIAL(name)
4558 #include "syscall_types.h"
4560 #undef STRUCT_SPECIAL
4562 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Thunk for FS_IOC_FIEMAP: convert the guest struct fiemap (and, after
 * the ioctl, the kernel-filled fiemap_extent array that follows it)
 * between guest and host layouts.  Allocates a larger buffer when the
 * extent array does not fit in buf_temp.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4653 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4654 int fd
, int cmd
, abi_long arg
)
4656 const argtype
*arg_type
= ie
->arg_type
;
4660 struct ifconf
*host_ifconf
;
4662 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4663 int target_ifreq_size
;
4668 abi_long target_ifc_buf
;
4672 assert(arg_type
[0] == TYPE_PTR
);
4673 assert(ie
->access
== IOC_RW
);
4676 target_size
= thunk_type_size(arg_type
, 0);
4678 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4680 return -TARGET_EFAULT
;
4681 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4682 unlock_user(argptr
, arg
, 0);
4684 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4685 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4686 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4688 if (target_ifc_buf
!= 0) {
4689 target_ifc_len
= host_ifconf
->ifc_len
;
4690 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4691 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4693 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4694 if (outbufsz
> MAX_STRUCT_SIZE
) {
4696 * We can't fit all the extents into the fixed size buffer.
4697 * Allocate one that is large enough and use it instead.
4699 host_ifconf
= malloc(outbufsz
);
4701 return -TARGET_ENOMEM
;
4703 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4706 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4708 host_ifconf
->ifc_len
= host_ifc_len
;
4710 host_ifc_buf
= NULL
;
4712 host_ifconf
->ifc_buf
= host_ifc_buf
;
4714 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4715 if (!is_error(ret
)) {
4716 /* convert host ifc_len to target ifc_len */
4718 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4719 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4720 host_ifconf
->ifc_len
= target_ifc_len
;
4722 /* restore target ifc_buf */
4724 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4726 /* copy struct ifconf to target user */
4728 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4730 return -TARGET_EFAULT
;
4731 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4732 unlock_user(argptr
, arg
, target_size
);
4734 if (target_ifc_buf
!= 0) {
4735 /* copy ifreq[] to target user */
4736 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4737 for (i
= 0; i
< nb_ifreq
; i
++) {
4738 thunk_convert(argptr
+ i
* target_ifreq_size
,
4739 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4740 ifreq_arg_type
, THUNK_TARGET
);
4742 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4753 #if defined(CONFIG_USBFS)
4754 #if HOST_LONG_BITS > 64
4755 #error USBDEVFS thunks do not support >64 bit hosts yet.
4758 uint64_t target_urb_adr
;
4759 uint64_t target_buf_adr
;
4760 char *target_buf_ptr
;
4761 struct usbdevfs_urb host_urb
;
4764 static GHashTable
*usbdevfs_urb_hashtable(void)
4766 static GHashTable
*urb_hashtable
;
4768 if (!urb_hashtable
) {
4769 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4771 return urb_hashtable
;
4774 static void urb_hashtable_insert(struct live_urb
*urb
)
4776 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4777 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4780 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4782 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4783 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4786 static void urb_hashtable_remove(struct live_urb
*urb
)
4788 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4789 g_hash_table_remove(urb_hashtable
, urb
);
4793 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4794 int fd
, int cmd
, abi_long arg
)
4796 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4797 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4798 struct live_urb
*lurb
;
4802 uintptr_t target_urb_adr
;
4805 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4807 memset(buf_temp
, 0, sizeof(uint64_t));
4808 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4809 if (is_error(ret
)) {
4813 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4814 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4815 if (!lurb
->target_urb_adr
) {
4816 return -TARGET_EFAULT
;
4818 urb_hashtable_remove(lurb
);
4819 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4820 lurb
->host_urb
.buffer_length
);
4821 lurb
->target_buf_ptr
= NULL
;
4823 /* restore the guest buffer pointer */
4824 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4826 /* update the guest urb struct */
4827 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4830 return -TARGET_EFAULT
;
4832 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4833 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4835 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4836 /* write back the urb handle */
4837 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4840 return -TARGET_EFAULT
;
4843 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4844 target_urb_adr
= lurb
->target_urb_adr
;
4845 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4846 unlock_user(argptr
, arg
, target_size
);
4853 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4854 uint8_t *buf_temp
__attribute__((unused
)),
4855 int fd
, int cmd
, abi_long arg
)
4857 struct live_urb
*lurb
;
4859 /* map target address back to host URB with metadata. */
4860 lurb
= urb_hashtable_lookup(arg
);
4862 return -TARGET_EFAULT
;
4864 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4868 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4869 int fd
, int cmd
, abi_long arg
)
4871 const argtype
*arg_type
= ie
->arg_type
;
4876 struct live_urb
*lurb
;
4879 * each submitted URB needs to map to a unique ID for the
4880 * kernel, and that unique ID needs to be a pointer to
4881 * host memory. hence, we need to malloc for each URB.
4882 * isochronous transfers have a variable length struct.
4885 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4887 /* construct host copy of urb and metadata */
4888 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4890 return -TARGET_ENOMEM
;
4893 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4896 return -TARGET_EFAULT
;
4898 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4899 unlock_user(argptr
, arg
, 0);
4901 lurb
->target_urb_adr
= arg
;
4902 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4904 /* buffer space used depends on endpoint type so lock the entire buffer */
4905 /* control type urbs should check the buffer contents for true direction */
4906 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4907 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4908 lurb
->host_urb
.buffer_length
, 1);
4909 if (lurb
->target_buf_ptr
== NULL
) {
4911 return -TARGET_EFAULT
;
4914 /* update buffer pointer in host copy */
4915 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4917 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4918 if (is_error(ret
)) {
4919 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4922 urb_hashtable_insert(lurb
);
4927 #endif /* CONFIG_USBFS */
4929 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4930 int cmd
, abi_long arg
)
4933 struct dm_ioctl
*host_dm
;
4934 abi_long guest_data
;
4935 uint32_t guest_data_size
;
4937 const argtype
*arg_type
= ie
->arg_type
;
4939 void *big_buf
= NULL
;
4943 target_size
= thunk_type_size(arg_type
, 0);
4944 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4946 ret
= -TARGET_EFAULT
;
4949 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4950 unlock_user(argptr
, arg
, 0);
4952 /* buf_temp is too small, so fetch things into a bigger buffer */
4953 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4954 memcpy(big_buf
, buf_temp
, target_size
);
4958 guest_data
= arg
+ host_dm
->data_start
;
4959 if ((guest_data
- arg
) < 0) {
4960 ret
= -TARGET_EINVAL
;
4963 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4964 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4966 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4968 ret
= -TARGET_EFAULT
;
4972 switch (ie
->host_cmd
) {
4974 case DM_LIST_DEVICES
:
4977 case DM_DEV_SUSPEND
:
4980 case DM_TABLE_STATUS
:
4981 case DM_TABLE_CLEAR
:
4983 case DM_LIST_VERSIONS
:
4987 case DM_DEV_SET_GEOMETRY
:
4988 /* data contains only strings */
4989 memcpy(host_data
, argptr
, guest_data_size
);
4992 memcpy(host_data
, argptr
, guest_data_size
);
4993 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4997 void *gspec
= argptr
;
4998 void *cur_data
= host_data
;
4999 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5000 int spec_size
= thunk_type_size(arg_type
, 0);
5003 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5004 struct dm_target_spec
*spec
= cur_data
;
5008 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5009 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5011 spec
->next
= sizeof(*spec
) + slen
;
5012 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5014 cur_data
+= spec
->next
;
5019 ret
= -TARGET_EINVAL
;
5020 unlock_user(argptr
, guest_data
, 0);
5023 unlock_user(argptr
, guest_data
, 0);
5025 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5026 if (!is_error(ret
)) {
5027 guest_data
= arg
+ host_dm
->data_start
;
5028 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5029 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5030 switch (ie
->host_cmd
) {
5035 case DM_DEV_SUSPEND
:
5038 case DM_TABLE_CLEAR
:
5040 case DM_DEV_SET_GEOMETRY
:
5041 /* no return data */
5043 case DM_LIST_DEVICES
:
5045 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5046 uint32_t remaining_data
= guest_data_size
;
5047 void *cur_data
= argptr
;
5048 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5049 int nl_size
= 12; /* can't use thunk_size due to alignment */
5052 uint32_t next
= nl
->next
;
5054 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5056 if (remaining_data
< nl
->next
) {
5057 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5060 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5061 strcpy(cur_data
+ nl_size
, nl
->name
);
5062 cur_data
+= nl
->next
;
5063 remaining_data
-= nl
->next
;
5067 nl
= (void*)nl
+ next
;
5072 case DM_TABLE_STATUS
:
5074 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5075 void *cur_data
= argptr
;
5076 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5077 int spec_size
= thunk_type_size(arg_type
, 0);
5080 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5081 uint32_t next
= spec
->next
;
5082 int slen
= strlen((char*)&spec
[1]) + 1;
5083 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5084 if (guest_data_size
< spec
->next
) {
5085 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5088 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5089 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5090 cur_data
= argptr
+ spec
->next
;
5091 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5097 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5098 int count
= *(uint32_t*)hdata
;
5099 uint64_t *hdev
= hdata
+ 8;
5100 uint64_t *gdev
= argptr
+ 8;
5103 *(uint32_t*)argptr
= tswap32(count
);
5104 for (i
= 0; i
< count
; i
++) {
5105 *gdev
= tswap64(*hdev
);
5111 case DM_LIST_VERSIONS
:
5113 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5114 uint32_t remaining_data
= guest_data_size
;
5115 void *cur_data
= argptr
;
5116 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5117 int vers_size
= thunk_type_size(arg_type
, 0);
5120 uint32_t next
= vers
->next
;
5122 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5124 if (remaining_data
< vers
->next
) {
5125 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5128 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5129 strcpy(cur_data
+ vers_size
, vers
->name
);
5130 cur_data
+= vers
->next
;
5131 remaining_data
-= vers
->next
;
5135 vers
= (void*)vers
+ next
;
5140 unlock_user(argptr
, guest_data
, 0);
5141 ret
= -TARGET_EINVAL
;
5144 unlock_user(argptr
, guest_data
, guest_data_size
);
5146 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5148 ret
= -TARGET_EFAULT
;
5151 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5152 unlock_user(argptr
, arg
, target_size
);
5159 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5160 int cmd
, abi_long arg
)
5164 const argtype
*arg_type
= ie
->arg_type
;
5165 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5168 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5169 struct blkpg_partition host_part
;
5171 /* Read and convert blkpg */
5173 target_size
= thunk_type_size(arg_type
, 0);
5174 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5176 ret
= -TARGET_EFAULT
;
5179 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5180 unlock_user(argptr
, arg
, 0);
5182 switch (host_blkpg
->op
) {
5183 case BLKPG_ADD_PARTITION
:
5184 case BLKPG_DEL_PARTITION
:
5185 /* payload is struct blkpg_partition */
5188 /* Unknown opcode */
5189 ret
= -TARGET_EINVAL
;
5193 /* Read and convert blkpg->data */
5194 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5195 target_size
= thunk_type_size(part_arg_type
, 0);
5196 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5198 ret
= -TARGET_EFAULT
;
5201 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5202 unlock_user(argptr
, arg
, 0);
5204 /* Swizzle the data pointer to our local copy and call! */
5205 host_blkpg
->data
= &host_part
;
5206 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5212 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5213 int fd
, int cmd
, abi_long arg
)
5215 const argtype
*arg_type
= ie
->arg_type
;
5216 const StructEntry
*se
;
5217 const argtype
*field_types
;
5218 const int *dst_offsets
, *src_offsets
;
5221 abi_ulong
*target_rt_dev_ptr
= NULL
;
5222 unsigned long *host_rt_dev_ptr
= NULL
;
5226 assert(ie
->access
== IOC_W
);
5227 assert(*arg_type
== TYPE_PTR
);
5229 assert(*arg_type
== TYPE_STRUCT
);
5230 target_size
= thunk_type_size(arg_type
, 0);
5231 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5233 return -TARGET_EFAULT
;
5236 assert(*arg_type
== (int)STRUCT_rtentry
);
5237 se
= struct_entries
+ *arg_type
++;
5238 assert(se
->convert
[0] == NULL
);
5239 /* convert struct here to be able to catch rt_dev string */
5240 field_types
= se
->field_types
;
5241 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5242 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5243 for (i
= 0; i
< se
->nb_fields
; i
++) {
5244 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5245 assert(*field_types
== TYPE_PTRVOID
);
5246 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5247 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5248 if (*target_rt_dev_ptr
!= 0) {
5249 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5250 tswapal(*target_rt_dev_ptr
));
5251 if (!*host_rt_dev_ptr
) {
5252 unlock_user(argptr
, arg
, 0);
5253 return -TARGET_EFAULT
;
5256 *host_rt_dev_ptr
= 0;
5261 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5262 argptr
+ src_offsets
[i
],
5263 field_types
, THUNK_HOST
);
5265 unlock_user(argptr
, arg
, 0);
5267 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5269 assert(host_rt_dev_ptr
!= NULL
);
5270 assert(target_rt_dev_ptr
!= NULL
);
5271 if (*host_rt_dev_ptr
!= 0) {
5272 unlock_user((void *)*host_rt_dev_ptr
,
5273 *target_rt_dev_ptr
, 0);
5278 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5279 int fd
, int cmd
, abi_long arg
)
5281 int sig
= target_to_host_signal(arg
);
5282 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5285 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5286 int fd
, int cmd
, abi_long arg
)
5291 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5292 if (is_error(ret
)) {
5296 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5297 if (copy_to_user_timeval(arg
, &tv
)) {
5298 return -TARGET_EFAULT
;
5301 if (copy_to_user_timeval64(arg
, &tv
)) {
5302 return -TARGET_EFAULT
;
5309 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5310 int fd
, int cmd
, abi_long arg
)
5315 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5316 if (is_error(ret
)) {
5320 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5321 if (host_to_target_timespec(arg
, &ts
)) {
5322 return -TARGET_EFAULT
;
5325 if (host_to_target_timespec64(arg
, &ts
)) {
5326 return -TARGET_EFAULT
;
5334 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5335 int fd
, int cmd
, abi_long arg
)
5337 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5338 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5344 static void unlock_drm_version(struct drm_version
*host_ver
,
5345 struct target_drm_version
*target_ver
,
5348 unlock_user(host_ver
->name
, target_ver
->name
,
5349 copy
? host_ver
->name_len
: 0);
5350 unlock_user(host_ver
->date
, target_ver
->date
,
5351 copy
? host_ver
->date_len
: 0);
5352 unlock_user(host_ver
->desc
, target_ver
->desc
,
5353 copy
? host_ver
->desc_len
: 0);
5356 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5357 struct target_drm_version
*target_ver
)
5359 memset(host_ver
, 0, sizeof(*host_ver
));
5361 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5362 if (host_ver
->name_len
) {
5363 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5364 target_ver
->name_len
, 0);
5365 if (!host_ver
->name
) {
5370 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5371 if (host_ver
->date_len
) {
5372 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5373 target_ver
->date_len
, 0);
5374 if (!host_ver
->date
) {
5379 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5380 if (host_ver
->desc_len
) {
5381 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5382 target_ver
->desc_len
, 0);
5383 if (!host_ver
->desc
) {
5390 unlock_drm_version(host_ver
, target_ver
, false);
5394 static inline void host_to_target_drmversion(
5395 struct target_drm_version
*target_ver
,
5396 struct drm_version
*host_ver
)
5398 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5399 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5400 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5401 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5402 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5403 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5404 unlock_drm_version(host_ver
, target_ver
, true);
5407 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5408 int fd
, int cmd
, abi_long arg
)
5410 struct drm_version
*ver
;
5411 struct target_drm_version
*target_ver
;
5414 switch (ie
->host_cmd
) {
5415 case DRM_IOCTL_VERSION
:
5416 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5417 return -TARGET_EFAULT
;
5419 ver
= (struct drm_version
*)buf_temp
;
5420 ret
= target_to_host_drmversion(ver
, target_ver
);
5421 if (!is_error(ret
)) {
5422 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5423 if (is_error(ret
)) {
5424 unlock_drm_version(ver
, target_ver
, false);
5426 host_to_target_drmversion(target_ver
, ver
);
5429 unlock_user_struct(target_ver
, arg
, 0);
5432 return -TARGET_ENOSYS
;
5435 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5436 struct drm_i915_getparam
*gparam
,
5437 int fd
, abi_long arg
)
5441 struct target_drm_i915_getparam
*target_gparam
;
5443 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5444 return -TARGET_EFAULT
;
5447 __get_user(gparam
->param
, &target_gparam
->param
);
5448 gparam
->value
= &value
;
5449 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5450 put_user_s32(value
, target_gparam
->value
);
5452 unlock_user_struct(target_gparam
, arg
, 0);
5456 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5457 int fd
, int cmd
, abi_long arg
)
5459 switch (ie
->host_cmd
) {
5460 case DRM_IOCTL_I915_GETPARAM
:
5461 return do_ioctl_drm_i915_getparam(ie
,
5462 (struct drm_i915_getparam
*)buf_temp
,
5465 return -TARGET_ENOSYS
;
5471 IOCTLEntry ioctl_entries
[] = {
5472 #define IOCTL(cmd, access, ...) \
5473 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5474 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5475 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5476 #define IOCTL_IGNORE(cmd) \
5477 { TARGET_ ## cmd, 0, #cmd },
5482 /* ??? Implement proper locking for ioctls. */
5483 /* do_ioctl() Must return target values and target errnos. */
5484 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5486 const IOCTLEntry
*ie
;
5487 const argtype
*arg_type
;
5489 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5495 if (ie
->target_cmd
== 0) {
5497 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5498 return -TARGET_ENOSYS
;
5500 if (ie
->target_cmd
== cmd
)
5504 arg_type
= ie
->arg_type
;
5506 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5507 } else if (!ie
->host_cmd
) {
5508 /* Some architectures define BSD ioctls in their headers
5509 that are not implemented in Linux. */
5510 return -TARGET_ENOSYS
;
5513 switch(arg_type
[0]) {
5516 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5522 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5526 target_size
= thunk_type_size(arg_type
, 0);
5527 switch(ie
->access
) {
5529 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5530 if (!is_error(ret
)) {
5531 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5533 return -TARGET_EFAULT
;
5534 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5535 unlock_user(argptr
, arg
, target_size
);
5539 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5541 return -TARGET_EFAULT
;
5542 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5543 unlock_user(argptr
, arg
, 0);
5544 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5548 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5550 return -TARGET_EFAULT
;
5551 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5552 unlock_user(argptr
, arg
, 0);
5553 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5554 if (!is_error(ret
)) {
5555 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5557 return -TARGET_EFAULT
;
5558 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5559 unlock_user(argptr
, arg
, target_size
);
5565 qemu_log_mask(LOG_UNIMP
,
5566 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5567 (long)cmd
, arg_type
[0]);
5568 ret
= -TARGET_ENOSYS
;
5574 static const bitmask_transtbl iflag_tbl
[] = {
5575 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5576 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5577 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5578 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5579 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5580 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5581 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5582 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5583 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5584 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5585 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5586 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5587 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5588 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5589 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5593 static const bitmask_transtbl oflag_tbl
[] = {
5594 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5595 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5596 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5597 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5598 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5599 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5600 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5601 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5602 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5603 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5604 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5605 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5606 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5607 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5608 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5609 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5610 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5611 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5612 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5613 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5614 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5615 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5616 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5617 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5621 static const bitmask_transtbl cflag_tbl
[] = {
5622 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5623 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5624 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5625 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5626 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5627 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5628 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5629 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5630 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5631 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5632 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5633 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5634 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5635 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5636 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5637 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5638 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5639 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5640 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5641 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5642 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5643 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5644 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5645 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5646 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5647 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5648 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5649 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5650 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5651 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5652 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5656 static const bitmask_transtbl lflag_tbl
[] = {
5657 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5658 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5659 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5660 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5661 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5662 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5663 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5664 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5665 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5666 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5667 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5668 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5669 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5670 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5671 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5672 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5676 static void target_to_host_termios (void *dst
, const void *src
)
5678 struct host_termios
*host
= dst
;
5679 const struct target_termios
*target
= src
;
5682 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5684 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5686 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5688 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5689 host
->c_line
= target
->c_line
;
5691 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5692 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5693 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5694 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5695 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5696 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5697 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5698 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5699 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5700 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5701 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5702 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5703 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5704 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5705 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5706 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5707 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5708 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5711 static void host_to_target_termios (void *dst
, const void *src
)
5713 struct target_termios
*target
= dst
;
5714 const struct host_termios
*host
= src
;
5717 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5719 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5721 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5723 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5724 target
->c_line
= host
->c_line
;
5726 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5727 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5728 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5729 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5730 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5731 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5732 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5733 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5734 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5735 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5736 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5737 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5738 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5739 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5740 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5741 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5742 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5743 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5746 static const StructEntry struct_termios_def
= {
5747 .convert
= { host_to_target_termios
, target_to_host_termios
},
5748 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5749 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5750 .print
= print_termios
,
5753 static bitmask_transtbl mmap_flags_tbl
[] = {
5754 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5755 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5756 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5757 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5758 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5759 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5760 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5761 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5762 MAP_DENYWRITE
, MAP_DENYWRITE
},
5763 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5764 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5765 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5766 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5767 MAP_NORESERVE
, MAP_NORESERVE
},
5768 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5769 /* MAP_STACK had been ignored by the kernel for quite some time.
5770 Recognize it for the target insofar as we do not want to pass
5771 it through to the host. */
5772 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5777 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5778 * TARGET_I386 is defined if TARGET_X86_64 is defined
5780 #if defined(TARGET_I386)
5782 /* NOTE: there is really one LDT for all the threads */
5783 static uint8_t *ldt_table
;
5785 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5792 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5793 if (size
> bytecount
)
5795 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5797 return -TARGET_EFAULT
;
5798 /* ??? Should this by byteswapped? */
5799 memcpy(p
, ldt_table
, size
);
5800 unlock_user(p
, ptr
, size
);
5804 /* XXX: add locking support */
5805 static abi_long
write_ldt(CPUX86State
*env
,
5806 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5808 struct target_modify_ldt_ldt_s ldt_info
;
5809 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5810 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5811 int seg_not_present
, useable
, lm
;
5812 uint32_t *lp
, entry_1
, entry_2
;
5814 if (bytecount
!= sizeof(ldt_info
))
5815 return -TARGET_EINVAL
;
5816 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5817 return -TARGET_EFAULT
;
5818 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5819 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5820 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5821 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5822 unlock_user_struct(target_ldt_info
, ptr
, 0);
5824 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5825 return -TARGET_EINVAL
;
5826 seg_32bit
= ldt_info
.flags
& 1;
5827 contents
= (ldt_info
.flags
>> 1) & 3;
5828 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5829 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5830 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5831 useable
= (ldt_info
.flags
>> 6) & 1;
5835 lm
= (ldt_info
.flags
>> 7) & 1;
5837 if (contents
== 3) {
5839 return -TARGET_EINVAL
;
5840 if (seg_not_present
== 0)
5841 return -TARGET_EINVAL
;
5843 /* allocate the LDT */
5845 env
->ldt
.base
= target_mmap(0,
5846 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5847 PROT_READ
|PROT_WRITE
,
5848 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5849 if (env
->ldt
.base
== -1)
5850 return -TARGET_ENOMEM
;
5851 memset(g2h(env
->ldt
.base
), 0,
5852 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5853 env
->ldt
.limit
= 0xffff;
5854 ldt_table
= g2h(env
->ldt
.base
);
5857 /* NOTE: same code as Linux kernel */
5858 /* Allow LDTs to be cleared by the user. */
5859 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5862 read_exec_only
== 1 &&
5864 limit_in_pages
== 0 &&
5865 seg_not_present
== 1 &&
5873 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5874 (ldt_info
.limit
& 0x0ffff);
5875 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5876 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5877 (ldt_info
.limit
& 0xf0000) |
5878 ((read_exec_only
^ 1) << 9) |
5880 ((seg_not_present
^ 1) << 15) |
5882 (limit_in_pages
<< 23) |
5886 entry_2
|= (useable
<< 20);
5888 /* Install the new entry ... */
5890 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5891 lp
[0] = tswap32(entry_1
);
5892 lp
[1] = tswap32(entry_2
);
5896 /* specific and weird i386 syscalls */
5897 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5898 unsigned long bytecount
)
5904 ret
= read_ldt(ptr
, bytecount
);
5907 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5910 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5913 ret
= -TARGET_ENOSYS
;
5919 #if defined(TARGET_ABI32)
5920 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5922 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5923 struct target_modify_ldt_ldt_s ldt_info
;
5924 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5925 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5926 int seg_not_present
, useable
, lm
;
5927 uint32_t *lp
, entry_1
, entry_2
;
5930 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5931 if (!target_ldt_info
)
5932 return -TARGET_EFAULT
;
5933 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5934 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5935 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5936 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5937 if (ldt_info
.entry_number
== -1) {
5938 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5939 if (gdt_table
[i
] == 0) {
5940 ldt_info
.entry_number
= i
;
5941 target_ldt_info
->entry_number
= tswap32(i
);
5946 unlock_user_struct(target_ldt_info
, ptr
, 1);
5948 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5949 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5950 return -TARGET_EINVAL
;
5951 seg_32bit
= ldt_info
.flags
& 1;
5952 contents
= (ldt_info
.flags
>> 1) & 3;
5953 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5954 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5955 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5956 useable
= (ldt_info
.flags
>> 6) & 1;
5960 lm
= (ldt_info
.flags
>> 7) & 1;
5963 if (contents
== 3) {
5964 if (seg_not_present
== 0)
5965 return -TARGET_EINVAL
;
5968 /* NOTE: same code as Linux kernel */
5969 /* Allow LDTs to be cleared by the user. */
5970 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5971 if ((contents
== 0 &&
5972 read_exec_only
== 1 &&
5974 limit_in_pages
== 0 &&
5975 seg_not_present
== 1 &&
5983 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5984 (ldt_info
.limit
& 0x0ffff);
5985 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5986 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5987 (ldt_info
.limit
& 0xf0000) |
5988 ((read_exec_only
^ 1) << 9) |
5990 ((seg_not_present
^ 1) << 15) |
5992 (limit_in_pages
<< 23) |
5997 /* Install the new entry ... */
5999 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6000 lp
[0] = tswap32(entry_1
);
6001 lp
[1] = tswap32(entry_2
);
6005 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6007 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6008 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6009 uint32_t base_addr
, limit
, flags
;
6010 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6011 int seg_not_present
, useable
, lm
;
6012 uint32_t *lp
, entry_1
, entry_2
;
6014 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6015 if (!target_ldt_info
)
6016 return -TARGET_EFAULT
;
6017 idx
= tswap32(target_ldt_info
->entry_number
);
6018 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6019 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6020 unlock_user_struct(target_ldt_info
, ptr
, 1);
6021 return -TARGET_EINVAL
;
6023 lp
= (uint32_t *)(gdt_table
+ idx
);
6024 entry_1
= tswap32(lp
[0]);
6025 entry_2
= tswap32(lp
[1]);
6027 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6028 contents
= (entry_2
>> 10) & 3;
6029 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6030 seg_32bit
= (entry_2
>> 22) & 1;
6031 limit_in_pages
= (entry_2
>> 23) & 1;
6032 useable
= (entry_2
>> 20) & 1;
6036 lm
= (entry_2
>> 21) & 1;
6038 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6039 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6040 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6041 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6042 base_addr
= (entry_1
>> 16) |
6043 (entry_2
& 0xff000000) |
6044 ((entry_2
& 0xff) << 16);
6045 target_ldt_info
->base_addr
= tswapal(base_addr
);
6046 target_ldt_info
->limit
= tswap32(limit
);
6047 target_ldt_info
->flags
= tswap32(flags
);
6048 unlock_user_struct(target_ldt_info
, ptr
, 1);
6052 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6054 return -TARGET_ENOSYS
;
6057 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6064 case TARGET_ARCH_SET_GS
:
6065 case TARGET_ARCH_SET_FS
:
6066 if (code
== TARGET_ARCH_SET_GS
)
6070 cpu_x86_load_seg(env
, idx
, 0);
6071 env
->segs
[idx
].base
= addr
;
6073 case TARGET_ARCH_GET_GS
:
6074 case TARGET_ARCH_GET_FS
:
6075 if (code
== TARGET_ARCH_GET_GS
)
6079 val
= env
->segs
[idx
].base
;
6080 if (put_user(val
, addr
, abi_ulong
))
6081 ret
= -TARGET_EFAULT
;
6084 ret
= -TARGET_EINVAL
;
6089 #endif /* defined(TARGET_ABI32 */
6091 #endif /* defined(TARGET_I386) */
6093 #define NEW_STACK_SIZE 0x40000
6096 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6099 pthread_mutex_t mutex
;
6100 pthread_cond_t cond
;
6103 abi_ulong child_tidptr
;
6104 abi_ulong parent_tidptr
;
6108 static void *clone_func(void *arg
)
6110 new_thread_info
*info
= arg
;
6115 rcu_register_thread();
6116 tcg_register_thread();
6120 ts
= (TaskState
*)cpu
->opaque
;
6121 info
->tid
= sys_gettid();
6123 if (info
->child_tidptr
)
6124 put_user_u32(info
->tid
, info
->child_tidptr
);
6125 if (info
->parent_tidptr
)
6126 put_user_u32(info
->tid
, info
->parent_tidptr
);
6127 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6128 /* Enable signals. */
6129 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6130 /* Signal to the parent that we're ready. */
6131 pthread_mutex_lock(&info
->mutex
);
6132 pthread_cond_broadcast(&info
->cond
);
6133 pthread_mutex_unlock(&info
->mutex
);
6134 /* Wait until the parent has finished initializing the tls state. */
6135 pthread_mutex_lock(&clone_lock
);
6136 pthread_mutex_unlock(&clone_lock
);
6142 /* do_fork() Must return host values and target errnos (unlike most
6143 do_*() functions). */
6144 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6145 abi_ulong parent_tidptr
, target_ulong newtls
,
6146 abi_ulong child_tidptr
)
6148 CPUState
*cpu
= env_cpu(env
);
6152 CPUArchState
*new_env
;
6155 flags
&= ~CLONE_IGNORED_FLAGS
;
6157 /* Emulate vfork() with fork() */
6158 if (flags
& CLONE_VFORK
)
6159 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6161 if (flags
& CLONE_VM
) {
6162 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6163 new_thread_info info
;
6164 pthread_attr_t attr
;
6166 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6167 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6168 return -TARGET_EINVAL
;
6171 ts
= g_new0(TaskState
, 1);
6172 init_task_state(ts
);
6174 /* Grab a mutex so that thread setup appears atomic. */
6175 pthread_mutex_lock(&clone_lock
);
6177 /* we create a new CPU instance. */
6178 new_env
= cpu_copy(env
);
6179 /* Init regs that differ from the parent. */
6180 cpu_clone_regs_child(new_env
, newsp
, flags
);
6181 cpu_clone_regs_parent(env
, flags
);
6182 new_cpu
= env_cpu(new_env
);
6183 new_cpu
->opaque
= ts
;
6184 ts
->bprm
= parent_ts
->bprm
;
6185 ts
->info
= parent_ts
->info
;
6186 ts
->signal_mask
= parent_ts
->signal_mask
;
6188 if (flags
& CLONE_CHILD_CLEARTID
) {
6189 ts
->child_tidptr
= child_tidptr
;
6192 if (flags
& CLONE_SETTLS
) {
6193 cpu_set_tls (new_env
, newtls
);
6196 memset(&info
, 0, sizeof(info
));
6197 pthread_mutex_init(&info
.mutex
, NULL
);
6198 pthread_mutex_lock(&info
.mutex
);
6199 pthread_cond_init(&info
.cond
, NULL
);
6201 if (flags
& CLONE_CHILD_SETTID
) {
6202 info
.child_tidptr
= child_tidptr
;
6204 if (flags
& CLONE_PARENT_SETTID
) {
6205 info
.parent_tidptr
= parent_tidptr
;
6208 ret
= pthread_attr_init(&attr
);
6209 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6210 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6211 /* It is not safe to deliver signals until the child has finished
6212 initializing, so temporarily block all signals. */
6213 sigfillset(&sigmask
);
6214 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6215 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6217 /* If this is our first additional thread, we need to ensure we
6218 * generate code for parallel execution and flush old translations.
6220 if (!parallel_cpus
) {
6221 parallel_cpus
= true;
6225 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6226 /* TODO: Free new CPU state if thread creation failed. */
6228 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6229 pthread_attr_destroy(&attr
);
6231 /* Wait for the child to initialize. */
6232 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6237 pthread_mutex_unlock(&info
.mutex
);
6238 pthread_cond_destroy(&info
.cond
);
6239 pthread_mutex_destroy(&info
.mutex
);
6240 pthread_mutex_unlock(&clone_lock
);
6242 /* if no CLONE_VM, we consider it is a fork */
6243 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6244 return -TARGET_EINVAL
;
6247 /* We can't support custom termination signals */
6248 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6249 return -TARGET_EINVAL
;
6252 if (block_signals()) {
6253 return -TARGET_ERESTARTSYS
;
6259 /* Child Process. */
6260 cpu_clone_regs_child(env
, newsp
, flags
);
6262 /* There is a race condition here. The parent process could
6263 theoretically read the TID in the child process before the child
6264 tid is set. This would require using either ptrace
6265 (not implemented) or having *_tidptr to point at a shared memory
6266 mapping. We can't repeat the spinlock hack used above because
6267 the child process gets its own copy of the lock. */
6268 if (flags
& CLONE_CHILD_SETTID
)
6269 put_user_u32(sys_gettid(), child_tidptr
);
6270 if (flags
& CLONE_PARENT_SETTID
)
6271 put_user_u32(sys_gettid(), parent_tidptr
);
6272 ts
= (TaskState
*)cpu
->opaque
;
6273 if (flags
& CLONE_SETTLS
)
6274 cpu_set_tls (env
, newtls
);
6275 if (flags
& CLONE_CHILD_CLEARTID
)
6276 ts
->child_tidptr
= child_tidptr
;
6278 cpu_clone_regs_parent(env
, flags
);
6285 /* warning : doesn't handle linux specific flags... */
6286 static int target_to_host_fcntl_cmd(int cmd
)
6291 case TARGET_F_DUPFD
:
6292 case TARGET_F_GETFD
:
6293 case TARGET_F_SETFD
:
6294 case TARGET_F_GETFL
:
6295 case TARGET_F_SETFL
:
6296 case TARGET_F_OFD_GETLK
:
6297 case TARGET_F_OFD_SETLK
:
6298 case TARGET_F_OFD_SETLKW
:
6301 case TARGET_F_GETLK
:
6304 case TARGET_F_SETLK
:
6307 case TARGET_F_SETLKW
:
6310 case TARGET_F_GETOWN
:
6313 case TARGET_F_SETOWN
:
6316 case TARGET_F_GETSIG
:
6319 case TARGET_F_SETSIG
:
6322 #if TARGET_ABI_BITS == 32
6323 case TARGET_F_GETLK64
:
6326 case TARGET_F_SETLK64
:
6329 case TARGET_F_SETLKW64
:
6333 case TARGET_F_SETLEASE
:
6336 case TARGET_F_GETLEASE
:
6339 #ifdef F_DUPFD_CLOEXEC
6340 case TARGET_F_DUPFD_CLOEXEC
:
6341 ret
= F_DUPFD_CLOEXEC
;
6344 case TARGET_F_NOTIFY
:
6348 case TARGET_F_GETOWN_EX
:
6353 case TARGET_F_SETOWN_EX
:
6358 case TARGET_F_SETPIPE_SZ
:
6361 case TARGET_F_GETPIPE_SZ
:
6366 ret
= -TARGET_EINVAL
;
6370 #if defined(__powerpc64__)
6371 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6372 * is not supported by kernel. The glibc fcntl call actually adjusts
6373 * them to 5, 6 and 7 before making the syscall(). Since we make the
6374 * syscall directly, adjust to what is supported by the kernel.
6376 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6377 ret
-= F_GETLK64
- 5;
6384 #define FLOCK_TRANSTBL \
6386 TRANSTBL_CONVERT(F_RDLCK); \
6387 TRANSTBL_CONVERT(F_WRLCK); \
6388 TRANSTBL_CONVERT(F_UNLCK); \
6389 TRANSTBL_CONVERT(F_EXLCK); \
6390 TRANSTBL_CONVERT(F_SHLCK); \
6393 static int target_to_host_flock(int type
)
6395 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6397 #undef TRANSTBL_CONVERT
6398 return -TARGET_EINVAL
;
6401 static int host_to_target_flock(int type
)
6403 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6405 #undef TRANSTBL_CONVERT
6406 /* if we don't know how to convert the value coming
6407 * from the host we copy to the target field as-is
6412 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6413 abi_ulong target_flock_addr
)
6415 struct target_flock
*target_fl
;
6418 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6419 return -TARGET_EFAULT
;
6422 __get_user(l_type
, &target_fl
->l_type
);
6423 l_type
= target_to_host_flock(l_type
);
6427 fl
->l_type
= l_type
;
6428 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6429 __get_user(fl
->l_start
, &target_fl
->l_start
);
6430 __get_user(fl
->l_len
, &target_fl
->l_len
);
6431 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6432 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6436 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6437 const struct flock64
*fl
)
6439 struct target_flock
*target_fl
;
6442 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6443 return -TARGET_EFAULT
;
6446 l_type
= host_to_target_flock(fl
->l_type
);
6447 __put_user(l_type
, &target_fl
->l_type
);
6448 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6449 __put_user(fl
->l_start
, &target_fl
->l_start
);
6450 __put_user(fl
->l_len
, &target_fl
->l_len
);
6451 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6452 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6456 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6457 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6459 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6460 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6461 abi_ulong target_flock_addr
)
6463 struct target_oabi_flock64
*target_fl
;
6466 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6467 return -TARGET_EFAULT
;
6470 __get_user(l_type
, &target_fl
->l_type
);
6471 l_type
= target_to_host_flock(l_type
);
6475 fl
->l_type
= l_type
;
6476 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6477 __get_user(fl
->l_start
, &target_fl
->l_start
);
6478 __get_user(fl
->l_len
, &target_fl
->l_len
);
6479 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6480 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6484 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6485 const struct flock64
*fl
)
6487 struct target_oabi_flock64
*target_fl
;
6490 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6491 return -TARGET_EFAULT
;
6494 l_type
= host_to_target_flock(fl
->l_type
);
6495 __put_user(l_type
, &target_fl
->l_type
);
6496 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6497 __put_user(fl
->l_start
, &target_fl
->l_start
);
6498 __put_user(fl
->l_len
, &target_fl
->l_len
);
6499 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6500 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6505 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6506 abi_ulong target_flock_addr
)
6508 struct target_flock64
*target_fl
;
6511 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6512 return -TARGET_EFAULT
;
6515 __get_user(l_type
, &target_fl
->l_type
);
6516 l_type
= target_to_host_flock(l_type
);
6520 fl
->l_type
= l_type
;
6521 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6522 __get_user(fl
->l_start
, &target_fl
->l_start
);
6523 __get_user(fl
->l_len
, &target_fl
->l_len
);
6524 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6525 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6529 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6530 const struct flock64
*fl
)
6532 struct target_flock64
*target_fl
;
6535 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6536 return -TARGET_EFAULT
;
6539 l_type
= host_to_target_flock(fl
->l_type
);
6540 __put_user(l_type
, &target_fl
->l_type
);
6541 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6542 __put_user(fl
->l_start
, &target_fl
->l_start
);
6543 __put_user(fl
->l_len
, &target_fl
->l_len
);
6544 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6545 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6549 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6551 struct flock64 fl64
;
6553 struct f_owner_ex fox
;
6554 struct target_f_owner_ex
*target_fox
;
6557 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6559 if (host_cmd
== -TARGET_EINVAL
)
6563 case TARGET_F_GETLK
:
6564 ret
= copy_from_user_flock(&fl64
, arg
);
6568 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6570 ret
= copy_to_user_flock(arg
, &fl64
);
6574 case TARGET_F_SETLK
:
6575 case TARGET_F_SETLKW
:
6576 ret
= copy_from_user_flock(&fl64
, arg
);
6580 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6583 case TARGET_F_GETLK64
:
6584 case TARGET_F_OFD_GETLK
:
6585 ret
= copy_from_user_flock64(&fl64
, arg
);
6589 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6591 ret
= copy_to_user_flock64(arg
, &fl64
);
6594 case TARGET_F_SETLK64
:
6595 case TARGET_F_SETLKW64
:
6596 case TARGET_F_OFD_SETLK
:
6597 case TARGET_F_OFD_SETLKW
:
6598 ret
= copy_from_user_flock64(&fl64
, arg
);
6602 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6605 case TARGET_F_GETFL
:
6606 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6608 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6612 case TARGET_F_SETFL
:
6613 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6614 target_to_host_bitmask(arg
,
6619 case TARGET_F_GETOWN_EX
:
6620 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6622 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6623 return -TARGET_EFAULT
;
6624 target_fox
->type
= tswap32(fox
.type
);
6625 target_fox
->pid
= tswap32(fox
.pid
);
6626 unlock_user_struct(target_fox
, arg
, 1);
6632 case TARGET_F_SETOWN_EX
:
6633 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6634 return -TARGET_EFAULT
;
6635 fox
.type
= tswap32(target_fox
->type
);
6636 fox
.pid
= tswap32(target_fox
->pid
);
6637 unlock_user_struct(target_fox
, arg
, 0);
6638 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6642 case TARGET_F_SETOWN
:
6643 case TARGET_F_GETOWN
:
6644 case TARGET_F_SETSIG
:
6645 case TARGET_F_GETSIG
:
6646 case TARGET_F_SETLEASE
:
6647 case TARGET_F_GETLEASE
:
6648 case TARGET_F_SETPIPE_SZ
:
6649 case TARGET_F_GETPIPE_SZ
:
6650 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6654 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6662 static inline int high2lowuid(int uid
)
6670 static inline int high2lowgid(int gid
)
6678 static inline int low2highuid(int uid
)
6680 if ((int16_t)uid
== -1)
6686 static inline int low2highgid(int gid
)
6688 if ((int16_t)gid
== -1)
6693 static inline int tswapid(int id
)
6698 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6700 #else /* !USE_UID16 */
6701 static inline int high2lowuid(int uid
)
6705 static inline int high2lowgid(int gid
)
6709 static inline int low2highuid(int uid
)
6713 static inline int low2highgid(int gid
)
6717 static inline int tswapid(int id
)
6722 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6724 #endif /* USE_UID16 */
6726 /* We must do direct syscalls for setting UID/GID, because we want to
6727 * implement the Linux system call semantics of "change only for this thread",
6728 * not the libc/POSIX semantics of "change for all threads in process".
6729 * (See http://ewontfix.com/17/ for more details.)
6730 * We use the 32-bit version of the syscalls if present; if it is not
6731 * then either the host architecture supports 32-bit UIDs natively with
6732 * the standard syscall, or the 16-bit UID is the best we can do.
6734 #ifdef __NR_setuid32
6735 #define __NR_sys_setuid __NR_setuid32
6737 #define __NR_sys_setuid __NR_setuid
6739 #ifdef __NR_setgid32
6740 #define __NR_sys_setgid __NR_setgid32
6742 #define __NR_sys_setgid __NR_setgid
6744 #ifdef __NR_setresuid32
6745 #define __NR_sys_setresuid __NR_setresuid32
6747 #define __NR_sys_setresuid __NR_setresuid
6749 #ifdef __NR_setresgid32
6750 #define __NR_sys_setresgid __NR_setresgid32
6752 #define __NR_sys_setresgid __NR_setresgid
6755 _syscall1(int, sys_setuid
, uid_t
, uid
)
6756 _syscall1(int, sys_setgid
, gid_t
, gid
)
6757 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6758 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6760 void syscall_init(void)
6763 const argtype
*arg_type
;
6767 thunk_init(STRUCT_MAX
);
6769 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6770 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6771 #include "syscall_types.h"
6773 #undef STRUCT_SPECIAL
6775 /* Build target_to_host_errno_table[] table from
6776 * host_to_target_errno_table[]. */
6777 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6778 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6781 /* we patch the ioctl size if necessary. We rely on the fact that
6782 no ioctl has all the bits at '1' in the size field */
6784 while (ie
->target_cmd
!= 0) {
6785 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6786 TARGET_IOC_SIZEMASK
) {
6787 arg_type
= ie
->arg_type
;
6788 if (arg_type
[0] != TYPE_PTR
) {
6789 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6794 size
= thunk_type_size(arg_type
, 0);
6795 ie
->target_cmd
= (ie
->target_cmd
&
6796 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6797 (size
<< TARGET_IOC_SIZESHIFT
);
6800 /* automatic consistency check if same arch */
6801 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6802 (defined(__x86_64__) && defined(TARGET_X86_64))
6803 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6804 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6805 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#ifdef TARGET_NR_truncate64
/* truncate64(2): ABIs that pass 64-bit values in aligned register
   pairs insert a padding argument, so shift arg2/arg3 up one slot. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6827 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6832 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
6836 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Copy a guest struct target_itimerspec into a host struct itimerspec.
   Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* 64-bit-time variant: copy a guest struct target__kernel_itimerspec
   into a host struct itimerspec.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Copy a host struct itimerspec out to a guest struct
   target_itimerspec.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* 64-bit-time variant: copy a host struct itimerspec out to a guest
   struct target__kernel_itimerspec.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
6914 #if defined(TARGET_NR_adjtimex) || \
6915 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6916 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6917 abi_long target_addr
)
6919 struct target_timex
*target_tx
;
6921 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6922 return -TARGET_EFAULT
;
6925 __get_user(host_tx
->modes
, &target_tx
->modes
);
6926 __get_user(host_tx
->offset
, &target_tx
->offset
);
6927 __get_user(host_tx
->freq
, &target_tx
->freq
);
6928 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6929 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6930 __get_user(host_tx
->status
, &target_tx
->status
);
6931 __get_user(host_tx
->constant
, &target_tx
->constant
);
6932 __get_user(host_tx
->precision
, &target_tx
->precision
);
6933 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6934 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6935 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6936 __get_user(host_tx
->tick
, &target_tx
->tick
);
6937 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6938 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6939 __get_user(host_tx
->shift
, &target_tx
->shift
);
6940 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6941 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6942 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6943 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6944 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6945 __get_user(host_tx
->tai
, &target_tx
->tai
);
6947 unlock_user_struct(target_tx
, target_addr
, 0);
6951 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6952 struct timex
*host_tx
)
6954 struct target_timex
*target_tx
;
6956 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6957 return -TARGET_EFAULT
;
6960 __put_user(host_tx
->modes
, &target_tx
->modes
);
6961 __put_user(host_tx
->offset
, &target_tx
->offset
);
6962 __put_user(host_tx
->freq
, &target_tx
->freq
);
6963 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6964 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6965 __put_user(host_tx
->status
, &target_tx
->status
);
6966 __put_user(host_tx
->constant
, &target_tx
->constant
);
6967 __put_user(host_tx
->precision
, &target_tx
->precision
);
6968 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6969 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6970 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6971 __put_user(host_tx
->tick
, &target_tx
->tick
);
6972 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6973 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6974 __put_user(host_tx
->shift
, &target_tx
->shift
);
6975 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6976 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6977 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6978 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6979 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6980 __put_user(host_tx
->tai
, &target_tx
->tai
);
6982 unlock_user_struct(target_tx
, target_addr
, 1);
6988 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
6989 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
6990 abi_long target_addr
)
6992 struct target__kernel_timex
*target_tx
;
6994 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
6995 offsetof(struct target__kernel_timex
,
6997 return -TARGET_EFAULT
;
7000 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7001 return -TARGET_EFAULT
;
7004 __get_user(host_tx
->modes
, &target_tx
->modes
);
7005 __get_user(host_tx
->offset
, &target_tx
->offset
);
7006 __get_user(host_tx
->freq
, &target_tx
->freq
);
7007 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7008 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7009 __get_user(host_tx
->status
, &target_tx
->status
);
7010 __get_user(host_tx
->constant
, &target_tx
->constant
);
7011 __get_user(host_tx
->precision
, &target_tx
->precision
);
7012 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7013 __get_user(host_tx
->tick
, &target_tx
->tick
);
7014 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7015 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7016 __get_user(host_tx
->shift
, &target_tx
->shift
);
7017 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7018 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7019 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7020 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7021 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7022 __get_user(host_tx
->tai
, &target_tx
->tai
);
7024 unlock_user_struct(target_tx
, target_addr
, 0);
7028 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7029 struct timex
*host_tx
)
7031 struct target__kernel_timex
*target_tx
;
7033 if (copy_to_user_timeval64(target_addr
+
7034 offsetof(struct target__kernel_timex
, time
),
7036 return -TARGET_EFAULT
;
7039 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7040 return -TARGET_EFAULT
;
7043 __put_user(host_tx
->modes
, &target_tx
->modes
);
7044 __put_user(host_tx
->offset
, &target_tx
->offset
);
7045 __put_user(host_tx
->freq
, &target_tx
->freq
);
7046 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7047 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7048 __put_user(host_tx
->status
, &target_tx
->status
);
7049 __put_user(host_tx
->constant
, &target_tx
->constant
);
7050 __put_user(host_tx
->precision
, &target_tx
->precision
);
7051 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7052 __put_user(host_tx
->tick
, &target_tx
->tick
);
7053 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7054 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7055 __put_user(host_tx
->shift
, &target_tx
->shift
);
7056 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7057 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7058 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7059 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7060 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7061 __put_user(host_tx
->tai
, &target_tx
->tai
);
7063 unlock_user_struct(target_tx
, target_addr
, 1);
7068 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7069 abi_ulong target_addr
)
7071 struct target_sigevent
*target_sevp
;
7073 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7074 return -TARGET_EFAULT
;
7077 /* This union is awkward on 64 bit systems because it has a 32 bit
7078 * integer and a pointer in it; we follow the conversion approach
7079 * used for handling sigval types in signal.c so the guest should get
7080 * the correct value back even if we did a 64 bit byteswap and it's
7081 * using the 32 bit integer.
7083 host_sevp
->sigev_value
.sival_ptr
=
7084 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7085 host_sevp
->sigev_signo
=
7086 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7087 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7088 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7090 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate mlockall(2) flag bits from guest to host values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to the guest's 64-bit stat structure at
 * target_addr.  On 32-bit ARM the EABI and OABI layouts differ, so
 * the EABI path uses its own target struct; all other targets use
 * target_stat64 (or plain target_stat when no 64-bit layout exists).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/* Byte-swap a statx result (already in target layout on the host
   side) into the guest buffer at target_addr.  Returns 0 or
   -TARGET_EFAULT. */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
/*
 * Raw (non-interruptible) futex syscall dispatcher.
 * Selects between the plain futex syscall and the _time64 variant so
 * that the host kernel always receives a timespec whose time_t width
 * matches what the C library's struct timespec actually uses.
 * Returns the raw syscall result (errno convention of sys_futex).
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    /* sizeof is not evaluated, so this is safe even if timeout is NULL */
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Reached only if neither syscall number exists for this host. */
    g_assert_not_reached();
}
/*
 * Signal-safe futex wrapper: like do_sys_futex() but routed through the
 * safe_futex* wrappers so a guest signal arriving during the blocking
 * call is handled correctly, and with the result converted to the
 * -TARGET_* errno convention via get_errno().
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    /* sizeof is not evaluated, so this is safe even if timeout is NULL */
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7283 /* ??? Using host futex calls even when target atomic operations
7284 are not really atomic probably breaks things. However implementing
7285 futexes locally would make futexes shared between multiple processes
7286 tricky. However they're probably useless because guest atomic
7287 operations won't work either. */
#if defined(TARGET_NR_futex)
/*
 * Emulate the guest futex() syscall (32-bit time_t timespec variant).
 * Converts the guest timeout (when the operation takes one) and guest
 * addresses, then forwards to do_safe_futex().
 * Returns a -TARGET_* error code on failure.
 *
 * Fix: the return value of target_to_host_timespec() was previously
 * ignored, so a faulting guest timeout pointer proceeded with an
 * uninitialized timespec; now it yields -TARGET_EFAULT.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex_time64() syscall (64-bit time_t timespec).
 * Identical to do_futex() except the guest timeout is decoded with
 * target_to_host_timespec64().
 *
 * Fix: the return value of target_to_host_timespec64() was previously
 * ignored, so a faulting guest timeout pointer proceeded with an
 * uninitialized timespec; now it yields -TARGET_EFAULT.
 */
static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): resolve a guest pathname to an opaque
 * file handle, writing the handle back into guest memory and the mount
 * id through the guest pointer mount_id.
 * Returns the host syscall result (errno-converted) or -TARGET_EFAULT.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;
    struct file_handle *guest_fh;
    struct file_handle *host_fh;

    /* handle_bytes is the first field of the guest file_handle */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    guest_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!guest_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Build a host-side handle with the caller's requested capacity. */
    host_fh = g_malloc0(total_size);
    host_fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), host_fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */
    memcpy(guest_fh, host_fh, total_size);
    guest_fh->handle_bytes = tswap32(host_fh->handle_bytes);
    guest_fh->handle_type = tswap32(host_fh->handle_type);
    g_free(host_fh);
    unlock_user(guest_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): open the file identified by an opaque
 * handle previously produced by name_to_handle_at.  The guest handle is
 * duplicated host-side with handle_bytes/handle_type fixed up for host
 * byte order before the real syscall is issued.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     int flags)
{
    unsigned int size, total_size;
    abi_long ret;
    struct file_handle *guest_fh;
    struct file_handle *host_fh;

    /* handle_bytes is the first field of the guest file_handle */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    guest_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!guest_fh) {
        return -TARGET_EFAULT;
    }

    /* Clone the opaque payload, then overwrite the two header fields. */
    host_fh = g_memdup(guest_fh, total_size);
    host_fh->handle_bytes = size;
    host_fh->handle_type = tswap32(guest_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, host_fh,
                                      target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(host_fh);

    unlock_user(guest_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Shared backend for signalfd/signalfd4.  Validates the guest flags,
 * converts the guest sigset, and registers the resulting fd with the
 * signalfd fd translator so reads are converted back to the guest ABI.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    abi_long ret;
    int hflags;
    sigset_t hmask;
    target_sigset_t *tmask;

    /* Only O_NONBLOCK and O_CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, tmask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&hmask, tmask);

    hflags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &hmask, hflags));
    if (ret >= 0) {
        /* reads from this fd need siginfo translated back to the guest */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(tmask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* terminating signal lives in the low 7 bits */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15; low byte is 0x7f */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    /* normal exit: status encoding is identical on host and target */
    return status;
}
7515 static int open_self_cmdline(void *cpu_env
, int fd
)
7517 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7518 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7521 for (i
= 0; i
< bprm
->argc
; i
++) {
7522 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7524 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
/*
 * Synthesize /proc/self/maps for the guest: walk the host's own maps
 * (read_self_maps), keep only regions that fall inside the guest address
 * space, and print them with guest-relative addresses and the guest's
 * page protections.  Returns 0.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last host address mappable by the guest. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;

            /* Skip regions whose guest-side protections don't hold throughout. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* Label the guest stack region as the kernel would. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* pad so the pathname column starts at offset 73, like Linux */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
/*
 * Synthesize /proc/self/stat for the guest.  Only three of the 44
 * space-separated fields carry real data — pid (0), comm (1) and
 * startstack (27) — every other field is reported as 0.
 * Returns 0 on success, -1 on a failed/short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name: basename of guest argv[0], truncated to 15 chars
             * and parenthesized, matching the kernel's comm field */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Synthesize /proc/self/auxv for the guest by copying the auxiliary
 * vector that was placed on the guest stack at exec time into fd,
 * then rewinding the fd so the caller reads from the start.
 * Returns 0 (a failed lock of guest memory yields an empty file).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE: len/ptr were advanced above; unlock uses the residual len */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return 1 iff filename names the given entry of *this* process's /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>".
 * Any other path — including another process's pid — returns 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const size_t proc_len = strlen("/proc/");

    if (strncmp(filename, "/proc/", proc_len) != 0) {
        return 0;
    }
    filename += proc_len;

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* numeric pid: only our own pid counts as "myself" */
        char myself[80];
        size_t pid_len;

        snprintf(myself, sizeof(myself), "%d/", getpid());
        pid_len = strlen(myself);
        if (strncmp(filename, myself, pid_len) != 0) {
            return 0;
        }
        filename += pid_len;
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7680 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7681 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake-open table for absolute
 * /proc paths (as opposed to the per-process is_proc_myself). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Present the host's /proc/net/route to a cross-endian guest: the
 * 32-bit destination/gateway/mask words are byte-swapped, all other
 * fields are passed through.  Returns 0 on success, -1 on failure.
 *
 * Fixes: the header getline() result was unchecked, so an empty or
 * unreadable file passed a NULL `line` to dprintf("%s") (undefined
 * behavior); the sscanf "%s" for the interface name was unbounded and
 * could overflow iface[16] — now bounded with "%15s".
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */
    read = getline(&line, &len, fp);
    if (read == -1) {
        free(line);
        fclose(fp);
        return -1;
    }
    dprintf(fd, "%s", line);

    /* read routes */
    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }

        /* byte-swap the address words for the guest's endianness */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Synthesize a minimal /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_HPPA)
/* Synthesize a minimal /proc/cpuinfo for HPPA guests, describing the
 * machine QEMU emulates (a 9000/778/B160L with a PA7300LC). */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Synthesize /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
/*
 * Open a file on behalf of the guest, intercepting the /proc entries
 * whose contents must describe the guest rather than QEMU itself
 * ("exe", "maps", "stat", "auxv", "cmdline", plus a few per-target
 * entries).  Intercepted entries are synthesized into an unlinked
 * temporary file whose fd is returned; all other paths are forwarded
 * to safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* entry or full path */
        int (*fill)(void *cpu_env, int fd);         /* writes fake contents */
        int (*cmp)(const char *s1, const char *s2); /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* /proc/self/exe must refer to the guest binary, not QEMU */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the name vanishes, the fd keeps the data */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7827 #define TIMER_MAGIC 0x0caf0000
7828 #define TIMER_MAGIC_MASK 0xffff0000
7830 /* Convert QEMU provided timer ID back to internal 16bit index format */
7831 static target_timer_t
get_timer_id(abi_long arg
)
7833 target_timer_t timerid
= arg
;
7835 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7836 return -TARGET_EINVAL
;
7841 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7842 return -TARGET_EINVAL
;
/*
 * Read a guest sched_affinity CPU mask (an array of abi_ulong words at
 * target_addr, target_size bytes long) and repack it bit-for-bit into
 * the host's unsigned long mask layout, which may use a different word
 * width.  host_mask is zeroed first; host_size must be large enough.
 * Returns 0 or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* bit index of the first guest bit in this word, in flat numbering */
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_cpu_mask(): repack a host unsigned-long CPU
 * mask bit-for-bit into the guest's abi_ulong word layout and write it
 * to guest memory at target_addr (target_size bytes).
 * Returns 0 or -TARGET_EFAULT.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* bit index of the first guest bit in this word, in flat numbering */
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7915 /* This is an internal helper for do_syscall so that it is easier
7916 * to have a single return point, so that actions, such as logging
7917 * of syscall results, can be performed.
7918 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7920 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7921 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7922 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7925 CPUState
*cpu
= env_cpu(cpu_env
);
7927 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7928 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7929 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7930 || defined(TARGET_NR_statx)
7933 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7934 || defined(TARGET_NR_fstatfs)
7940 case TARGET_NR_exit
:
7941 /* In old applications this may be used to implement _exit(2).
7942 However in threaded applictions it is used for thread termination,
7943 and _exit_group is used for application termination.
7944 Do thread termination if we have more then one thread. */
7946 if (block_signals()) {
7947 return -TARGET_ERESTARTSYS
;
7950 pthread_mutex_lock(&clone_lock
);
7952 if (CPU_NEXT(first_cpu
)) {
7953 TaskState
*ts
= cpu
->opaque
;
7955 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
7956 object_unref(OBJECT(cpu
));
7958 * At this point the CPU should be unrealized and removed
7959 * from cpu lists. We can clean-up the rest of the thread
7960 * data without the lock held.
7963 pthread_mutex_unlock(&clone_lock
);
7965 if (ts
->child_tidptr
) {
7966 put_user_u32(0, ts
->child_tidptr
);
7967 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7972 rcu_unregister_thread();
7976 pthread_mutex_unlock(&clone_lock
);
7977 preexit_cleanup(cpu_env
, arg1
);
7979 return 0; /* avoid warning */
7980 case TARGET_NR_read
:
7981 if (arg2
== 0 && arg3
== 0) {
7982 return get_errno(safe_read(arg1
, 0, 0));
7984 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7985 return -TARGET_EFAULT
;
7986 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7988 fd_trans_host_to_target_data(arg1
)) {
7989 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7991 unlock_user(p
, arg2
, ret
);
7994 case TARGET_NR_write
:
7995 if (arg2
== 0 && arg3
== 0) {
7996 return get_errno(safe_write(arg1
, 0, 0));
7998 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7999 return -TARGET_EFAULT
;
8000 if (fd_trans_target_to_host_data(arg1
)) {
8001 void *copy
= g_malloc(arg3
);
8002 memcpy(copy
, p
, arg3
);
8003 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8005 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8009 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8011 unlock_user(p
, arg2
, 0);
8014 #ifdef TARGET_NR_open
8015 case TARGET_NR_open
:
8016 if (!(p
= lock_user_string(arg1
)))
8017 return -TARGET_EFAULT
;
8018 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8019 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8021 fd_trans_unregister(ret
);
8022 unlock_user(p
, arg1
, 0);
8025 case TARGET_NR_openat
:
8026 if (!(p
= lock_user_string(arg2
)))
8027 return -TARGET_EFAULT
;
8028 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8029 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8031 fd_trans_unregister(ret
);
8032 unlock_user(p
, arg2
, 0);
8034 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8035 case TARGET_NR_name_to_handle_at
:
8036 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8039 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8040 case TARGET_NR_open_by_handle_at
:
8041 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8042 fd_trans_unregister(ret
);
8045 case TARGET_NR_close
:
8046 fd_trans_unregister(arg1
);
8047 return get_errno(close(arg1
));
8050 return do_brk(arg1
);
8051 #ifdef TARGET_NR_fork
8052 case TARGET_NR_fork
:
8053 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8055 #ifdef TARGET_NR_waitpid
8056 case TARGET_NR_waitpid
:
8059 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8060 if (!is_error(ret
) && arg2
&& ret
8061 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8062 return -TARGET_EFAULT
;
8066 #ifdef TARGET_NR_waitid
8067 case TARGET_NR_waitid
:
8071 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8072 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8073 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8074 return -TARGET_EFAULT
;
8075 host_to_target_siginfo(p
, &info
);
8076 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8081 #ifdef TARGET_NR_creat /* not on alpha */
8082 case TARGET_NR_creat
:
8083 if (!(p
= lock_user_string(arg1
)))
8084 return -TARGET_EFAULT
;
8085 ret
= get_errno(creat(p
, arg2
));
8086 fd_trans_unregister(ret
);
8087 unlock_user(p
, arg1
, 0);
8090 #ifdef TARGET_NR_link
8091 case TARGET_NR_link
:
8094 p
= lock_user_string(arg1
);
8095 p2
= lock_user_string(arg2
);
8097 ret
= -TARGET_EFAULT
;
8099 ret
= get_errno(link(p
, p2
));
8100 unlock_user(p2
, arg2
, 0);
8101 unlock_user(p
, arg1
, 0);
8105 #if defined(TARGET_NR_linkat)
8106 case TARGET_NR_linkat
:
8110 return -TARGET_EFAULT
;
8111 p
= lock_user_string(arg2
);
8112 p2
= lock_user_string(arg4
);
8114 ret
= -TARGET_EFAULT
;
8116 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8117 unlock_user(p
, arg2
, 0);
8118 unlock_user(p2
, arg4
, 0);
8122 #ifdef TARGET_NR_unlink
8123 case TARGET_NR_unlink
:
8124 if (!(p
= lock_user_string(arg1
)))
8125 return -TARGET_EFAULT
;
8126 ret
= get_errno(unlink(p
));
8127 unlock_user(p
, arg1
, 0);
8130 #if defined(TARGET_NR_unlinkat)
8131 case TARGET_NR_unlinkat
:
8132 if (!(p
= lock_user_string(arg2
)))
8133 return -TARGET_EFAULT
;
8134 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8135 unlock_user(p
, arg2
, 0);
8138 case TARGET_NR_execve
:
8140 char **argp
, **envp
;
8143 abi_ulong guest_argp
;
8144 abi_ulong guest_envp
;
8151 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8152 if (get_user_ual(addr
, gp
))
8153 return -TARGET_EFAULT
;
8160 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8161 if (get_user_ual(addr
, gp
))
8162 return -TARGET_EFAULT
;
8168 argp
= g_new0(char *, argc
+ 1);
8169 envp
= g_new0(char *, envc
+ 1);
8171 for (gp
= guest_argp
, q
= argp
; gp
;
8172 gp
+= sizeof(abi_ulong
), q
++) {
8173 if (get_user_ual(addr
, gp
))
8177 if (!(*q
= lock_user_string(addr
)))
8179 total_size
+= strlen(*q
) + 1;
8183 for (gp
= guest_envp
, q
= envp
; gp
;
8184 gp
+= sizeof(abi_ulong
), q
++) {
8185 if (get_user_ual(addr
, gp
))
8189 if (!(*q
= lock_user_string(addr
)))
8191 total_size
+= strlen(*q
) + 1;
8195 if (!(p
= lock_user_string(arg1
)))
8197 /* Although execve() is not an interruptible syscall it is
8198 * a special case where we must use the safe_syscall wrapper:
8199 * if we allow a signal to happen before we make the host
8200 * syscall then we will 'lose' it, because at the point of
8201 * execve the process leaves QEMU's control. So we use the
8202 * safe syscall wrapper to ensure that we either take the
8203 * signal as a guest signal, or else it does not happen
8204 * before the execve completes and makes it the other
8205 * program's problem.
8207 ret
= get_errno(safe_execve(p
, argp
, envp
));
8208 unlock_user(p
, arg1
, 0);
8213 ret
= -TARGET_EFAULT
;
8216 for (gp
= guest_argp
, q
= argp
; *q
;
8217 gp
+= sizeof(abi_ulong
), q
++) {
8218 if (get_user_ual(addr
, gp
)
8221 unlock_user(*q
, addr
, 0);
8223 for (gp
= guest_envp
, q
= envp
; *q
;
8224 gp
+= sizeof(abi_ulong
), q
++) {
8225 if (get_user_ual(addr
, gp
)
8228 unlock_user(*q
, addr
, 0);
8235 case TARGET_NR_chdir
:
8236 if (!(p
= lock_user_string(arg1
)))
8237 return -TARGET_EFAULT
;
8238 ret
= get_errno(chdir(p
));
8239 unlock_user(p
, arg1
, 0);
8241 #ifdef TARGET_NR_time
8242 case TARGET_NR_time
:
8245 ret
= get_errno(time(&host_time
));
8248 && put_user_sal(host_time
, arg1
))
8249 return -TARGET_EFAULT
;
8253 #ifdef TARGET_NR_mknod
8254 case TARGET_NR_mknod
:
8255 if (!(p
= lock_user_string(arg1
)))
8256 return -TARGET_EFAULT
;
8257 ret
= get_errno(mknod(p
, arg2
, arg3
));
8258 unlock_user(p
, arg1
, 0);
8261 #if defined(TARGET_NR_mknodat)
8262 case TARGET_NR_mknodat
:
8263 if (!(p
= lock_user_string(arg2
)))
8264 return -TARGET_EFAULT
;
8265 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8266 unlock_user(p
, arg2
, 0);
8269 #ifdef TARGET_NR_chmod
8270 case TARGET_NR_chmod
:
8271 if (!(p
= lock_user_string(arg1
)))
8272 return -TARGET_EFAULT
;
8273 ret
= get_errno(chmod(p
, arg2
));
8274 unlock_user(p
, arg1
, 0);
8277 #ifdef TARGET_NR_lseek
8278 case TARGET_NR_lseek
:
8279 return get_errno(lseek(arg1
, arg2
, arg3
));
8281 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8282 /* Alpha specific */
8283 case TARGET_NR_getxpid
:
8284 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8285 return get_errno(getpid());
8287 #ifdef TARGET_NR_getpid
8288 case TARGET_NR_getpid
:
8289 return get_errno(getpid());
8291 case TARGET_NR_mount
:
8293 /* need to look at the data field */
8297 p
= lock_user_string(arg1
);
8299 return -TARGET_EFAULT
;
8305 p2
= lock_user_string(arg2
);
8308 unlock_user(p
, arg1
, 0);
8310 return -TARGET_EFAULT
;
8314 p3
= lock_user_string(arg3
);
8317 unlock_user(p
, arg1
, 0);
8319 unlock_user(p2
, arg2
, 0);
8320 return -TARGET_EFAULT
;
8326 /* FIXME - arg5 should be locked, but it isn't clear how to
8327 * do that since it's not guaranteed to be a NULL-terminated
8331 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8333 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8335 ret
= get_errno(ret
);
8338 unlock_user(p
, arg1
, 0);
8340 unlock_user(p2
, arg2
, 0);
8342 unlock_user(p3
, arg3
, 0);
8346 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8347 #if defined(TARGET_NR_umount)
8348 case TARGET_NR_umount
:
8350 #if defined(TARGET_NR_oldumount)
8351 case TARGET_NR_oldumount
:
8353 if (!(p
= lock_user_string(arg1
)))
8354 return -TARGET_EFAULT
;
8355 ret
= get_errno(umount(p
));
8356 unlock_user(p
, arg1
, 0);
8359 #ifdef TARGET_NR_stime /* not on alpha */
8360 case TARGET_NR_stime
:
8364 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8365 return -TARGET_EFAULT
;
8367 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8370 #ifdef TARGET_NR_alarm /* not on alpha */
8371 case TARGET_NR_alarm
:
8374 #ifdef TARGET_NR_pause /* not on alpha */
8375 case TARGET_NR_pause
:
8376 if (!block_signals()) {
8377 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8379 return -TARGET_EINTR
;
8381 #ifdef TARGET_NR_utime
8382 case TARGET_NR_utime
:
8384 struct utimbuf tbuf
, *host_tbuf
;
8385 struct target_utimbuf
*target_tbuf
;
8387 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8388 return -TARGET_EFAULT
;
8389 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8390 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8391 unlock_user_struct(target_tbuf
, arg2
, 0);
8396 if (!(p
= lock_user_string(arg1
)))
8397 return -TARGET_EFAULT
;
8398 ret
= get_errno(utime(p
, host_tbuf
));
8399 unlock_user(p
, arg1
, 0);
8403 #ifdef TARGET_NR_utimes
8404 case TARGET_NR_utimes
:
8406 struct timeval
*tvp
, tv
[2];
8408 if (copy_from_user_timeval(&tv
[0], arg2
)
8409 || copy_from_user_timeval(&tv
[1],
8410 arg2
+ sizeof(struct target_timeval
)))
8411 return -TARGET_EFAULT
;
8416 if (!(p
= lock_user_string(arg1
)))
8417 return -TARGET_EFAULT
;
8418 ret
= get_errno(utimes(p
, tvp
));
8419 unlock_user(p
, arg1
, 0);
8423 #if defined(TARGET_NR_futimesat)
8424 case TARGET_NR_futimesat
:
8426 struct timeval
*tvp
, tv
[2];
8428 if (copy_from_user_timeval(&tv
[0], arg3
)
8429 || copy_from_user_timeval(&tv
[1],
8430 arg3
+ sizeof(struct target_timeval
)))
8431 return -TARGET_EFAULT
;
8436 if (!(p
= lock_user_string(arg2
))) {
8437 return -TARGET_EFAULT
;
8439 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8440 unlock_user(p
, arg2
, 0);
8444 #ifdef TARGET_NR_access
8445 case TARGET_NR_access
:
8446 if (!(p
= lock_user_string(arg1
))) {
8447 return -TARGET_EFAULT
;
8449 ret
= get_errno(access(path(p
), arg2
));
8450 unlock_user(p
, arg1
, 0);
8453 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8454 case TARGET_NR_faccessat
:
8455 if (!(p
= lock_user_string(arg2
))) {
8456 return -TARGET_EFAULT
;
8458 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8459 unlock_user(p
, arg2
, 0);
8462 #ifdef TARGET_NR_nice /* not on alpha */
8463 case TARGET_NR_nice
:
8464 return get_errno(nice(arg1
));
8466 case TARGET_NR_sync
:
8469 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8470 case TARGET_NR_syncfs
:
8471 return get_errno(syncfs(arg1
));
8473 case TARGET_NR_kill
:
8474 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8475 #ifdef TARGET_NR_rename
8476 case TARGET_NR_rename
:
8479 p
= lock_user_string(arg1
);
8480 p2
= lock_user_string(arg2
);
8482 ret
= -TARGET_EFAULT
;
8484 ret
= get_errno(rename(p
, p2
));
8485 unlock_user(p2
, arg2
, 0);
8486 unlock_user(p
, arg1
, 0);
8490 #if defined(TARGET_NR_renameat)
8491 case TARGET_NR_renameat
:
8494 p
= lock_user_string(arg2
);
8495 p2
= lock_user_string(arg4
);
8497 ret
= -TARGET_EFAULT
;
8499 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8500 unlock_user(p2
, arg4
, 0);
8501 unlock_user(p
, arg2
, 0);
8505 #if defined(TARGET_NR_renameat2)
8506 case TARGET_NR_renameat2
:
8509 p
= lock_user_string(arg2
);
8510 p2
= lock_user_string(arg4
);
8512 ret
= -TARGET_EFAULT
;
8514 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8516 unlock_user(p2
, arg4
, 0);
8517 unlock_user(p
, arg2
, 0);
8521 #ifdef TARGET_NR_mkdir
8522 case TARGET_NR_mkdir
:
8523 if (!(p
= lock_user_string(arg1
)))
8524 return -TARGET_EFAULT
;
8525 ret
= get_errno(mkdir(p
, arg2
));
8526 unlock_user(p
, arg1
, 0);
8529 #if defined(TARGET_NR_mkdirat)
8530 case TARGET_NR_mkdirat
:
8531 if (!(p
= lock_user_string(arg2
)))
8532 return -TARGET_EFAULT
;
8533 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8534 unlock_user(p
, arg2
, 0);
8537 #ifdef TARGET_NR_rmdir
8538 case TARGET_NR_rmdir
:
8539 if (!(p
= lock_user_string(arg1
)))
8540 return -TARGET_EFAULT
;
8541 ret
= get_errno(rmdir(p
));
8542 unlock_user(p
, arg1
, 0);
8546 ret
= get_errno(dup(arg1
));
8548 fd_trans_dup(arg1
, ret
);
8551 #ifdef TARGET_NR_pipe
8552 case TARGET_NR_pipe
:
8553 return do_pipe(cpu_env
, arg1
, 0, 0);
8555 #ifdef TARGET_NR_pipe2
8556 case TARGET_NR_pipe2
:
8557 return do_pipe(cpu_env
, arg1
,
8558 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8560 case TARGET_NR_times
:
8562 struct target_tms
*tmsp
;
8564 ret
= get_errno(times(&tms
));
8566 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8568 return -TARGET_EFAULT
;
8569 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8570 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8571 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8572 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8575 ret
= host_to_target_clock_t(ret
);
8578 case TARGET_NR_acct
:
8580 ret
= get_errno(acct(NULL
));
8582 if (!(p
= lock_user_string(arg1
))) {
8583 return -TARGET_EFAULT
;
8585 ret
= get_errno(acct(path(p
)));
8586 unlock_user(p
, arg1
, 0);
8589 #ifdef TARGET_NR_umount2
8590 case TARGET_NR_umount2
:
8591 if (!(p
= lock_user_string(arg1
)))
8592 return -TARGET_EFAULT
;
8593 ret
= get_errno(umount2(p
, arg2
));
8594 unlock_user(p
, arg1
, 0);
8597 case TARGET_NR_ioctl
:
8598 return do_ioctl(arg1
, arg2
, arg3
);
8599 #ifdef TARGET_NR_fcntl
8600 case TARGET_NR_fcntl
:
8601 return do_fcntl(arg1
, arg2
, arg3
);
8603 case TARGET_NR_setpgid
:
8604 return get_errno(setpgid(arg1
, arg2
));
8605 case TARGET_NR_umask
:
8606 return get_errno(umask(arg1
));
8607 case TARGET_NR_chroot
:
8608 if (!(p
= lock_user_string(arg1
)))
8609 return -TARGET_EFAULT
;
8610 ret
= get_errno(chroot(p
));
8611 unlock_user(p
, arg1
, 0);
8613 #ifdef TARGET_NR_dup2
8614 case TARGET_NR_dup2
:
8615 ret
= get_errno(dup2(arg1
, arg2
));
8617 fd_trans_dup(arg1
, arg2
);
8621 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8622 case TARGET_NR_dup3
:
8626 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8629 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8630 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8632 fd_trans_dup(arg1
, arg2
);
8637 #ifdef TARGET_NR_getppid /* not on alpha */
8638 case TARGET_NR_getppid
:
8639 return get_errno(getppid());
8641 #ifdef TARGET_NR_getpgrp
8642 case TARGET_NR_getpgrp
:
8643 return get_errno(getpgrp());
8645 case TARGET_NR_setsid
:
8646 return get_errno(setsid());
8647 #ifdef TARGET_NR_sigaction
8648 case TARGET_NR_sigaction
:
8650 #if defined(TARGET_ALPHA)
8651 struct target_sigaction act
, oact
, *pact
= 0;
8652 struct target_old_sigaction
*old_act
;
8654 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8655 return -TARGET_EFAULT
;
8656 act
._sa_handler
= old_act
->_sa_handler
;
8657 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8658 act
.sa_flags
= old_act
->sa_flags
;
8659 act
.sa_restorer
= 0;
8660 unlock_user_struct(old_act
, arg2
, 0);
8663 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8664 if (!is_error(ret
) && arg3
) {
8665 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8666 return -TARGET_EFAULT
;
8667 old_act
->_sa_handler
= oact
._sa_handler
;
8668 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8669 old_act
->sa_flags
= oact
.sa_flags
;
8670 unlock_user_struct(old_act
, arg3
, 1);
8672 #elif defined(TARGET_MIPS)
8673 struct target_sigaction act
, oact
, *pact
, *old_act
;
8676 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8677 return -TARGET_EFAULT
;
8678 act
._sa_handler
= old_act
->_sa_handler
;
8679 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8680 act
.sa_flags
= old_act
->sa_flags
;
8681 unlock_user_struct(old_act
, arg2
, 0);
8687 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8689 if (!is_error(ret
) && arg3
) {
8690 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8691 return -TARGET_EFAULT
;
8692 old_act
->_sa_handler
= oact
._sa_handler
;
8693 old_act
->sa_flags
= oact
.sa_flags
;
8694 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8695 old_act
->sa_mask
.sig
[1] = 0;
8696 old_act
->sa_mask
.sig
[2] = 0;
8697 old_act
->sa_mask
.sig
[3] = 0;
8698 unlock_user_struct(old_act
, arg3
, 1);
8701 struct target_old_sigaction
*old_act
;
8702 struct target_sigaction act
, oact
, *pact
;
8704 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8705 return -TARGET_EFAULT
;
8706 act
._sa_handler
= old_act
->_sa_handler
;
8707 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8708 act
.sa_flags
= old_act
->sa_flags
;
8709 act
.sa_restorer
= old_act
->sa_restorer
;
8710 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8711 act
.ka_restorer
= 0;
8713 unlock_user_struct(old_act
, arg2
, 0);
8718 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8719 if (!is_error(ret
) && arg3
) {
8720 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8721 return -TARGET_EFAULT
;
8722 old_act
->_sa_handler
= oact
._sa_handler
;
8723 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8724 old_act
->sa_flags
= oact
.sa_flags
;
8725 old_act
->sa_restorer
= oact
.sa_restorer
;
8726 unlock_user_struct(old_act
, arg3
, 1);
8732 case TARGET_NR_rt_sigaction
:
8734 #if defined(TARGET_ALPHA)
8735 /* For Alpha and SPARC this is a 5 argument syscall, with
8736 * a 'restorer' parameter which must be copied into the
8737 * sa_restorer field of the sigaction struct.
8738 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8739 * and arg5 is the sigsetsize.
8740 * Alpha also has a separate rt_sigaction struct that it uses
8741 * here; SPARC uses the usual sigaction struct.
8743 struct target_rt_sigaction
*rt_act
;
8744 struct target_sigaction act
, oact
, *pact
= 0;
8746 if (arg4
!= sizeof(target_sigset_t
)) {
8747 return -TARGET_EINVAL
;
8750 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8751 return -TARGET_EFAULT
;
8752 act
._sa_handler
= rt_act
->_sa_handler
;
8753 act
.sa_mask
= rt_act
->sa_mask
;
8754 act
.sa_flags
= rt_act
->sa_flags
;
8755 act
.sa_restorer
= arg5
;
8756 unlock_user_struct(rt_act
, arg2
, 0);
8759 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8760 if (!is_error(ret
) && arg3
) {
8761 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8762 return -TARGET_EFAULT
;
8763 rt_act
->_sa_handler
= oact
._sa_handler
;
8764 rt_act
->sa_mask
= oact
.sa_mask
;
8765 rt_act
->sa_flags
= oact
.sa_flags
;
8766 unlock_user_struct(rt_act
, arg3
, 1);
8770 target_ulong restorer
= arg4
;
8771 target_ulong sigsetsize
= arg5
;
8773 target_ulong sigsetsize
= arg4
;
8775 struct target_sigaction
*act
;
8776 struct target_sigaction
*oact
;
8778 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8779 return -TARGET_EINVAL
;
8782 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8783 return -TARGET_EFAULT
;
8785 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8786 act
->ka_restorer
= restorer
;
8792 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8793 ret
= -TARGET_EFAULT
;
8794 goto rt_sigaction_fail
;
8798 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8801 unlock_user_struct(act
, arg2
, 0);
8803 unlock_user_struct(oact
, arg3
, 1);
8807 #ifdef TARGET_NR_sgetmask /* not on alpha */
8808 case TARGET_NR_sgetmask
:
8811 abi_ulong target_set
;
8812 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8814 host_to_target_old_sigset(&target_set
, &cur_set
);
8820 #ifdef TARGET_NR_ssetmask /* not on alpha */
8821 case TARGET_NR_ssetmask
:
8824 abi_ulong target_set
= arg1
;
8825 target_to_host_old_sigset(&set
, &target_set
);
8826 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8828 host_to_target_old_sigset(&target_set
, &oset
);
8834 #ifdef TARGET_NR_sigprocmask
8835 case TARGET_NR_sigprocmask
:
8837 #if defined(TARGET_ALPHA)
8838 sigset_t set
, oldset
;
8843 case TARGET_SIG_BLOCK
:
8846 case TARGET_SIG_UNBLOCK
:
8849 case TARGET_SIG_SETMASK
:
8853 return -TARGET_EINVAL
;
8856 target_to_host_old_sigset(&set
, &mask
);
8858 ret
= do_sigprocmask(how
, &set
, &oldset
);
8859 if (!is_error(ret
)) {
8860 host_to_target_old_sigset(&mask
, &oldset
);
8862 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8865 sigset_t set
, oldset
, *set_ptr
;
8870 case TARGET_SIG_BLOCK
:
8873 case TARGET_SIG_UNBLOCK
:
8876 case TARGET_SIG_SETMASK
:
8880 return -TARGET_EINVAL
;
8882 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8883 return -TARGET_EFAULT
;
8884 target_to_host_old_sigset(&set
, p
);
8885 unlock_user(p
, arg2
, 0);
8891 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8892 if (!is_error(ret
) && arg3
) {
8893 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8894 return -TARGET_EFAULT
;
8895 host_to_target_old_sigset(p
, &oldset
);
8896 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8902 case TARGET_NR_rt_sigprocmask
:
8905 sigset_t set
, oldset
, *set_ptr
;
8907 if (arg4
!= sizeof(target_sigset_t
)) {
8908 return -TARGET_EINVAL
;
8913 case TARGET_SIG_BLOCK
:
8916 case TARGET_SIG_UNBLOCK
:
8919 case TARGET_SIG_SETMASK
:
8923 return -TARGET_EINVAL
;
8925 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8926 return -TARGET_EFAULT
;
8927 target_to_host_sigset(&set
, p
);
8928 unlock_user(p
, arg2
, 0);
8934 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8935 if (!is_error(ret
) && arg3
) {
8936 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8937 return -TARGET_EFAULT
;
8938 host_to_target_sigset(p
, &oldset
);
8939 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8943 #ifdef TARGET_NR_sigpending
8944 case TARGET_NR_sigpending
:
8947 ret
= get_errno(sigpending(&set
));
8948 if (!is_error(ret
)) {
8949 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8950 return -TARGET_EFAULT
;
8951 host_to_target_old_sigset(p
, &set
);
8952 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8957 case TARGET_NR_rt_sigpending
:
8961 /* Yes, this check is >, not != like most. We follow the kernel's
8962 * logic and it does it like this because it implements
8963 * NR_sigpending through the same code path, and in that case
8964 * the old_sigset_t is smaller in size.
8966 if (arg2
> sizeof(target_sigset_t
)) {
8967 return -TARGET_EINVAL
;
8970 ret
= get_errno(sigpending(&set
));
8971 if (!is_error(ret
)) {
8972 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8973 return -TARGET_EFAULT
;
8974 host_to_target_sigset(p
, &set
);
8975 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8979 #ifdef TARGET_NR_sigsuspend
8980 case TARGET_NR_sigsuspend
:
8982 TaskState
*ts
= cpu
->opaque
;
8983 #if defined(TARGET_ALPHA)
8984 abi_ulong mask
= arg1
;
8985 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8987 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8988 return -TARGET_EFAULT
;
8989 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8990 unlock_user(p
, arg1
, 0);
8992 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8994 if (ret
!= -TARGET_ERESTARTSYS
) {
8995 ts
->in_sigsuspend
= 1;
9000 case TARGET_NR_rt_sigsuspend
:
9002 TaskState
*ts
= cpu
->opaque
;
9004 if (arg2
!= sizeof(target_sigset_t
)) {
9005 return -TARGET_EINVAL
;
9007 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9008 return -TARGET_EFAULT
;
9009 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9010 unlock_user(p
, arg1
, 0);
9011 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9013 if (ret
!= -TARGET_ERESTARTSYS
) {
9014 ts
->in_sigsuspend
= 1;
9018 #ifdef TARGET_NR_rt_sigtimedwait
9019 case TARGET_NR_rt_sigtimedwait
:
9022 struct timespec uts
, *puts
;
9025 if (arg4
!= sizeof(target_sigset_t
)) {
9026 return -TARGET_EINVAL
;
9029 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9030 return -TARGET_EFAULT
;
9031 target_to_host_sigset(&set
, p
);
9032 unlock_user(p
, arg1
, 0);
9035 if (target_to_host_timespec(puts
, arg3
)) {
9036 return -TARGET_EFAULT
;
9041 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9043 if (!is_error(ret
)) {
9045 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9048 return -TARGET_EFAULT
;
9050 host_to_target_siginfo(p
, &uinfo
);
9051 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9053 ret
= host_to_target_signal(ret
);
9058 #ifdef TARGET_NR_rt_sigtimedwait_time64
9059 case TARGET_NR_rt_sigtimedwait_time64
:
9062 struct timespec uts
, *puts
;
9065 if (arg4
!= sizeof(target_sigset_t
)) {
9066 return -TARGET_EINVAL
;
9069 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9071 return -TARGET_EFAULT
;
9073 target_to_host_sigset(&set
, p
);
9074 unlock_user(p
, arg1
, 0);
9077 if (target_to_host_timespec64(puts
, arg3
)) {
9078 return -TARGET_EFAULT
;
9083 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9085 if (!is_error(ret
)) {
9087 p
= lock_user(VERIFY_WRITE
, arg2
,
9088 sizeof(target_siginfo_t
), 0);
9090 return -TARGET_EFAULT
;
9092 host_to_target_siginfo(p
, &uinfo
);
9093 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9095 ret
= host_to_target_signal(ret
);
9100 case TARGET_NR_rt_sigqueueinfo
:
9104 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9106 return -TARGET_EFAULT
;
9108 target_to_host_siginfo(&uinfo
, p
);
9109 unlock_user(p
, arg3
, 0);
9110 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9113 case TARGET_NR_rt_tgsigqueueinfo
:
9117 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9119 return -TARGET_EFAULT
;
9121 target_to_host_siginfo(&uinfo
, p
);
9122 unlock_user(p
, arg4
, 0);
9123 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9126 #ifdef TARGET_NR_sigreturn
9127 case TARGET_NR_sigreturn
:
9128 if (block_signals()) {
9129 return -TARGET_ERESTARTSYS
;
9131 return do_sigreturn(cpu_env
);
9133 case TARGET_NR_rt_sigreturn
:
9134 if (block_signals()) {
9135 return -TARGET_ERESTARTSYS
;
9137 return do_rt_sigreturn(cpu_env
);
9138 case TARGET_NR_sethostname
:
9139 if (!(p
= lock_user_string(arg1
)))
9140 return -TARGET_EFAULT
;
9141 ret
= get_errno(sethostname(p
, arg2
));
9142 unlock_user(p
, arg1
, 0);
9144 #ifdef TARGET_NR_setrlimit
9145 case TARGET_NR_setrlimit
:
9147 int resource
= target_to_host_resource(arg1
);
9148 struct target_rlimit
*target_rlim
;
9150 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9151 return -TARGET_EFAULT
;
9152 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9153 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9154 unlock_user_struct(target_rlim
, arg2
, 0);
9156 * If we just passed through resource limit settings for memory then
9157 * they would also apply to QEMU's own allocations, and QEMU will
9158 * crash or hang or die if its allocations fail. Ideally we would
9159 * track the guest allocations in QEMU and apply the limits ourselves.
9160 * For now, just tell the guest the call succeeded but don't actually
9163 if (resource
!= RLIMIT_AS
&&
9164 resource
!= RLIMIT_DATA
&&
9165 resource
!= RLIMIT_STACK
) {
9166 return get_errno(setrlimit(resource
, &rlim
));
9172 #ifdef TARGET_NR_getrlimit
9173 case TARGET_NR_getrlimit
:
9175 int resource
= target_to_host_resource(arg1
);
9176 struct target_rlimit
*target_rlim
;
9179 ret
= get_errno(getrlimit(resource
, &rlim
));
9180 if (!is_error(ret
)) {
9181 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9182 return -TARGET_EFAULT
;
9183 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9184 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9185 unlock_user_struct(target_rlim
, arg2
, 1);
9190 case TARGET_NR_getrusage
:
9192 struct rusage rusage
;
9193 ret
= get_errno(getrusage(arg1
, &rusage
));
9194 if (!is_error(ret
)) {
9195 ret
= host_to_target_rusage(arg2
, &rusage
);
9199 #if defined(TARGET_NR_gettimeofday)
9200 case TARGET_NR_gettimeofday
:
9205 ret
= get_errno(gettimeofday(&tv
, &tz
));
9206 if (!is_error(ret
)) {
9207 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9208 return -TARGET_EFAULT
;
9210 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9211 return -TARGET_EFAULT
;
9217 #if defined(TARGET_NR_settimeofday)
9218 case TARGET_NR_settimeofday
:
9220 struct timeval tv
, *ptv
= NULL
;
9221 struct timezone tz
, *ptz
= NULL
;
9224 if (copy_from_user_timeval(&tv
, arg1
)) {
9225 return -TARGET_EFAULT
;
9231 if (copy_from_user_timezone(&tz
, arg2
)) {
9232 return -TARGET_EFAULT
;
9237 return get_errno(settimeofday(ptv
, ptz
));
9240 #if defined(TARGET_NR_select)
9241 case TARGET_NR_select
:
9242 #if defined(TARGET_WANT_NI_OLD_SELECT)
9243 /* some architectures used to have old_select here
9244 * but now ENOSYS it.
9246 ret
= -TARGET_ENOSYS
;
9247 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9248 ret
= do_old_select(arg1
);
9250 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9254 #ifdef TARGET_NR_pselect6
9255 case TARGET_NR_pselect6
:
9257 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9258 fd_set rfds
, wfds
, efds
;
9259 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9260 struct timespec ts
, *ts_ptr
;
9263 * The 6th arg is actually two args smashed together,
9264 * so we cannot use the C library.
9272 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9273 target_sigset_t
*target_sigset
;
9281 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9285 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9289 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9295 * This takes a timespec, and not a timeval, so we cannot
9296 * use the do_select() helper ...
9299 if (target_to_host_timespec(&ts
, ts_addr
)) {
9300 return -TARGET_EFAULT
;
9307 /* Extract the two packed args for the sigset */
9310 sig
.size
= SIGSET_T_SIZE
;
9312 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9314 return -TARGET_EFAULT
;
9316 arg_sigset
= tswapal(arg7
[0]);
9317 arg_sigsize
= tswapal(arg7
[1]);
9318 unlock_user(arg7
, arg6
, 0);
9322 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9323 /* Like the kernel, we enforce correct size sigsets */
9324 return -TARGET_EINVAL
;
9326 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9327 sizeof(*target_sigset
), 1);
9328 if (!target_sigset
) {
9329 return -TARGET_EFAULT
;
9331 target_to_host_sigset(&set
, target_sigset
);
9332 unlock_user(target_sigset
, arg_sigset
, 0);
9340 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9343 if (!is_error(ret
)) {
9344 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9345 return -TARGET_EFAULT
;
9346 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9347 return -TARGET_EFAULT
;
9348 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9349 return -TARGET_EFAULT
;
9351 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9352 return -TARGET_EFAULT
;
9357 #ifdef TARGET_NR_symlink
9358 case TARGET_NR_symlink
:
9361 p
= lock_user_string(arg1
);
9362 p2
= lock_user_string(arg2
);
9364 ret
= -TARGET_EFAULT
;
9366 ret
= get_errno(symlink(p
, p2
));
9367 unlock_user(p2
, arg2
, 0);
9368 unlock_user(p
, arg1
, 0);
9372 #if defined(TARGET_NR_symlinkat)
9373 case TARGET_NR_symlinkat
:
9376 p
= lock_user_string(arg1
);
9377 p2
= lock_user_string(arg3
);
9379 ret
= -TARGET_EFAULT
;
9381 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9382 unlock_user(p2
, arg3
, 0);
9383 unlock_user(p
, arg1
, 0);
9387 #ifdef TARGET_NR_readlink
9388 case TARGET_NR_readlink
:
9391 p
= lock_user_string(arg1
);
9392 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9394 ret
= -TARGET_EFAULT
;
9396 /* Short circuit this for the magic exe check. */
9397 ret
= -TARGET_EINVAL
;
9398 } else if (is_proc_myself((const char *)p
, "exe")) {
9399 char real
[PATH_MAX
], *temp
;
9400 temp
= realpath(exec_path
, real
);
9401 /* Return value is # of bytes that we wrote to the buffer. */
9403 ret
= get_errno(-1);
9405 /* Don't worry about sign mismatch as earlier mapping
9406 * logic would have thrown a bad address error. */
9407 ret
= MIN(strlen(real
), arg3
);
9408 /* We cannot NUL terminate the string. */
9409 memcpy(p2
, real
, ret
);
9412 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9414 unlock_user(p2
, arg2
, ret
);
9415 unlock_user(p
, arg1
, 0);
9419 #if defined(TARGET_NR_readlinkat)
9420 case TARGET_NR_readlinkat
:
9423 p
= lock_user_string(arg2
);
9424 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9426 ret
= -TARGET_EFAULT
;
9427 } else if (is_proc_myself((const char *)p
, "exe")) {
9428 char real
[PATH_MAX
], *temp
;
9429 temp
= realpath(exec_path
, real
);
9430 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9431 snprintf((char *)p2
, arg4
, "%s", real
);
9433 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9435 unlock_user(p2
, arg3
, ret
);
9436 unlock_user(p
, arg2
, 0);
9440 #ifdef TARGET_NR_swapon
9441 case TARGET_NR_swapon
:
9442 if (!(p
= lock_user_string(arg1
)))
9443 return -TARGET_EFAULT
;
9444 ret
= get_errno(swapon(p
, arg2
));
9445 unlock_user(p
, arg1
, 0);
9448 case TARGET_NR_reboot
:
9449 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9450 /* arg4 must be ignored in all other cases */
9451 p
= lock_user_string(arg4
);
9453 return -TARGET_EFAULT
;
9455 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9456 unlock_user(p
, arg4
, 0);
9458 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9461 #ifdef TARGET_NR_mmap
9462 case TARGET_NR_mmap
:
9463 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9464 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9465 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9466 || defined(TARGET_S390X)
9469 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9470 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9471 return -TARGET_EFAULT
;
9478 unlock_user(v
, arg1
, 0);
9479 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9480 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9484 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9485 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9491 #ifdef TARGET_NR_mmap2
9492 case TARGET_NR_mmap2
:
9494 #define MMAP_SHIFT 12
9496 ret
= target_mmap(arg1
, arg2
, arg3
,
9497 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9498 arg5
, arg6
<< MMAP_SHIFT
);
9499 return get_errno(ret
);
9501 case TARGET_NR_munmap
:
9502 return get_errno(target_munmap(arg1
, arg2
));
9503 case TARGET_NR_mprotect
:
9505 TaskState
*ts
= cpu
->opaque
;
9506 /* Special hack to detect libc making the stack executable. */
9507 if ((arg3
& PROT_GROWSDOWN
)
9508 && arg1
>= ts
->info
->stack_limit
9509 && arg1
<= ts
->info
->start_stack
) {
9510 arg3
&= ~PROT_GROWSDOWN
;
9511 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9512 arg1
= ts
->info
->stack_limit
;
9515 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9516 #ifdef TARGET_NR_mremap
9517 case TARGET_NR_mremap
:
9518 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9520 /* ??? msync/mlock/munlock are broken for softmmu. */
9521 #ifdef TARGET_NR_msync
9522 case TARGET_NR_msync
:
9523 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9525 #ifdef TARGET_NR_mlock
9526 case TARGET_NR_mlock
:
9527 return get_errno(mlock(g2h(arg1
), arg2
));
9529 #ifdef TARGET_NR_munlock
9530 case TARGET_NR_munlock
:
9531 return get_errno(munlock(g2h(arg1
), arg2
));
9533 #ifdef TARGET_NR_mlockall
9534 case TARGET_NR_mlockall
:
9535 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9537 #ifdef TARGET_NR_munlockall
9538 case TARGET_NR_munlockall
:
9539 return get_errno(munlockall());
9541 #ifdef TARGET_NR_truncate
9542 case TARGET_NR_truncate
:
9543 if (!(p
= lock_user_string(arg1
)))
9544 return -TARGET_EFAULT
;
9545 ret
= get_errno(truncate(p
, arg2
));
9546 unlock_user(p
, arg1
, 0);
9549 #ifdef TARGET_NR_ftruncate
9550 case TARGET_NR_ftruncate
:
9551 return get_errno(ftruncate(arg1
, arg2
));
9553 case TARGET_NR_fchmod
:
9554 return get_errno(fchmod(arg1
, arg2
));
9555 #if defined(TARGET_NR_fchmodat)
9556 case TARGET_NR_fchmodat
:
9557 if (!(p
= lock_user_string(arg2
)))
9558 return -TARGET_EFAULT
;
9559 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9560 unlock_user(p
, arg2
, 0);
9563 case TARGET_NR_getpriority
:
9564 /* Note that negative values are valid for getpriority, so we must
9565 differentiate based on errno settings. */
9567 ret
= getpriority(arg1
, arg2
);
9568 if (ret
== -1 && errno
!= 0) {
9569 return -host_to_target_errno(errno
);
9572 /* Return value is the unbiased priority. Signal no error. */
9573 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9575 /* Return value is a biased priority to avoid negative numbers. */
9579 case TARGET_NR_setpriority
:
9580 return get_errno(setpriority(arg1
, arg2
, arg3
));
9581 #ifdef TARGET_NR_statfs
9582 case TARGET_NR_statfs
:
9583 if (!(p
= lock_user_string(arg1
))) {
9584 return -TARGET_EFAULT
;
9586 ret
= get_errno(statfs(path(p
), &stfs
));
9587 unlock_user(p
, arg1
, 0);
9589 if (!is_error(ret
)) {
9590 struct target_statfs
*target_stfs
;
9592 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9593 return -TARGET_EFAULT
;
9594 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9595 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9596 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9597 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9598 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9599 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9600 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9601 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9602 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9603 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9604 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9605 #ifdef _STATFS_F_FLAGS
9606 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9608 __put_user(0, &target_stfs
->f_flags
);
9610 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9611 unlock_user_struct(target_stfs
, arg2
, 1);
9615 #ifdef TARGET_NR_fstatfs
9616 case TARGET_NR_fstatfs
:
9617 ret
= get_errno(fstatfs(arg1
, &stfs
));
9618 goto convert_statfs
;
9620 #ifdef TARGET_NR_statfs64
9621 case TARGET_NR_statfs64
:
9622 if (!(p
= lock_user_string(arg1
))) {
9623 return -TARGET_EFAULT
;
9625 ret
= get_errno(statfs(path(p
), &stfs
));
9626 unlock_user(p
, arg1
, 0);
9628 if (!is_error(ret
)) {
9629 struct target_statfs64
*target_stfs
;
9631 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9632 return -TARGET_EFAULT
;
9633 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9634 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9635 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9636 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9637 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9638 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9639 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9640 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9641 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9642 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9643 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9644 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9645 unlock_user_struct(target_stfs
, arg3
, 1);
9648 case TARGET_NR_fstatfs64
:
9649 ret
= get_errno(fstatfs(arg1
, &stfs
));
9650 goto convert_statfs64
;
9652 #ifdef TARGET_NR_socketcall
9653 case TARGET_NR_socketcall
:
9654 return do_socketcall(arg1
, arg2
);
9656 #ifdef TARGET_NR_accept
9657 case TARGET_NR_accept
:
9658 return do_accept4(arg1
, arg2
, arg3
, 0);
9660 #ifdef TARGET_NR_accept4
9661 case TARGET_NR_accept4
:
9662 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9664 #ifdef TARGET_NR_bind
9665 case TARGET_NR_bind
:
9666 return do_bind(arg1
, arg2
, arg3
);
9668 #ifdef TARGET_NR_connect
9669 case TARGET_NR_connect
:
9670 return do_connect(arg1
, arg2
, arg3
);
9672 #ifdef TARGET_NR_getpeername
9673 case TARGET_NR_getpeername
:
9674 return do_getpeername(arg1
, arg2
, arg3
);
9676 #ifdef TARGET_NR_getsockname
9677 case TARGET_NR_getsockname
:
9678 return do_getsockname(arg1
, arg2
, arg3
);
9680 #ifdef TARGET_NR_getsockopt
9681 case TARGET_NR_getsockopt
:
9682 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9684 #ifdef TARGET_NR_listen
9685 case TARGET_NR_listen
:
9686 return get_errno(listen(arg1
, arg2
));
9688 #ifdef TARGET_NR_recv
9689 case TARGET_NR_recv
:
9690 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9692 #ifdef TARGET_NR_recvfrom
9693 case TARGET_NR_recvfrom
:
9694 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9696 #ifdef TARGET_NR_recvmsg
9697 case TARGET_NR_recvmsg
:
9698 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9700 #ifdef TARGET_NR_send
9701 case TARGET_NR_send
:
9702 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9704 #ifdef TARGET_NR_sendmsg
9705 case TARGET_NR_sendmsg
:
9706 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9708 #ifdef TARGET_NR_sendmmsg
9709 case TARGET_NR_sendmmsg
:
9710 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9712 #ifdef TARGET_NR_recvmmsg
9713 case TARGET_NR_recvmmsg
:
9714 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9716 #ifdef TARGET_NR_sendto
9717 case TARGET_NR_sendto
:
9718 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9720 #ifdef TARGET_NR_shutdown
9721 case TARGET_NR_shutdown
:
9722 return get_errno(shutdown(arg1
, arg2
));
9724 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9725 case TARGET_NR_getrandom
:
9726 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9728 return -TARGET_EFAULT
;
9730 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9731 unlock_user(p
, arg1
, ret
);
9734 #ifdef TARGET_NR_socket
9735 case TARGET_NR_socket
:
9736 return do_socket(arg1
, arg2
, arg3
);
9738 #ifdef TARGET_NR_socketpair
9739 case TARGET_NR_socketpair
:
9740 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9742 #ifdef TARGET_NR_setsockopt
9743 case TARGET_NR_setsockopt
:
9744 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9746 #if defined(TARGET_NR_syslog)
9747 case TARGET_NR_syslog
:
9752 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9753 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9754 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9755 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9756 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9757 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9758 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9759 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9760 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9761 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9762 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9763 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9766 return -TARGET_EINVAL
;
9771 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9773 return -TARGET_EFAULT
;
9775 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9776 unlock_user(p
, arg2
, arg3
);
9780 return -TARGET_EINVAL
;
9785 case TARGET_NR_setitimer
:
9787 struct itimerval value
, ovalue
, *pvalue
;
9791 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9792 || copy_from_user_timeval(&pvalue
->it_value
,
9793 arg2
+ sizeof(struct target_timeval
)))
9794 return -TARGET_EFAULT
;
9798 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9799 if (!is_error(ret
) && arg3
) {
9800 if (copy_to_user_timeval(arg3
,
9801 &ovalue
.it_interval
)
9802 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9804 return -TARGET_EFAULT
;
9808 case TARGET_NR_getitimer
:
9810 struct itimerval value
;
9812 ret
= get_errno(getitimer(arg1
, &value
));
9813 if (!is_error(ret
) && arg2
) {
9814 if (copy_to_user_timeval(arg2
,
9816 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9818 return -TARGET_EFAULT
;
9822 #ifdef TARGET_NR_stat
9823 case TARGET_NR_stat
:
9824 if (!(p
= lock_user_string(arg1
))) {
9825 return -TARGET_EFAULT
;
9827 ret
= get_errno(stat(path(p
), &st
));
9828 unlock_user(p
, arg1
, 0);
9831 #ifdef TARGET_NR_lstat
9832 case TARGET_NR_lstat
:
9833 if (!(p
= lock_user_string(arg1
))) {
9834 return -TARGET_EFAULT
;
9836 ret
= get_errno(lstat(path(p
), &st
));
9837 unlock_user(p
, arg1
, 0);
9840 #ifdef TARGET_NR_fstat
9841 case TARGET_NR_fstat
:
9843 ret
= get_errno(fstat(arg1
, &st
));
9844 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9847 if (!is_error(ret
)) {
9848 struct target_stat
*target_st
;
9850 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9851 return -TARGET_EFAULT
;
9852 memset(target_st
, 0, sizeof(*target_st
));
9853 __put_user(st
.st_dev
, &target_st
->st_dev
);
9854 __put_user(st
.st_ino
, &target_st
->st_ino
);
9855 __put_user(st
.st_mode
, &target_st
->st_mode
);
9856 __put_user(st
.st_uid
, &target_st
->st_uid
);
9857 __put_user(st
.st_gid
, &target_st
->st_gid
);
9858 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9859 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9860 __put_user(st
.st_size
, &target_st
->st_size
);
9861 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9862 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9863 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9864 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9865 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9866 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9867 defined(TARGET_STAT_HAVE_NSEC)
9868 __put_user(st
.st_atim
.tv_nsec
,
9869 &target_st
->target_st_atime_nsec
);
9870 __put_user(st
.st_mtim
.tv_nsec
,
9871 &target_st
->target_st_mtime_nsec
);
9872 __put_user(st
.st_ctim
.tv_nsec
,
9873 &target_st
->target_st_ctime_nsec
);
9875 unlock_user_struct(target_st
, arg2
, 1);
9880 case TARGET_NR_vhangup
:
9881 return get_errno(vhangup());
9882 #ifdef TARGET_NR_syscall
9883 case TARGET_NR_syscall
:
9884 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9885 arg6
, arg7
, arg8
, 0);
9887 #if defined(TARGET_NR_wait4)
9888 case TARGET_NR_wait4
:
9891 abi_long status_ptr
= arg2
;
9892 struct rusage rusage
, *rusage_ptr
;
9893 abi_ulong target_rusage
= arg4
;
9894 abi_long rusage_err
;
9896 rusage_ptr
= &rusage
;
9899 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9900 if (!is_error(ret
)) {
9901 if (status_ptr
&& ret
) {
9902 status
= host_to_target_waitstatus(status
);
9903 if (put_user_s32(status
, status_ptr
))
9904 return -TARGET_EFAULT
;
9906 if (target_rusage
) {
9907 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9916 #ifdef TARGET_NR_swapoff
9917 case TARGET_NR_swapoff
:
9918 if (!(p
= lock_user_string(arg1
)))
9919 return -TARGET_EFAULT
;
9920 ret
= get_errno(swapoff(p
));
9921 unlock_user(p
, arg1
, 0);
9924 case TARGET_NR_sysinfo
:
9926 struct target_sysinfo
*target_value
;
9927 struct sysinfo value
;
9928 ret
= get_errno(sysinfo(&value
));
9929 if (!is_error(ret
) && arg1
)
9931 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9932 return -TARGET_EFAULT
;
9933 __put_user(value
.uptime
, &target_value
->uptime
);
9934 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9935 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9936 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9937 __put_user(value
.totalram
, &target_value
->totalram
);
9938 __put_user(value
.freeram
, &target_value
->freeram
);
9939 __put_user(value
.sharedram
, &target_value
->sharedram
);
9940 __put_user(value
.bufferram
, &target_value
->bufferram
);
9941 __put_user(value
.totalswap
, &target_value
->totalswap
);
9942 __put_user(value
.freeswap
, &target_value
->freeswap
);
9943 __put_user(value
.procs
, &target_value
->procs
);
9944 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9945 __put_user(value
.freehigh
, &target_value
->freehigh
);
9946 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9947 unlock_user_struct(target_value
, arg1
, 1);
9951 #ifdef TARGET_NR_ipc
9953 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9955 #ifdef TARGET_NR_semget
9956 case TARGET_NR_semget
:
9957 return get_errno(semget(arg1
, arg2
, arg3
));
9959 #ifdef TARGET_NR_semop
9960 case TARGET_NR_semop
:
9961 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
9963 #ifdef TARGET_NR_semtimedop
9964 case TARGET_NR_semtimedop
:
9965 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
9967 #ifdef TARGET_NR_semtimedop_time64
9968 case TARGET_NR_semtimedop_time64
:
9969 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
9971 #ifdef TARGET_NR_semctl
9972 case TARGET_NR_semctl
:
9973 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9975 #ifdef TARGET_NR_msgctl
9976 case TARGET_NR_msgctl
:
9977 return do_msgctl(arg1
, arg2
, arg3
);
9979 #ifdef TARGET_NR_msgget
9980 case TARGET_NR_msgget
:
9981 return get_errno(msgget(arg1
, arg2
));
9983 #ifdef TARGET_NR_msgrcv
9984 case TARGET_NR_msgrcv
:
9985 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9987 #ifdef TARGET_NR_msgsnd
9988 case TARGET_NR_msgsnd
:
9989 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9991 #ifdef TARGET_NR_shmget
9992 case TARGET_NR_shmget
:
9993 return get_errno(shmget(arg1
, arg2
, arg3
));
9995 #ifdef TARGET_NR_shmctl
9996 case TARGET_NR_shmctl
:
9997 return do_shmctl(arg1
, arg2
, arg3
);
9999 #ifdef TARGET_NR_shmat
10000 case TARGET_NR_shmat
:
10001 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10003 #ifdef TARGET_NR_shmdt
10004 case TARGET_NR_shmdt
:
10005 return do_shmdt(arg1
);
10007 case TARGET_NR_fsync
:
10008 return get_errno(fsync(arg1
));
10009 case TARGET_NR_clone
:
10010 /* Linux manages to have three different orderings for its
10011 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10012 * match the kernel's CONFIG_CLONE_* settings.
10013 * Microblaze is further special in that it uses a sixth
10014 * implicit argument to clone for the TLS pointer.
10016 #if defined(TARGET_MICROBLAZE)
10017 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10018 #elif defined(TARGET_CLONE_BACKWARDS)
10019 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10020 #elif defined(TARGET_CLONE_BACKWARDS2)
10021 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10023 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10026 #ifdef __NR_exit_group
10027 /* new thread calls */
10028 case TARGET_NR_exit_group
:
10029 preexit_cleanup(cpu_env
, arg1
);
10030 return get_errno(exit_group(arg1
));
10032 case TARGET_NR_setdomainname
:
10033 if (!(p
= lock_user_string(arg1
)))
10034 return -TARGET_EFAULT
;
10035 ret
= get_errno(setdomainname(p
, arg2
));
10036 unlock_user(p
, arg1
, 0);
10038 case TARGET_NR_uname
:
10039 /* no need to transcode because we use the linux syscall */
10041 struct new_utsname
* buf
;
10043 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10044 return -TARGET_EFAULT
;
10045 ret
= get_errno(sys_uname(buf
));
10046 if (!is_error(ret
)) {
10047 /* Overwrite the native machine name with whatever is being
10049 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10050 sizeof(buf
->machine
));
10051 /* Allow the user to override the reported release. */
10052 if (qemu_uname_release
&& *qemu_uname_release
) {
10053 g_strlcpy(buf
->release
, qemu_uname_release
,
10054 sizeof(buf
->release
));
10057 unlock_user_struct(buf
, arg1
, 1);
10061 case TARGET_NR_modify_ldt
:
10062 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10063 #if !defined(TARGET_X86_64)
10064 case TARGET_NR_vm86
:
10065 return do_vm86(cpu_env
, arg1
, arg2
);
10068 #if defined(TARGET_NR_adjtimex)
10069 case TARGET_NR_adjtimex
:
10071 struct timex host_buf
;
10073 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10074 return -TARGET_EFAULT
;
10076 ret
= get_errno(adjtimex(&host_buf
));
10077 if (!is_error(ret
)) {
10078 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10079 return -TARGET_EFAULT
;
10085 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10086 case TARGET_NR_clock_adjtime
:
10088 struct timex htx
, *phtx
= &htx
;
10090 if (target_to_host_timex(phtx
, arg2
) != 0) {
10091 return -TARGET_EFAULT
;
10093 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10094 if (!is_error(ret
) && phtx
) {
10095 if (host_to_target_timex(arg2
, phtx
) != 0) {
10096 return -TARGET_EFAULT
;
10102 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10103 case TARGET_NR_clock_adjtime64
:
10107 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10108 return -TARGET_EFAULT
;
10110 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10111 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10112 return -TARGET_EFAULT
;
10117 case TARGET_NR_getpgid
:
10118 return get_errno(getpgid(arg1
));
10119 case TARGET_NR_fchdir
:
10120 return get_errno(fchdir(arg1
));
10121 case TARGET_NR_personality
:
10122 return get_errno(personality(arg1
));
10123 #ifdef TARGET_NR__llseek /* Not on alpha */
10124 case TARGET_NR__llseek
:
10127 #if !defined(__NR_llseek)
10128 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10130 ret
= get_errno(res
);
10135 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10137 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10138 return -TARGET_EFAULT
;
10143 #ifdef TARGET_NR_getdents
10144 case TARGET_NR_getdents
:
10145 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10146 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10148 struct target_dirent
*target_dirp
;
10149 struct linux_dirent
*dirp
;
10150 abi_long count
= arg3
;
10152 dirp
= g_try_malloc(count
);
10154 return -TARGET_ENOMEM
;
10157 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10158 if (!is_error(ret
)) {
10159 struct linux_dirent
*de
;
10160 struct target_dirent
*tde
;
10162 int reclen
, treclen
;
10163 int count1
, tnamelen
;
10167 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10168 return -TARGET_EFAULT
;
10171 reclen
= de
->d_reclen
;
10172 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10173 assert(tnamelen
>= 0);
10174 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10175 assert(count1
+ treclen
<= count
);
10176 tde
->d_reclen
= tswap16(treclen
);
10177 tde
->d_ino
= tswapal(de
->d_ino
);
10178 tde
->d_off
= tswapal(de
->d_off
);
10179 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10180 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10182 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10186 unlock_user(target_dirp
, arg2
, ret
);
10192 struct linux_dirent
*dirp
;
10193 abi_long count
= arg3
;
10195 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10196 return -TARGET_EFAULT
;
10197 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10198 if (!is_error(ret
)) {
10199 struct linux_dirent
*de
;
10204 reclen
= de
->d_reclen
;
10207 de
->d_reclen
= tswap16(reclen
);
10208 tswapls(&de
->d_ino
);
10209 tswapls(&de
->d_off
);
10210 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10214 unlock_user(dirp
, arg2
, ret
);
10218 /* Implement getdents in terms of getdents64 */
10220 struct linux_dirent64
*dirp
;
10221 abi_long count
= arg3
;
10223 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10225 return -TARGET_EFAULT
;
10227 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10228 if (!is_error(ret
)) {
10229 /* Convert the dirent64 structs to target dirent. We do this
10230 * in-place, since we can guarantee that a target_dirent is no
10231 * larger than a dirent64; however this means we have to be
10232 * careful to read everything before writing in the new format.
10234 struct linux_dirent64
*de
;
10235 struct target_dirent
*tde
;
10240 tde
= (struct target_dirent
*)dirp
;
10242 int namelen
, treclen
;
10243 int reclen
= de
->d_reclen
;
10244 uint64_t ino
= de
->d_ino
;
10245 int64_t off
= de
->d_off
;
10246 uint8_t type
= de
->d_type
;
10248 namelen
= strlen(de
->d_name
);
10249 treclen
= offsetof(struct target_dirent
, d_name
)
10251 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10253 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10254 tde
->d_ino
= tswapal(ino
);
10255 tde
->d_off
= tswapal(off
);
10256 tde
->d_reclen
= tswap16(treclen
);
10257 /* The target_dirent type is in what was formerly a padding
10258 * byte at the end of the structure:
10260 *(((char *)tde
) + treclen
- 1) = type
;
10262 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10263 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10269 unlock_user(dirp
, arg2
, ret
);
10273 #endif /* TARGET_NR_getdents */
10274 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10275 case TARGET_NR_getdents64
:
10277 struct linux_dirent64
*dirp
;
10278 abi_long count
= arg3
;
10279 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10280 return -TARGET_EFAULT
;
10281 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10282 if (!is_error(ret
)) {
10283 struct linux_dirent64
*de
;
10288 reclen
= de
->d_reclen
;
10291 de
->d_reclen
= tswap16(reclen
);
10292 tswap64s((uint64_t *)&de
->d_ino
);
10293 tswap64s((uint64_t *)&de
->d_off
);
10294 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10298 unlock_user(dirp
, arg2
, ret
);
10301 #endif /* TARGET_NR_getdents64 */
10302 #if defined(TARGET_NR__newselect)
10303 case TARGET_NR__newselect
:
10304 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10306 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10307 # ifdef TARGET_NR_poll
10308 case TARGET_NR_poll
:
10310 # ifdef TARGET_NR_ppoll
10311 case TARGET_NR_ppoll
:
10314 struct target_pollfd
*target_pfd
;
10315 unsigned int nfds
= arg2
;
10316 struct pollfd
*pfd
;
10322 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10323 return -TARGET_EINVAL
;
10326 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10327 sizeof(struct target_pollfd
) * nfds
, 1);
10329 return -TARGET_EFAULT
;
10332 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10333 for (i
= 0; i
< nfds
; i
++) {
10334 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10335 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10340 # ifdef TARGET_NR_ppoll
10341 case TARGET_NR_ppoll
:
10343 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10344 target_sigset_t
*target_set
;
10345 sigset_t _set
, *set
= &_set
;
10348 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10349 unlock_user(target_pfd
, arg1
, 0);
10350 return -TARGET_EFAULT
;
10357 if (arg5
!= sizeof(target_sigset_t
)) {
10358 unlock_user(target_pfd
, arg1
, 0);
10359 return -TARGET_EINVAL
;
10362 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10364 unlock_user(target_pfd
, arg1
, 0);
10365 return -TARGET_EFAULT
;
10367 target_to_host_sigset(set
, target_set
);
10372 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10373 set
, SIGSET_T_SIZE
));
10375 if (!is_error(ret
) && arg3
) {
10376 host_to_target_timespec(arg3
, timeout_ts
);
10379 unlock_user(target_set
, arg4
, 0);
10384 # ifdef TARGET_NR_poll
10385 case TARGET_NR_poll
:
10387 struct timespec ts
, *pts
;
10390 /* Convert ms to secs, ns */
10391 ts
.tv_sec
= arg3
/ 1000;
10392 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10395 /* -ve poll() timeout means "infinite" */
10398 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10403 g_assert_not_reached();
10406 if (!is_error(ret
)) {
10407 for(i
= 0; i
< nfds
; i
++) {
10408 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10411 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10415 case TARGET_NR_flock
:
10416 /* NOTE: the flock constant seems to be the same for every
10418 return get_errno(safe_flock(arg1
, arg2
));
10419 case TARGET_NR_readv
:
10421 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10423 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10424 unlock_iovec(vec
, arg2
, arg3
, 1);
10426 ret
= -host_to_target_errno(errno
);
10430 case TARGET_NR_writev
:
10432 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10434 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10435 unlock_iovec(vec
, arg2
, arg3
, 0);
10437 ret
= -host_to_target_errno(errno
);
10441 #if defined(TARGET_NR_preadv)
10442 case TARGET_NR_preadv
:
10444 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10446 unsigned long low
, high
;
10448 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10449 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10450 unlock_iovec(vec
, arg2
, arg3
, 1);
10452 ret
= -host_to_target_errno(errno
);
10457 #if defined(TARGET_NR_pwritev)
10458 case TARGET_NR_pwritev
:
10460 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10462 unsigned long low
, high
;
10464 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10465 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10466 unlock_iovec(vec
, arg2
, arg3
, 0);
10468 ret
= -host_to_target_errno(errno
);
10473 case TARGET_NR_getsid
:
10474 return get_errno(getsid(arg1
));
10475 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10476 case TARGET_NR_fdatasync
:
10477 return get_errno(fdatasync(arg1
));
10479 #ifdef TARGET_NR__sysctl
10480 case TARGET_NR__sysctl
:
10481 /* We don't implement this, but ENOTDIR is always a safe
10483 return -TARGET_ENOTDIR
;
10485 case TARGET_NR_sched_getaffinity
:
10487 unsigned int mask_size
;
10488 unsigned long *mask
;
10491 * sched_getaffinity needs multiples of ulong, so need to take
10492 * care of mismatches between target ulong and host ulong sizes.
10494 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10495 return -TARGET_EINVAL
;
10497 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10499 mask
= alloca(mask_size
);
10500 memset(mask
, 0, mask_size
);
10501 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10503 if (!is_error(ret
)) {
10505 /* More data returned than the caller's buffer will fit.
10506 * This only happens if sizeof(abi_long) < sizeof(long)
10507 * and the caller passed us a buffer holding an odd number
10508 * of abi_longs. If the host kernel is actually using the
10509 * extra 4 bytes then fail EINVAL; otherwise we can just
10510 * ignore them and only copy the interesting part.
10512 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10513 if (numcpus
> arg2
* 8) {
10514 return -TARGET_EINVAL
;
10519 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10520 return -TARGET_EFAULT
;
10525 case TARGET_NR_sched_setaffinity
:
10527 unsigned int mask_size
;
10528 unsigned long *mask
;
10531 * sched_setaffinity needs multiples of ulong, so need to take
10532 * care of mismatches between target ulong and host ulong sizes.
10534 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10535 return -TARGET_EINVAL
;
10537 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10538 mask
= alloca(mask_size
);
10540 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10545 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10547 case TARGET_NR_getcpu
:
10549 unsigned cpu
, node
;
10550 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10551 arg2
? &node
: NULL
,
10553 if (is_error(ret
)) {
10556 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10557 return -TARGET_EFAULT
;
10559 if (arg2
&& put_user_u32(node
, arg2
)) {
10560 return -TARGET_EFAULT
;
10564 case TARGET_NR_sched_setparam
:
10566 struct sched_param
*target_schp
;
10567 struct sched_param schp
;
10570 return -TARGET_EINVAL
;
10572 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10573 return -TARGET_EFAULT
;
10574 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10575 unlock_user_struct(target_schp
, arg2
, 0);
10576 return get_errno(sched_setparam(arg1
, &schp
));
10578 case TARGET_NR_sched_getparam
:
10580 struct sched_param
*target_schp
;
10581 struct sched_param schp
;
10584 return -TARGET_EINVAL
;
10586 ret
= get_errno(sched_getparam(arg1
, &schp
));
10587 if (!is_error(ret
)) {
10588 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10589 return -TARGET_EFAULT
;
10590 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10591 unlock_user_struct(target_schp
, arg2
, 1);
10595 case TARGET_NR_sched_setscheduler
:
10597 struct sched_param
*target_schp
;
10598 struct sched_param schp
;
10600 return -TARGET_EINVAL
;
10602 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10603 return -TARGET_EFAULT
;
10604 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10605 unlock_user_struct(target_schp
, arg3
, 0);
10606 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10608 case TARGET_NR_sched_getscheduler
:
10609 return get_errno(sched_getscheduler(arg1
));
10610 case TARGET_NR_sched_yield
:
10611 return get_errno(sched_yield());
10612 case TARGET_NR_sched_get_priority_max
:
10613 return get_errno(sched_get_priority_max(arg1
));
10614 case TARGET_NR_sched_get_priority_min
:
10615 return get_errno(sched_get_priority_min(arg1
));
10616 #ifdef TARGET_NR_sched_rr_get_interval
10617 case TARGET_NR_sched_rr_get_interval
:
10619 struct timespec ts
;
10620 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10621 if (!is_error(ret
)) {
10622 ret
= host_to_target_timespec(arg2
, &ts
);
10627 #ifdef TARGET_NR_sched_rr_get_interval_time64
10628 case TARGET_NR_sched_rr_get_interval_time64
:
10630 struct timespec ts
;
10631 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10632 if (!is_error(ret
)) {
10633 ret
= host_to_target_timespec64(arg2
, &ts
);
10638 #if defined(TARGET_NR_nanosleep)
10639 case TARGET_NR_nanosleep
:
10641 struct timespec req
, rem
;
10642 target_to_host_timespec(&req
, arg1
);
10643 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10644 if (is_error(ret
) && arg2
) {
10645 host_to_target_timespec(arg2
, &rem
);
10650 case TARGET_NR_prctl
:
10652 case PR_GET_PDEATHSIG
:
10655 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10656 if (!is_error(ret
) && arg2
10657 && put_user_ual(deathsig
, arg2
)) {
10658 return -TARGET_EFAULT
;
10665 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10667 return -TARGET_EFAULT
;
10669 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10670 arg3
, arg4
, arg5
));
10671 unlock_user(name
, arg2
, 16);
10676 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10678 return -TARGET_EFAULT
;
10680 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10681 arg3
, arg4
, arg5
));
10682 unlock_user(name
, arg2
, 0);
10687 case TARGET_PR_GET_FP_MODE
:
10689 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10691 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10692 ret
|= TARGET_PR_FP_MODE_FR
;
10694 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10695 ret
|= TARGET_PR_FP_MODE_FRE
;
10699 case TARGET_PR_SET_FP_MODE
:
10701 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10702 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10703 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10704 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10705 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10707 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10708 TARGET_PR_FP_MODE_FRE
;
10710 /* If nothing to change, return right away, successfully. */
10711 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10714 /* Check the value is valid */
10715 if (arg2
& ~known_bits
) {
10716 return -TARGET_EOPNOTSUPP
;
10718 /* Setting FRE without FR is not supported. */
10719 if (new_fre
&& !new_fr
) {
10720 return -TARGET_EOPNOTSUPP
;
10722 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10723 /* FR1 is not supported */
10724 return -TARGET_EOPNOTSUPP
;
10726 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10727 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10728 /* cannot set FR=0 */
10729 return -TARGET_EOPNOTSUPP
;
10731 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10732 /* Cannot set FRE=1 */
10733 return -TARGET_EOPNOTSUPP
;
10737 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10738 for (i
= 0; i
< 32 ; i
+= 2) {
10739 if (!old_fr
&& new_fr
) {
10740 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10741 } else if (old_fr
&& !new_fr
) {
10742 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10747 env
->CP0_Status
|= (1 << CP0St_FR
);
10748 env
->hflags
|= MIPS_HFLAG_F64
;
10750 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10751 env
->hflags
&= ~MIPS_HFLAG_F64
;
10754 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10755 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10756 env
->hflags
|= MIPS_HFLAG_FRE
;
10759 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10760 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10766 #ifdef TARGET_AARCH64
10767 case TARGET_PR_SVE_SET_VL
:
10769 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10770 * PR_SVE_VL_INHERIT. Note the kernel definition
10771 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10772 * even though the current architectural maximum is VQ=16.
10774 ret
= -TARGET_EINVAL
;
10775 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10776 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10777 CPUARMState
*env
= cpu_env
;
10778 ARMCPU
*cpu
= env_archcpu(env
);
10779 uint32_t vq
, old_vq
;
10781 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10782 vq
= MAX(arg2
/ 16, 1);
10783 vq
= MIN(vq
, cpu
->sve_max_vq
);
10786 aarch64_sve_narrow_vq(env
, vq
);
10788 env
->vfp
.zcr_el
[1] = vq
- 1;
10789 arm_rebuild_hflags(env
);
10793 case TARGET_PR_SVE_GET_VL
:
10794 ret
= -TARGET_EINVAL
;
10796 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10797 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10798 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10802 case TARGET_PR_PAC_RESET_KEYS
:
10804 CPUARMState
*env
= cpu_env
;
10805 ARMCPU
*cpu
= env_archcpu(env
);
10807 if (arg3
|| arg4
|| arg5
) {
10808 return -TARGET_EINVAL
;
10810 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10811 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10812 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10813 TARGET_PR_PAC_APGAKEY
);
10819 } else if (arg2
& ~all
) {
10820 return -TARGET_EINVAL
;
10822 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10823 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10824 sizeof(ARMPACKey
), &err
);
10826 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10827 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10828 sizeof(ARMPACKey
), &err
);
10830 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10831 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10832 sizeof(ARMPACKey
), &err
);
10834 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10835 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10836 sizeof(ARMPACKey
), &err
);
10838 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10839 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10840 sizeof(ARMPACKey
), &err
);
10844 * Some unknown failure in the crypto. The best
10845 * we can do is log it and fail the syscall.
10846 * The real syscall cannot fail this way.
10848 qemu_log_mask(LOG_UNIMP
,
10849 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10850 error_get_pretty(err
));
10852 return -TARGET_EIO
;
10857 return -TARGET_EINVAL
;
10858 #endif /* AARCH64 */
10859 case PR_GET_SECCOMP
:
10860 case PR_SET_SECCOMP
:
10861 /* Disable seccomp to prevent the target disabling syscalls we
10863 return -TARGET_EINVAL
;
10865 /* Most prctl options have no pointer arguments */
10866 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10869 #ifdef TARGET_NR_arch_prctl
10870 case TARGET_NR_arch_prctl
:
10871 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10873 #ifdef TARGET_NR_pread64
10874 case TARGET_NR_pread64
:
10875 if (regpairs_aligned(cpu_env
, num
)) {
10879 if (arg2
== 0 && arg3
== 0) {
10880 /* Special-case NULL buffer and zero length, which should succeed */
10883 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10885 return -TARGET_EFAULT
;
10888 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10889 unlock_user(p
, arg2
, ret
);
10891 case TARGET_NR_pwrite64
:
10892 if (regpairs_aligned(cpu_env
, num
)) {
10896 if (arg2
== 0 && arg3
== 0) {
10897 /* Special-case NULL buffer and zero length, which should succeed */
10900 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10902 return -TARGET_EFAULT
;
10905 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10906 unlock_user(p
, arg2
, 0);
10909 case TARGET_NR_getcwd
:
10910 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10911 return -TARGET_EFAULT
;
10912 ret
= get_errno(sys_getcwd1(p
, arg2
));
10913 unlock_user(p
, arg1
, ret
);
10915 case TARGET_NR_capget
:
10916 case TARGET_NR_capset
:
10918 struct target_user_cap_header
*target_header
;
10919 struct target_user_cap_data
*target_data
= NULL
;
10920 struct __user_cap_header_struct header
;
10921 struct __user_cap_data_struct data
[2];
10922 struct __user_cap_data_struct
*dataptr
= NULL
;
10923 int i
, target_datalen
;
10924 int data_items
= 1;
10926 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10927 return -TARGET_EFAULT
;
10929 header
.version
= tswap32(target_header
->version
);
10930 header
.pid
= tswap32(target_header
->pid
);
10932 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10933 /* Version 2 and up takes pointer to two user_data structs */
10937 target_datalen
= sizeof(*target_data
) * data_items
;
10940 if (num
== TARGET_NR_capget
) {
10941 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10943 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10945 if (!target_data
) {
10946 unlock_user_struct(target_header
, arg1
, 0);
10947 return -TARGET_EFAULT
;
10950 if (num
== TARGET_NR_capset
) {
10951 for (i
= 0; i
< data_items
; i
++) {
10952 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10953 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10954 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10961 if (num
== TARGET_NR_capget
) {
10962 ret
= get_errno(capget(&header
, dataptr
));
10964 ret
= get_errno(capset(&header
, dataptr
));
10967 /* The kernel always updates version for both capget and capset */
10968 target_header
->version
= tswap32(header
.version
);
10969 unlock_user_struct(target_header
, arg1
, 1);
10972 if (num
== TARGET_NR_capget
) {
10973 for (i
= 0; i
< data_items
; i
++) {
10974 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10975 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10976 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10978 unlock_user(target_data
, arg2
, target_datalen
);
10980 unlock_user(target_data
, arg2
, 0);
10985 case TARGET_NR_sigaltstack
:
10986 return do_sigaltstack(arg1
, arg2
,
10987 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10989 #ifdef CONFIG_SENDFILE
10990 #ifdef TARGET_NR_sendfile
10991 case TARGET_NR_sendfile
:
10993 off_t
*offp
= NULL
;
10996 ret
= get_user_sal(off
, arg3
);
10997 if (is_error(ret
)) {
11002 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11003 if (!is_error(ret
) && arg3
) {
11004 abi_long ret2
= put_user_sal(off
, arg3
);
11005 if (is_error(ret2
)) {
11012 #ifdef TARGET_NR_sendfile64
11013 case TARGET_NR_sendfile64
:
11015 off_t
*offp
= NULL
;
11018 ret
= get_user_s64(off
, arg3
);
11019 if (is_error(ret
)) {
11024 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11025 if (!is_error(ret
) && arg3
) {
11026 abi_long ret2
= put_user_s64(off
, arg3
);
11027 if (is_error(ret2
)) {
11035 #ifdef TARGET_NR_vfork
11036 case TARGET_NR_vfork
:
11037 return get_errno(do_fork(cpu_env
,
11038 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11041 #ifdef TARGET_NR_ugetrlimit
11042 case TARGET_NR_ugetrlimit
:
11044 struct rlimit rlim
;
11045 int resource
= target_to_host_resource(arg1
);
11046 ret
= get_errno(getrlimit(resource
, &rlim
));
11047 if (!is_error(ret
)) {
11048 struct target_rlimit
*target_rlim
;
11049 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11050 return -TARGET_EFAULT
;
11051 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11052 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11053 unlock_user_struct(target_rlim
, arg2
, 1);
11058 #ifdef TARGET_NR_truncate64
11059 case TARGET_NR_truncate64
:
11060 if (!(p
= lock_user_string(arg1
)))
11061 return -TARGET_EFAULT
;
11062 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11063 unlock_user(p
, arg1
, 0);
11066 #ifdef TARGET_NR_ftruncate64
11067 case TARGET_NR_ftruncate64
:
11068 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11070 #ifdef TARGET_NR_stat64
11071 case TARGET_NR_stat64
:
11072 if (!(p
= lock_user_string(arg1
))) {
11073 return -TARGET_EFAULT
;
11075 ret
= get_errno(stat(path(p
), &st
));
11076 unlock_user(p
, arg1
, 0);
11077 if (!is_error(ret
))
11078 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11081 #ifdef TARGET_NR_lstat64
11082 case TARGET_NR_lstat64
:
11083 if (!(p
= lock_user_string(arg1
))) {
11084 return -TARGET_EFAULT
;
11086 ret
= get_errno(lstat(path(p
), &st
));
11087 unlock_user(p
, arg1
, 0);
11088 if (!is_error(ret
))
11089 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11092 #ifdef TARGET_NR_fstat64
11093 case TARGET_NR_fstat64
:
11094 ret
= get_errno(fstat(arg1
, &st
));
11095 if (!is_error(ret
))
11096 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11099 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11100 #ifdef TARGET_NR_fstatat64
11101 case TARGET_NR_fstatat64
:
11103 #ifdef TARGET_NR_newfstatat
11104 case TARGET_NR_newfstatat
:
11106 if (!(p
= lock_user_string(arg2
))) {
11107 return -TARGET_EFAULT
;
11109 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11110 unlock_user(p
, arg2
, 0);
11111 if (!is_error(ret
))
11112 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11115 #if defined(TARGET_NR_statx)
11116 case TARGET_NR_statx
:
11118 struct target_statx
*target_stx
;
11122 p
= lock_user_string(arg2
);
11124 return -TARGET_EFAULT
;
11126 #if defined(__NR_statx)
11129 * It is assumed that struct statx is architecture independent.
11131 struct target_statx host_stx
;
11134 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11135 if (!is_error(ret
)) {
11136 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11137 unlock_user(p
, arg2
, 0);
11138 return -TARGET_EFAULT
;
11142 if (ret
!= -TARGET_ENOSYS
) {
11143 unlock_user(p
, arg2
, 0);
11148 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11149 unlock_user(p
, arg2
, 0);
11151 if (!is_error(ret
)) {
11152 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11153 return -TARGET_EFAULT
;
11155 memset(target_stx
, 0, sizeof(*target_stx
));
11156 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11157 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11158 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11159 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11160 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11161 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11162 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11163 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11164 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11165 __put_user(st
.st_size
, &target_stx
->stx_size
);
11166 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11167 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11168 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11169 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11170 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11171 unlock_user_struct(target_stx
, arg5
, 1);
11176 #ifdef TARGET_NR_lchown
11177 case TARGET_NR_lchown
:
11178 if (!(p
= lock_user_string(arg1
)))
11179 return -TARGET_EFAULT
;
11180 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11181 unlock_user(p
, arg1
, 0);
11184 #ifdef TARGET_NR_getuid
11185 case TARGET_NR_getuid
:
11186 return get_errno(high2lowuid(getuid()));
11188 #ifdef TARGET_NR_getgid
11189 case TARGET_NR_getgid
:
11190 return get_errno(high2lowgid(getgid()));
11192 #ifdef TARGET_NR_geteuid
11193 case TARGET_NR_geteuid
:
11194 return get_errno(high2lowuid(geteuid()));
11196 #ifdef TARGET_NR_getegid
11197 case TARGET_NR_getegid
:
11198 return get_errno(high2lowgid(getegid()));
11200 case TARGET_NR_setreuid
:
11201 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11202 case TARGET_NR_setregid
:
11203 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11204 case TARGET_NR_getgroups
:
11206 int gidsetsize
= arg1
;
11207 target_id
*target_grouplist
;
11211 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11212 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11213 if (gidsetsize
== 0)
11215 if (!is_error(ret
)) {
11216 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11217 if (!target_grouplist
)
11218 return -TARGET_EFAULT
;
11219 for(i
= 0;i
< ret
; i
++)
11220 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11221 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11225 case TARGET_NR_setgroups
:
11227 int gidsetsize
= arg1
;
11228 target_id
*target_grouplist
;
11229 gid_t
*grouplist
= NULL
;
11232 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11233 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11234 if (!target_grouplist
) {
11235 return -TARGET_EFAULT
;
11237 for (i
= 0; i
< gidsetsize
; i
++) {
11238 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11240 unlock_user(target_grouplist
, arg2
, 0);
11242 return get_errno(setgroups(gidsetsize
, grouplist
));
11244 case TARGET_NR_fchown
:
11245 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11246 #if defined(TARGET_NR_fchownat)
11247 case TARGET_NR_fchownat
:
11248 if (!(p
= lock_user_string(arg2
)))
11249 return -TARGET_EFAULT
;
11250 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11251 low2highgid(arg4
), arg5
));
11252 unlock_user(p
, arg2
, 0);
11255 #ifdef TARGET_NR_setresuid
11256 case TARGET_NR_setresuid
:
11257 return get_errno(sys_setresuid(low2highuid(arg1
),
11259 low2highuid(arg3
)));
11261 #ifdef TARGET_NR_getresuid
11262 case TARGET_NR_getresuid
:
11264 uid_t ruid
, euid
, suid
;
11265 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11266 if (!is_error(ret
)) {
11267 if (put_user_id(high2lowuid(ruid
), arg1
)
11268 || put_user_id(high2lowuid(euid
), arg2
)
11269 || put_user_id(high2lowuid(suid
), arg3
))
11270 return -TARGET_EFAULT
;
11275 #ifdef TARGET_NR_getresgid
11276 case TARGET_NR_setresgid
:
11277 return get_errno(sys_setresgid(low2highgid(arg1
),
11279 low2highgid(arg3
)));
11281 #ifdef TARGET_NR_getresgid
11282 case TARGET_NR_getresgid
:
11284 gid_t rgid
, egid
, sgid
;
11285 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11286 if (!is_error(ret
)) {
11287 if (put_user_id(high2lowgid(rgid
), arg1
)
11288 || put_user_id(high2lowgid(egid
), arg2
)
11289 || put_user_id(high2lowgid(sgid
), arg3
))
11290 return -TARGET_EFAULT
;
11295 #ifdef TARGET_NR_chown
11296 case TARGET_NR_chown
:
11297 if (!(p
= lock_user_string(arg1
)))
11298 return -TARGET_EFAULT
;
11299 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11300 unlock_user(p
, arg1
, 0);
11303 case TARGET_NR_setuid
:
11304 return get_errno(sys_setuid(low2highuid(arg1
)));
11305 case TARGET_NR_setgid
:
11306 return get_errno(sys_setgid(low2highgid(arg1
)));
11307 case TARGET_NR_setfsuid
:
11308 return get_errno(setfsuid(arg1
));
11309 case TARGET_NR_setfsgid
:
11310 return get_errno(setfsgid(arg1
));
11312 #ifdef TARGET_NR_lchown32
11313 case TARGET_NR_lchown32
:
11314 if (!(p
= lock_user_string(arg1
)))
11315 return -TARGET_EFAULT
;
11316 ret
= get_errno(lchown(p
, arg2
, arg3
));
11317 unlock_user(p
, arg1
, 0);
11320 #ifdef TARGET_NR_getuid32
11321 case TARGET_NR_getuid32
:
11322 return get_errno(getuid());
11325 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11326 /* Alpha specific */
11327 case TARGET_NR_getxuid
:
11331 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11333 return get_errno(getuid());
11335 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11336 /* Alpha specific */
11337 case TARGET_NR_getxgid
:
11341 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11343 return get_errno(getgid());
11345 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11346 /* Alpha specific */
11347 case TARGET_NR_osf_getsysinfo
:
11348 ret
= -TARGET_EOPNOTSUPP
;
11350 case TARGET_GSI_IEEE_FP_CONTROL
:
11352 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11353 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11355 swcr
&= ~SWCR_STATUS_MASK
;
11356 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11358 if (put_user_u64 (swcr
, arg2
))
11359 return -TARGET_EFAULT
;
11364 /* case GSI_IEEE_STATE_AT_SIGNAL:
11365 -- Not implemented in linux kernel.
11367 -- Retrieves current unaligned access state; not much used.
11368 case GSI_PROC_TYPE:
11369 -- Retrieves implver information; surely not used.
11370 case GSI_GET_HWRPB:
11371 -- Grabs a copy of the HWRPB; surely not used.
11376 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11377 /* Alpha specific */
11378 case TARGET_NR_osf_setsysinfo
:
11379 ret
= -TARGET_EOPNOTSUPP
;
11381 case TARGET_SSI_IEEE_FP_CONTROL
:
11383 uint64_t swcr
, fpcr
;
11385 if (get_user_u64 (swcr
, arg2
)) {
11386 return -TARGET_EFAULT
;
11390 * The kernel calls swcr_update_status to update the
11391 * status bits from the fpcr at every point that it
11392 * could be queried. Therefore, we store the status
11393 * bits only in FPCR.
11395 ((CPUAlphaState
*)cpu_env
)->swcr
11396 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11398 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11399 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11400 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11401 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11406 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11408 uint64_t exc
, fpcr
, fex
;
11410 if (get_user_u64(exc
, arg2
)) {
11411 return -TARGET_EFAULT
;
11413 exc
&= SWCR_STATUS_MASK
;
11414 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11416 /* Old exceptions are not signaled. */
11417 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11419 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11420 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11422 /* Update the hardware fpcr. */
11423 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11424 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11427 int si_code
= TARGET_FPE_FLTUNK
;
11428 target_siginfo_t info
;
11430 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11431 si_code
= TARGET_FPE_FLTUND
;
11433 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11434 si_code
= TARGET_FPE_FLTRES
;
11436 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11437 si_code
= TARGET_FPE_FLTUND
;
11439 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11440 si_code
= TARGET_FPE_FLTOVF
;
11442 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11443 si_code
= TARGET_FPE_FLTDIV
;
11445 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11446 si_code
= TARGET_FPE_FLTINV
;
11449 info
.si_signo
= SIGFPE
;
11451 info
.si_code
= si_code
;
11452 info
._sifields
._sigfault
._addr
11453 = ((CPUArchState
*)cpu_env
)->pc
;
11454 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11455 QEMU_SI_FAULT
, &info
);
11461 /* case SSI_NVPAIRS:
11462 -- Used with SSIN_UACPROC to enable unaligned accesses.
11463 case SSI_IEEE_STATE_AT_SIGNAL:
11464 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11465 -- Not implemented in linux kernel
11470 #ifdef TARGET_NR_osf_sigprocmask
11471 /* Alpha specific. */
11472 case TARGET_NR_osf_sigprocmask
:
11476 sigset_t set
, oldset
;
11479 case TARGET_SIG_BLOCK
:
11482 case TARGET_SIG_UNBLOCK
:
11485 case TARGET_SIG_SETMASK
:
11489 return -TARGET_EINVAL
;
11492 target_to_host_old_sigset(&set
, &mask
);
11493 ret
= do_sigprocmask(how
, &set
, &oldset
);
11495 host_to_target_old_sigset(&mask
, &oldset
);
11502 #ifdef TARGET_NR_getgid32
11503 case TARGET_NR_getgid32
:
11504 return get_errno(getgid());
11506 #ifdef TARGET_NR_geteuid32
11507 case TARGET_NR_geteuid32
:
11508 return get_errno(geteuid());
11510 #ifdef TARGET_NR_getegid32
11511 case TARGET_NR_getegid32
:
11512 return get_errno(getegid());
11514 #ifdef TARGET_NR_setreuid32
11515 case TARGET_NR_setreuid32
:
11516 return get_errno(setreuid(arg1
, arg2
));
11518 #ifdef TARGET_NR_setregid32
11519 case TARGET_NR_setregid32
:
11520 return get_errno(setregid(arg1
, arg2
));
11522 #ifdef TARGET_NR_getgroups32
11523 case TARGET_NR_getgroups32
:
11525 int gidsetsize
= arg1
;
11526 uint32_t *target_grouplist
;
11530 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11531 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11532 if (gidsetsize
== 0)
11534 if (!is_error(ret
)) {
11535 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11536 if (!target_grouplist
) {
11537 return -TARGET_EFAULT
;
11539 for(i
= 0;i
< ret
; i
++)
11540 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11541 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11546 #ifdef TARGET_NR_setgroups32
11547 case TARGET_NR_setgroups32
:
11549 int gidsetsize
= arg1
;
11550 uint32_t *target_grouplist
;
11554 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11555 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11556 if (!target_grouplist
) {
11557 return -TARGET_EFAULT
;
11559 for(i
= 0;i
< gidsetsize
; i
++)
11560 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11561 unlock_user(target_grouplist
, arg2
, 0);
11562 return get_errno(setgroups(gidsetsize
, grouplist
));
11565 #ifdef TARGET_NR_fchown32
11566 case TARGET_NR_fchown32
:
11567 return get_errno(fchown(arg1
, arg2
, arg3
));
11569 #ifdef TARGET_NR_setresuid32
11570 case TARGET_NR_setresuid32
:
11571 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11573 #ifdef TARGET_NR_getresuid32
11574 case TARGET_NR_getresuid32
:
11576 uid_t ruid
, euid
, suid
;
11577 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11578 if (!is_error(ret
)) {
11579 if (put_user_u32(ruid
, arg1
)
11580 || put_user_u32(euid
, arg2
)
11581 || put_user_u32(suid
, arg3
))
11582 return -TARGET_EFAULT
;
11587 #ifdef TARGET_NR_setresgid32
11588 case TARGET_NR_setresgid32
:
11589 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11591 #ifdef TARGET_NR_getresgid32
11592 case TARGET_NR_getresgid32
:
11594 gid_t rgid
, egid
, sgid
;
11595 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11596 if (!is_error(ret
)) {
11597 if (put_user_u32(rgid
, arg1
)
11598 || put_user_u32(egid
, arg2
)
11599 || put_user_u32(sgid
, arg3
))
11600 return -TARGET_EFAULT
;
11605 #ifdef TARGET_NR_chown32
11606 case TARGET_NR_chown32
:
11607 if (!(p
= lock_user_string(arg1
)))
11608 return -TARGET_EFAULT
;
11609 ret
= get_errno(chown(p
, arg2
, arg3
));
11610 unlock_user(p
, arg1
, 0);
11613 #ifdef TARGET_NR_setuid32
11614 case TARGET_NR_setuid32
:
11615 return get_errno(sys_setuid(arg1
));
11617 #ifdef TARGET_NR_setgid32
11618 case TARGET_NR_setgid32
:
11619 return get_errno(sys_setgid(arg1
));
11621 #ifdef TARGET_NR_setfsuid32
11622 case TARGET_NR_setfsuid32
:
11623 return get_errno(setfsuid(arg1
));
11625 #ifdef TARGET_NR_setfsgid32
11626 case TARGET_NR_setfsgid32
:
11627 return get_errno(setfsgid(arg1
));
11629 #ifdef TARGET_NR_mincore
11630 case TARGET_NR_mincore
:
11632 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11634 return -TARGET_ENOMEM
;
11636 p
= lock_user_string(arg3
);
11638 ret
= -TARGET_EFAULT
;
11640 ret
= get_errno(mincore(a
, arg2
, p
));
11641 unlock_user(p
, arg3
, ret
);
11643 unlock_user(a
, arg1
, 0);
11647 #ifdef TARGET_NR_arm_fadvise64_64
11648 case TARGET_NR_arm_fadvise64_64
:
11649 /* arm_fadvise64_64 looks like fadvise64_64 but
11650 * with different argument order: fd, advice, offset, len
11651 * rather than the usual fd, offset, len, advice.
11652 * Note that offset and len are both 64-bit so appear as
11653 * pairs of 32-bit registers.
11655 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11656 target_offset64(arg5
, arg6
), arg2
);
11657 return -host_to_target_errno(ret
);
11660 #if TARGET_ABI_BITS == 32
11662 #ifdef TARGET_NR_fadvise64_64
11663 case TARGET_NR_fadvise64_64
:
11664 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11665 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11673 /* 6 args: fd, offset (high, low), len (high, low), advice */
11674 if (regpairs_aligned(cpu_env
, num
)) {
11675 /* offset is in (3,4), len in (5,6) and advice in 7 */
11683 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11684 target_offset64(arg4
, arg5
), arg6
);
11685 return -host_to_target_errno(ret
);
11688 #ifdef TARGET_NR_fadvise64
11689 case TARGET_NR_fadvise64
:
11690 /* 5 args: fd, offset (high, low), len, advice */
11691 if (regpairs_aligned(cpu_env
, num
)) {
11692 /* offset is in (3,4), len in 5 and advice in 6 */
11698 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11699 return -host_to_target_errno(ret
);
11702 #else /* not a 32-bit ABI */
11703 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11704 #ifdef TARGET_NR_fadvise64_64
11705 case TARGET_NR_fadvise64_64
:
11707 #ifdef TARGET_NR_fadvise64
11708 case TARGET_NR_fadvise64
:
11710 #ifdef TARGET_S390X
11712 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11713 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11714 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11715 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11719 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11721 #endif /* end of 64-bit ABI fadvise handling */
11723 #ifdef TARGET_NR_madvise
11724 case TARGET_NR_madvise
:
11725 /* A straight passthrough may not be safe because qemu sometimes
11726 turns private file-backed mappings into anonymous mappings.
11727 This will break MADV_DONTNEED.
11728 This is a hint, so ignoring and returning success is ok. */
11731 #ifdef TARGET_NR_fcntl64
11732 case TARGET_NR_fcntl64
:
11736 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11737 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11740 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11741 copyfrom
= copy_from_user_oabi_flock64
;
11742 copyto
= copy_to_user_oabi_flock64
;
11746 cmd
= target_to_host_fcntl_cmd(arg2
);
11747 if (cmd
== -TARGET_EINVAL
) {
11752 case TARGET_F_GETLK64
:
11753 ret
= copyfrom(&fl
, arg3
);
11757 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11759 ret
= copyto(arg3
, &fl
);
11763 case TARGET_F_SETLK64
:
11764 case TARGET_F_SETLKW64
:
11765 ret
= copyfrom(&fl
, arg3
);
11769 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11772 ret
= do_fcntl(arg1
, arg2
, arg3
);
11778 #ifdef TARGET_NR_cacheflush
11779 case TARGET_NR_cacheflush
:
11780 /* self-modifying code is handled automatically, so nothing needed */
11783 #ifdef TARGET_NR_getpagesize
11784 case TARGET_NR_getpagesize
:
11785 return TARGET_PAGE_SIZE
;
11787 case TARGET_NR_gettid
:
11788 return get_errno(sys_gettid());
11789 #ifdef TARGET_NR_readahead
11790 case TARGET_NR_readahead
:
11791 #if TARGET_ABI_BITS == 32
11792 if (regpairs_aligned(cpu_env
, num
)) {
11797 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11799 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11804 #ifdef TARGET_NR_setxattr
11805 case TARGET_NR_listxattr
:
11806 case TARGET_NR_llistxattr
:
11810 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11812 return -TARGET_EFAULT
;
11815 p
= lock_user_string(arg1
);
11817 if (num
== TARGET_NR_listxattr
) {
11818 ret
= get_errno(listxattr(p
, b
, arg3
));
11820 ret
= get_errno(llistxattr(p
, b
, arg3
));
11823 ret
= -TARGET_EFAULT
;
11825 unlock_user(p
, arg1
, 0);
11826 unlock_user(b
, arg2
, arg3
);
11829 case TARGET_NR_flistxattr
:
11833 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11835 return -TARGET_EFAULT
;
11838 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11839 unlock_user(b
, arg2
, arg3
);
11842 case TARGET_NR_setxattr
:
11843 case TARGET_NR_lsetxattr
:
11845 void *p
, *n
, *v
= 0;
11847 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11849 return -TARGET_EFAULT
;
11852 p
= lock_user_string(arg1
);
11853 n
= lock_user_string(arg2
);
11855 if (num
== TARGET_NR_setxattr
) {
11856 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11858 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11861 ret
= -TARGET_EFAULT
;
11863 unlock_user(p
, arg1
, 0);
11864 unlock_user(n
, arg2
, 0);
11865 unlock_user(v
, arg3
, 0);
11868 case TARGET_NR_fsetxattr
:
11872 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11874 return -TARGET_EFAULT
;
11877 n
= lock_user_string(arg2
);
11879 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11881 ret
= -TARGET_EFAULT
;
11883 unlock_user(n
, arg2
, 0);
11884 unlock_user(v
, arg3
, 0);
11887 case TARGET_NR_getxattr
:
11888 case TARGET_NR_lgetxattr
:
11890 void *p
, *n
, *v
= 0;
11892 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11894 return -TARGET_EFAULT
;
11897 p
= lock_user_string(arg1
);
11898 n
= lock_user_string(arg2
);
11900 if (num
== TARGET_NR_getxattr
) {
11901 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11903 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11906 ret
= -TARGET_EFAULT
;
11908 unlock_user(p
, arg1
, 0);
11909 unlock_user(n
, arg2
, 0);
11910 unlock_user(v
, arg3
, arg4
);
11913 case TARGET_NR_fgetxattr
:
11917 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11919 return -TARGET_EFAULT
;
11922 n
= lock_user_string(arg2
);
11924 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11926 ret
= -TARGET_EFAULT
;
11928 unlock_user(n
, arg2
, 0);
11929 unlock_user(v
, arg3
, arg4
);
11932 case TARGET_NR_removexattr
:
11933 case TARGET_NR_lremovexattr
:
11936 p
= lock_user_string(arg1
);
11937 n
= lock_user_string(arg2
);
11939 if (num
== TARGET_NR_removexattr
) {
11940 ret
= get_errno(removexattr(p
, n
));
11942 ret
= get_errno(lremovexattr(p
, n
));
11945 ret
= -TARGET_EFAULT
;
11947 unlock_user(p
, arg1
, 0);
11948 unlock_user(n
, arg2
, 0);
11951 case TARGET_NR_fremovexattr
:
11954 n
= lock_user_string(arg2
);
11956 ret
= get_errno(fremovexattr(arg1
, n
));
11958 ret
= -TARGET_EFAULT
;
11960 unlock_user(n
, arg2
, 0);
11964 #endif /* CONFIG_ATTR */
11965 #ifdef TARGET_NR_set_thread_area
11966 case TARGET_NR_set_thread_area
:
11967 #if defined(TARGET_MIPS)
11968 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11970 #elif defined(TARGET_CRIS)
11972 ret
= -TARGET_EINVAL
;
11974 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11978 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11979 return do_set_thread_area(cpu_env
, arg1
);
11980 #elif defined(TARGET_M68K)
11982 TaskState
*ts
= cpu
->opaque
;
11983 ts
->tp_value
= arg1
;
11987 return -TARGET_ENOSYS
;
11990 #ifdef TARGET_NR_get_thread_area
11991 case TARGET_NR_get_thread_area
:
11992 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11993 return do_get_thread_area(cpu_env
, arg1
);
11994 #elif defined(TARGET_M68K)
11996 TaskState
*ts
= cpu
->opaque
;
11997 return ts
->tp_value
;
12000 return -TARGET_ENOSYS
;
12003 #ifdef TARGET_NR_getdomainname
12004 case TARGET_NR_getdomainname
:
12005 return -TARGET_ENOSYS
;
12008 #ifdef TARGET_NR_clock_settime
12009 case TARGET_NR_clock_settime
:
12011 struct timespec ts
;
12013 ret
= target_to_host_timespec(&ts
, arg2
);
12014 if (!is_error(ret
)) {
12015 ret
= get_errno(clock_settime(arg1
, &ts
));
12020 #ifdef TARGET_NR_clock_settime64
12021 case TARGET_NR_clock_settime64
:
12023 struct timespec ts
;
12025 ret
= target_to_host_timespec64(&ts
, arg2
);
12026 if (!is_error(ret
)) {
12027 ret
= get_errno(clock_settime(arg1
, &ts
));
12032 #ifdef TARGET_NR_clock_gettime
12033 case TARGET_NR_clock_gettime
:
12035 struct timespec ts
;
12036 ret
= get_errno(clock_gettime(arg1
, &ts
));
12037 if (!is_error(ret
)) {
12038 ret
= host_to_target_timespec(arg2
, &ts
);
12043 #ifdef TARGET_NR_clock_gettime64
12044 case TARGET_NR_clock_gettime64
:
12046 struct timespec ts
;
12047 ret
= get_errno(clock_gettime(arg1
, &ts
));
12048 if (!is_error(ret
)) {
12049 ret
= host_to_target_timespec64(arg2
, &ts
);
12054 #ifdef TARGET_NR_clock_getres
12055 case TARGET_NR_clock_getres
:
12057 struct timespec ts
;
12058 ret
= get_errno(clock_getres(arg1
, &ts
));
12059 if (!is_error(ret
)) {
12060 host_to_target_timespec(arg2
, &ts
);
12065 #ifdef TARGET_NR_clock_getres_time64
12066 case TARGET_NR_clock_getres_time64
:
12068 struct timespec ts
;
12069 ret
= get_errno(clock_getres(arg1
, &ts
));
12070 if (!is_error(ret
)) {
12071 host_to_target_timespec64(arg2
, &ts
);
12076 #ifdef TARGET_NR_clock_nanosleep
12077 case TARGET_NR_clock_nanosleep
:
12079 struct timespec ts
;
12080 if (target_to_host_timespec(&ts
, arg3
)) {
12081 return -TARGET_EFAULT
;
12083 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12084 &ts
, arg4
? &ts
: NULL
));
12086 * if the call is interrupted by a signal handler, it fails
12087 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12088 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12090 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12091 host_to_target_timespec(arg4
, &ts
)) {
12092 return -TARGET_EFAULT
;
12098 #ifdef TARGET_NR_clock_nanosleep_time64
12099 case TARGET_NR_clock_nanosleep_time64
:
12101 struct timespec ts
;
12103 if (target_to_host_timespec64(&ts
, arg3
)) {
12104 return -TARGET_EFAULT
;
12107 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12108 &ts
, arg4
? &ts
: NULL
));
12110 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12111 host_to_target_timespec64(arg4
, &ts
)) {
12112 return -TARGET_EFAULT
;
12118 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12119 case TARGET_NR_set_tid_address
:
12120 return get_errno(set_tid_address((int *)g2h(arg1
)));
12123 case TARGET_NR_tkill
:
12124 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12126 case TARGET_NR_tgkill
:
12127 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12128 target_to_host_signal(arg3
)));
12130 #ifdef TARGET_NR_set_robust_list
12131 case TARGET_NR_set_robust_list
:
12132 case TARGET_NR_get_robust_list
:
12133 /* The ABI for supporting robust futexes has userspace pass
12134 * the kernel a pointer to a linked list which is updated by
12135 * userspace after the syscall; the list is walked by the kernel
12136 * when the thread exits. Since the linked list in QEMU guest
12137 * memory isn't a valid linked list for the host and we have
12138 * no way to reliably intercept the thread-death event, we can't
12139 * support these. Silently return ENOSYS so that guest userspace
12140 * falls back to a non-robust futex implementation (which should
12141 * be OK except in the corner case of the guest crashing while
12142 * holding a mutex that is shared with another process via
12145 return -TARGET_ENOSYS
;
12148 #if defined(TARGET_NR_utimensat)
12149 case TARGET_NR_utimensat
:
12151 struct timespec
*tsp
, ts
[2];
12155 if (target_to_host_timespec(ts
, arg3
)) {
12156 return -TARGET_EFAULT
;
12158 if (target_to_host_timespec(ts
+ 1, arg3
+
12159 sizeof(struct target_timespec
))) {
12160 return -TARGET_EFAULT
;
12165 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12167 if (!(p
= lock_user_string(arg2
))) {
12168 return -TARGET_EFAULT
;
12170 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12171 unlock_user(p
, arg2
, 0);
12176 #ifdef TARGET_NR_utimensat_time64
12177 case TARGET_NR_utimensat_time64
:
12179 struct timespec
*tsp
, ts
[2];
12183 if (target_to_host_timespec64(ts
, arg3
)) {
12184 return -TARGET_EFAULT
;
12186 if (target_to_host_timespec64(ts
+ 1, arg3
+
12187 sizeof(struct target__kernel_timespec
))) {
12188 return -TARGET_EFAULT
;
12193 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12195 p
= lock_user_string(arg2
);
12197 return -TARGET_EFAULT
;
12199 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12200 unlock_user(p
, arg2
, 0);
12205 #ifdef TARGET_NR_futex
12206 case TARGET_NR_futex
:
12207 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12209 #ifdef TARGET_NR_futex_time64
12210 case TARGET_NR_futex_time64
:
12211 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12213 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12214 case TARGET_NR_inotify_init
:
12215 ret
= get_errno(sys_inotify_init());
12217 fd_trans_register(ret
, &target_inotify_trans
);
12221 #ifdef CONFIG_INOTIFY1
12222 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12223 case TARGET_NR_inotify_init1
:
12224 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12225 fcntl_flags_tbl
)));
12227 fd_trans_register(ret
, &target_inotify_trans
);
12232 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12233 case TARGET_NR_inotify_add_watch
:
12234 p
= lock_user_string(arg2
);
12235 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12236 unlock_user(p
, arg2
, 0);
12239 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12240 case TARGET_NR_inotify_rm_watch
:
12241 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12244 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12245 case TARGET_NR_mq_open
:
12247 struct mq_attr posix_mq_attr
;
12248 struct mq_attr
*pposix_mq_attr
;
12251 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12252 pposix_mq_attr
= NULL
;
12254 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12255 return -TARGET_EFAULT
;
12257 pposix_mq_attr
= &posix_mq_attr
;
12259 p
= lock_user_string(arg1
- 1);
12261 return -TARGET_EFAULT
;
12263 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12264 unlock_user (p
, arg1
, 0);
12268 case TARGET_NR_mq_unlink
:
12269 p
= lock_user_string(arg1
- 1);
12271 return -TARGET_EFAULT
;
12273 ret
= get_errno(mq_unlink(p
));
12274 unlock_user (p
, arg1
, 0);
12277 #ifdef TARGET_NR_mq_timedsend
12278 case TARGET_NR_mq_timedsend
:
12280 struct timespec ts
;
12282 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12284 if (target_to_host_timespec(&ts
, arg5
)) {
12285 return -TARGET_EFAULT
;
12287 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12288 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12289 return -TARGET_EFAULT
;
12292 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12294 unlock_user (p
, arg2
, arg3
);
12298 #ifdef TARGET_NR_mq_timedsend_time64
12299 case TARGET_NR_mq_timedsend_time64
:
12301 struct timespec ts
;
12303 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12305 if (target_to_host_timespec64(&ts
, arg5
)) {
12306 return -TARGET_EFAULT
;
12308 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12309 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12310 return -TARGET_EFAULT
;
12313 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12315 unlock_user(p
, arg2
, arg3
);
12320 #ifdef TARGET_NR_mq_timedreceive
12321 case TARGET_NR_mq_timedreceive
:
12323 struct timespec ts
;
12326 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12328 if (target_to_host_timespec(&ts
, arg5
)) {
12329 return -TARGET_EFAULT
;
12331 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12333 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12334 return -TARGET_EFAULT
;
12337 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12340 unlock_user (p
, arg2
, arg3
);
12342 put_user_u32(prio
, arg4
);
12346 #ifdef TARGET_NR_mq_timedreceive_time64
12347 case TARGET_NR_mq_timedreceive_time64
:
12349 struct timespec ts
;
12352 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12354 if (target_to_host_timespec64(&ts
, arg5
)) {
12355 return -TARGET_EFAULT
;
12357 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12359 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12360 return -TARGET_EFAULT
;
12363 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12366 unlock_user(p
, arg2
, arg3
);
12368 put_user_u32(prio
, arg4
);
12374 /* Not implemented for now... */
12375 /* case TARGET_NR_mq_notify: */
12378 case TARGET_NR_mq_getsetattr
:
12380 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12383 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12384 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12385 &posix_mq_attr_out
));
12386 } else if (arg3
!= 0) {
12387 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12389 if (ret
== 0 && arg3
!= 0) {
12390 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12396 #ifdef CONFIG_SPLICE
12397 #ifdef TARGET_NR_tee
12398 case TARGET_NR_tee
:
12400 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12404 #ifdef TARGET_NR_splice
12405 case TARGET_NR_splice
:
12407 loff_t loff_in
, loff_out
;
12408 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12410 if (get_user_u64(loff_in
, arg2
)) {
12411 return -TARGET_EFAULT
;
12413 ploff_in
= &loff_in
;
12416 if (get_user_u64(loff_out
, arg4
)) {
12417 return -TARGET_EFAULT
;
12419 ploff_out
= &loff_out
;
12421 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12423 if (put_user_u64(loff_in
, arg2
)) {
12424 return -TARGET_EFAULT
;
12428 if (put_user_u64(loff_out
, arg4
)) {
12429 return -TARGET_EFAULT
;
12435 #ifdef TARGET_NR_vmsplice
12436 case TARGET_NR_vmsplice
:
12438 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12440 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12441 unlock_iovec(vec
, arg2
, arg3
, 0);
12443 ret
= -host_to_target_errno(errno
);
12448 #endif /* CONFIG_SPLICE */
12449 #ifdef CONFIG_EVENTFD
12450 #if defined(TARGET_NR_eventfd)
12451 case TARGET_NR_eventfd
:
12452 ret
= get_errno(eventfd(arg1
, 0));
12454 fd_trans_register(ret
, &target_eventfd_trans
);
12458 #if defined(TARGET_NR_eventfd2)
12459 case TARGET_NR_eventfd2
:
12461 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12462 if (arg2
& TARGET_O_NONBLOCK
) {
12463 host_flags
|= O_NONBLOCK
;
12465 if (arg2
& TARGET_O_CLOEXEC
) {
12466 host_flags
|= O_CLOEXEC
;
12468 ret
= get_errno(eventfd(arg1
, host_flags
));
12470 fd_trans_register(ret
, &target_eventfd_trans
);
12475 #endif /* CONFIG_EVENTFD */
12476 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12477 case TARGET_NR_fallocate
:
12478 #if TARGET_ABI_BITS == 32
12479 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12480 target_offset64(arg5
, arg6
)));
12482 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12486 #if defined(CONFIG_SYNC_FILE_RANGE)
12487 #if defined(TARGET_NR_sync_file_range)
12488 case TARGET_NR_sync_file_range
:
12489 #if TARGET_ABI_BITS == 32
12490 #if defined(TARGET_MIPS)
12491 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12492 target_offset64(arg5
, arg6
), arg7
));
12494 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12495 target_offset64(arg4
, arg5
), arg6
));
12496 #endif /* !TARGET_MIPS */
12498 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12502 #if defined(TARGET_NR_sync_file_range2) || \
12503 defined(TARGET_NR_arm_sync_file_range)
12504 #if defined(TARGET_NR_sync_file_range2)
12505 case TARGET_NR_sync_file_range2
:
12507 #if defined(TARGET_NR_arm_sync_file_range)
12508 case TARGET_NR_arm_sync_file_range
:
12510 /* This is like sync_file_range but the arguments are reordered */
12511 #if TARGET_ABI_BITS == 32
12512 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12513 target_offset64(arg5
, arg6
), arg2
));
12515 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12520 #if defined(TARGET_NR_signalfd4)
12521 case TARGET_NR_signalfd4
:
12522 return do_signalfd4(arg1
, arg2
, arg4
);
12524 #if defined(TARGET_NR_signalfd)
12525 case TARGET_NR_signalfd
:
12526 return do_signalfd4(arg1
, arg2
, 0);
12528 #if defined(CONFIG_EPOLL)
12529 #if defined(TARGET_NR_epoll_create)
12530 case TARGET_NR_epoll_create
:
12531 return get_errno(epoll_create(arg1
));
12533 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12534 case TARGET_NR_epoll_create1
:
12535 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12537 #if defined(TARGET_NR_epoll_ctl)
12538 case TARGET_NR_epoll_ctl
:
12540 struct epoll_event ep
;
12541 struct epoll_event
*epp
= 0;
12543 struct target_epoll_event
*target_ep
;
12544 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12545 return -TARGET_EFAULT
;
12547 ep
.events
= tswap32(target_ep
->events
);
12548 /* The epoll_data_t union is just opaque data to the kernel,
12549 * so we transfer all 64 bits across and need not worry what
12550 * actual data type it is.
12552 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12553 unlock_user_struct(target_ep
, arg4
, 0);
12556 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12560 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12561 #if defined(TARGET_NR_epoll_wait)
12562 case TARGET_NR_epoll_wait
:
12564 #if defined(TARGET_NR_epoll_pwait)
12565 case TARGET_NR_epoll_pwait
:
12568 struct target_epoll_event
*target_ep
;
12569 struct epoll_event
*ep
;
12571 int maxevents
= arg3
;
12572 int timeout
= arg4
;
12574 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12575 return -TARGET_EINVAL
;
12578 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12579 maxevents
* sizeof(struct target_epoll_event
), 1);
12581 return -TARGET_EFAULT
;
12584 ep
= g_try_new(struct epoll_event
, maxevents
);
12586 unlock_user(target_ep
, arg2
, 0);
12587 return -TARGET_ENOMEM
;
12591 #if defined(TARGET_NR_epoll_pwait)
12592 case TARGET_NR_epoll_pwait
:
12594 target_sigset_t
*target_set
;
12595 sigset_t _set
, *set
= &_set
;
12598 if (arg6
!= sizeof(target_sigset_t
)) {
12599 ret
= -TARGET_EINVAL
;
12603 target_set
= lock_user(VERIFY_READ
, arg5
,
12604 sizeof(target_sigset_t
), 1);
12606 ret
= -TARGET_EFAULT
;
12609 target_to_host_sigset(set
, target_set
);
12610 unlock_user(target_set
, arg5
, 0);
12615 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12616 set
, SIGSET_T_SIZE
));
12620 #if defined(TARGET_NR_epoll_wait)
12621 case TARGET_NR_epoll_wait
:
12622 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12627 ret
= -TARGET_ENOSYS
;
12629 if (!is_error(ret
)) {
12631 for (i
= 0; i
< ret
; i
++) {
12632 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12633 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12635 unlock_user(target_ep
, arg2
,
12636 ret
* sizeof(struct target_epoll_event
));
12638 unlock_user(target_ep
, arg2
, 0);
12645 #ifdef TARGET_NR_prlimit64
12646 case TARGET_NR_prlimit64
:
12648 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12649 struct target_rlimit64
*target_rnew
, *target_rold
;
12650 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12651 int resource
= target_to_host_resource(arg2
);
12653 if (arg3
&& (resource
!= RLIMIT_AS
&&
12654 resource
!= RLIMIT_DATA
&&
12655 resource
!= RLIMIT_STACK
)) {
12656 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12657 return -TARGET_EFAULT
;
12659 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12660 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12661 unlock_user_struct(target_rnew
, arg3
, 0);
12665 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12666 if (!is_error(ret
) && arg4
) {
12667 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12668 return -TARGET_EFAULT
;
12670 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12671 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12672 unlock_user_struct(target_rold
, arg4
, 1);
12677 #ifdef TARGET_NR_gethostname
12678 case TARGET_NR_gethostname
:
12680 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12682 ret
= get_errno(gethostname(name
, arg2
));
12683 unlock_user(name
, arg1
, arg2
);
12685 ret
= -TARGET_EFAULT
;
12690 #ifdef TARGET_NR_atomic_cmpxchg_32
12691 case TARGET_NR_atomic_cmpxchg_32
:
12693 /* should use start_exclusive from main.c */
12694 abi_ulong mem_value
;
12695 if (get_user_u32(mem_value
, arg6
)) {
12696 target_siginfo_t info
;
12697 info
.si_signo
= SIGSEGV
;
12699 info
.si_code
= TARGET_SEGV_MAPERR
;
12700 info
._sifields
._sigfault
._addr
= arg6
;
12701 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12702 QEMU_SI_FAULT
, &info
);
12706 if (mem_value
== arg2
)
12707 put_user_u32(arg1
, arg6
);
12711 #ifdef TARGET_NR_atomic_barrier
12712 case TARGET_NR_atomic_barrier
:
12713 /* Like the kernel implementation and the
12714 qemu arm barrier, no-op this? */
12718 #ifdef TARGET_NR_timer_create
12719 case TARGET_NR_timer_create
:
12721 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12723 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12726 int timer_index
= next_free_host_timer();
12728 if (timer_index
< 0) {
12729 ret
= -TARGET_EAGAIN
;
12731 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12734 phost_sevp
= &host_sevp
;
12735 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12741 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12745 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12746 return -TARGET_EFAULT
;
12754 #ifdef TARGET_NR_timer_settime
12755 case TARGET_NR_timer_settime
:
12757 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12758 * struct itimerspec * old_value */
12759 target_timer_t timerid
= get_timer_id(arg1
);
12763 } else if (arg3
== 0) {
12764 ret
= -TARGET_EINVAL
;
12766 timer_t htimer
= g_posix_timers
[timerid
];
12767 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12769 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12770 return -TARGET_EFAULT
;
12773 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12774 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12775 return -TARGET_EFAULT
;
12782 #ifdef TARGET_NR_timer_settime64
12783 case TARGET_NR_timer_settime64
:
12785 target_timer_t timerid
= get_timer_id(arg1
);
12789 } else if (arg3
== 0) {
12790 ret
= -TARGET_EINVAL
;
12792 timer_t htimer
= g_posix_timers
[timerid
];
12793 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12795 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12796 return -TARGET_EFAULT
;
12799 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12800 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12801 return -TARGET_EFAULT
;
12808 #ifdef TARGET_NR_timer_gettime
12809 case TARGET_NR_timer_gettime
:
12811 /* args: timer_t timerid, struct itimerspec *curr_value */
12812 target_timer_t timerid
= get_timer_id(arg1
);
12816 } else if (!arg2
) {
12817 ret
= -TARGET_EFAULT
;
12819 timer_t htimer
= g_posix_timers
[timerid
];
12820 struct itimerspec hspec
;
12821 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12823 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12824 ret
= -TARGET_EFAULT
;
12831 #ifdef TARGET_NR_timer_gettime64
12832 case TARGET_NR_timer_gettime64
:
12834 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12835 target_timer_t timerid
= get_timer_id(arg1
);
12839 } else if (!arg2
) {
12840 ret
= -TARGET_EFAULT
;
12842 timer_t htimer
= g_posix_timers
[timerid
];
12843 struct itimerspec hspec
;
12844 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12846 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12847 ret
= -TARGET_EFAULT
;
12854 #ifdef TARGET_NR_timer_getoverrun
12855 case TARGET_NR_timer_getoverrun
:
12857 /* args: timer_t timerid */
12858 target_timer_t timerid
= get_timer_id(arg1
);
12863 timer_t htimer
= g_posix_timers
[timerid
];
12864 ret
= get_errno(timer_getoverrun(htimer
));
12870 #ifdef TARGET_NR_timer_delete
12871 case TARGET_NR_timer_delete
:
12873 /* args: timer_t timerid */
12874 target_timer_t timerid
= get_timer_id(arg1
);
12879 timer_t htimer
= g_posix_timers
[timerid
];
12880 ret
= get_errno(timer_delete(htimer
));
12881 g_posix_timers
[timerid
] = 0;
12887 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12888 case TARGET_NR_timerfd_create
:
12889 return get_errno(timerfd_create(arg1
,
12890 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12893 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12894 case TARGET_NR_timerfd_gettime
:
12896 struct itimerspec its_curr
;
12898 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12900 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12901 return -TARGET_EFAULT
;
12907 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12908 case TARGET_NR_timerfd_gettime64
:
12910 struct itimerspec its_curr
;
12912 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12914 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
12915 return -TARGET_EFAULT
;
12921 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12922 case TARGET_NR_timerfd_settime
:
12924 struct itimerspec its_new
, its_old
, *p_new
;
12927 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12928 return -TARGET_EFAULT
;
12935 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12937 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12938 return -TARGET_EFAULT
;
12944 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12945 case TARGET_NR_timerfd_settime64
:
12947 struct itimerspec its_new
, its_old
, *p_new
;
12950 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
12951 return -TARGET_EFAULT
;
12958 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12960 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
12961 return -TARGET_EFAULT
;
12967 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12968 case TARGET_NR_ioprio_get
:
12969 return get_errno(ioprio_get(arg1
, arg2
));
12972 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12973 case TARGET_NR_ioprio_set
:
12974 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12977 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12978 case TARGET_NR_setns
:
12979 return get_errno(setns(arg1
, arg2
));
12981 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12982 case TARGET_NR_unshare
:
12983 return get_errno(unshare(arg1
));
12985 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12986 case TARGET_NR_kcmp
:
12987 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12989 #ifdef TARGET_NR_swapcontext
12990 case TARGET_NR_swapcontext
:
12991 /* PowerPC specific. */
12992 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12994 #ifdef TARGET_NR_memfd_create
12995 case TARGET_NR_memfd_create
:
12996 p
= lock_user_string(arg1
);
12998 return -TARGET_EFAULT
;
13000 ret
= get_errno(memfd_create(p
, arg2
));
13001 fd_trans_unregister(ret
);
13002 unlock_user(p
, arg1
, 0);
13005 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13006 case TARGET_NR_membarrier
:
13007 return get_errno(membarrier(arg1
, arg2
));
13011 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13012 return -TARGET_ENOSYS
;
13017 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13018 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13019 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13022 CPUState
*cpu
= env_cpu(cpu_env
);
13025 #ifdef DEBUG_ERESTARTSYS
13026 /* Debug-only code for exercising the syscall-restart code paths
13027 * in the per-architecture cpu main loops: restart every syscall
13028 * the guest makes once before letting it through.
13034 return -TARGET_ERESTARTSYS
;
13039 record_syscall_start(cpu
, num
, arg1
,
13040 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13042 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13043 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13046 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13047 arg5
, arg6
, arg7
, arg8
);
13049 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13050 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13051 arg3
, arg4
, arg5
, arg6
);
13054 record_syscall_return(cpu
, num
, ret
);