4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
119 #include "linux_loop.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
131 #define CLONE_IO 0x80000000 /* Clone io context */
134 /* We can't directly call the host clone syscall, because this will
135 * badly confuse libc (breaking mutexes, for example). So we must
136 * divide clone flags into:
137 * * flag combinations that look like pthread_create()
138 * * flag combinations that look like fork()
139 * * flags we can implement within QEMU itself
140 * * flags we can't support and will return an error for
142 /* For thread creation, all these flags must be present; for
143 * fork, none must be present.
145 #define CLONE_THREAD_FLAGS \
146 (CLONE_VM | CLONE_FS | CLONE_FILES | \
147 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
149 /* These flags are ignored:
150 * CLONE_DETACHED is now ignored by the kernel;
151 * CLONE_IO is just an optimisation hint to the I/O scheduler
153 #define CLONE_IGNORED_FLAGS \
154 (CLONE_DETACHED | CLONE_IO)
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS \
158 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
159 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS \
163 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
164 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
166 #define CLONE_INVALID_FORK_FLAGS \
167 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
169 #define CLONE_INVALID_THREAD_FLAGS \
170 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
171 CLONE_IGNORED_FLAGS))
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174 * have almost all been allocated. We cannot support any of
175 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177 * The checks against the invalid thread masks above will catch these.
178 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182 * once. This exercises the codepaths for restart.
184 //#define DEBUG_ERESTARTSYS
186 //#include <linux/msdos_fs.h>
187 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
188 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
198 #define _syscall0(type,name) \
199 static type name (void) \
201 return syscall(__NR_##name); \
204 #define _syscall1(type,name,type1,arg1) \
205 static type name (type1 arg1) \
207 return syscall(__NR_##name, arg1); \
210 #define _syscall2(type,name,type1,arg1,type2,arg2) \
211 static type name (type1 arg1,type2 arg2) \
213 return syscall(__NR_##name, arg1, arg2); \
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
217 static type name (type1 arg1,type2 arg2,type3 arg3) \
219 return syscall(__NR_##name, arg1, arg2, arg3); \
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
225 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
237 type5,arg5,type6,arg6) \
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
241 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid
)
276 /* For the 64-bit guest on 32-bit host case we must emulate
277 * getdents using getdents64, because otherwise the host
278 * might hand us back more dirent records than we can fit
279 * into the guest buffer after structure format conversion.
280 * Otherwise we emulate getdents with getdents if the host has it.
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
289 #if (defined(TARGET_NR_getdents) && \
290 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
296 loff_t
*, res
, uint
, wh
);
298 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
299 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
301 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group
,int,error_code
)
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address
,int *,tidptr
)
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
310 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
314 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
318 unsigned long *, user_mask_ptr
);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
321 unsigned long *, user_mask_ptr
);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
324 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
326 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
327 struct __user_cap_data_struct
*, data
);
328 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
329 struct __user_cap_data_struct
*, data
);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get
, int, which
, int, who
)
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
342 unsigned long, idx1
, unsigned long, idx2
)
346 * It is assumed that struct statx is architecture independent.
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
350 unsigned int, mask
, struct target_statx
*, statxbuf
)
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier
, int, cmd
, int, flags
)
356 static bitmask_transtbl fcntl_flags_tbl
[] = {
357 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
358 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
359 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
360 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
361 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
362 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
363 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
364 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
365 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
366 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
367 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
368 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
369 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
370 #if defined(O_DIRECT)
371 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
373 #if defined(O_NOATIME)
374 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
376 #if defined(O_CLOEXEC)
377 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
380 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
382 #if defined(O_TMPFILE)
383 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
385 /* Don't terminate the list prematurely on 64-bit host+guest. */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
392 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
394 #ifdef TARGET_NR_utimensat
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
398 const struct timespec
*,tsp
,int,flags
)
/*
 * Fallback for hosts whose kernel lacks __NR_utimensat: the call
 * cannot be emulated, so report it as unimplemented via ENOSYS and
 * let the caller's errno-conversion path surface TARGET_ENOSYS.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
407 #endif /* TARGET_NR_utimensat */
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
413 const char *, new, unsigned int, flags
)
415 static int sys_renameat2(int oldfd
, const char *old
,
416 int newfd
, const char *new, int flags
)
419 return renameat(oldfd
, old
, newfd
, new);
425 #endif /* TARGET_NR_renameat2 */
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper around the host inotify_init(2); returns the new
 * inotify file descriptor, or -1 with errno set on failure. */
static int sys_inotify_init(void)
{
    return (inotify_init());
}
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around the host inotify_add_watch(2); returns the
 * watch descriptor, or -1 with errno set on failure. */
static int sys_inotify_add_watch(int fd, const char *pathname,
                                 int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around the host inotify_rm_watch(2); returns 0 on
 * success, or -1 with errno set on failure. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around the host inotify_init1(2), which extends
 * inotify_init with IN_NONBLOCK/IN_CLOEXEC flags. */
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
457 /* Userspace can usually survive runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY */
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not be that used by the underlying syscall */
470 struct host_rlimit64
{
474 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
475 const struct host_rlimit64
*, new_limit
,
476 struct host_rlimit64
*, old_limit
)
480 #if defined(TARGET_NR_timer_create)
481 /* Maximum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers
[32] = { 0, } ;
484 static inline int next_free_host_timer(void)
487 /* FIXME: Does finding the next free slot require a lock? */
488 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
489 if (g_posix_timers
[k
] == 0) {
490 g_posix_timers
[k
] = (timer_t
) 1;
498 #define ERRNO_TABLE_SIZE 1200
500 /* target_to_host_errno_table[] is initialized from
501 * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
506 * This list is the union of errno values overridden in asm-<arch>/errno.h
507 * minus the errnos that are not actually generic to all archs.
509 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
510 [EAGAIN
] = TARGET_EAGAIN
,
511 [EIDRM
] = TARGET_EIDRM
,
512 [ECHRNG
] = TARGET_ECHRNG
,
513 [EL2NSYNC
] = TARGET_EL2NSYNC
,
514 [EL3HLT
] = TARGET_EL3HLT
,
515 [EL3RST
] = TARGET_EL3RST
,
516 [ELNRNG
] = TARGET_ELNRNG
,
517 [EUNATCH
] = TARGET_EUNATCH
,
518 [ENOCSI
] = TARGET_ENOCSI
,
519 [EL2HLT
] = TARGET_EL2HLT
,
520 [EDEADLK
] = TARGET_EDEADLK
,
521 [ENOLCK
] = TARGET_ENOLCK
,
522 [EBADE
] = TARGET_EBADE
,
523 [EBADR
] = TARGET_EBADR
,
524 [EXFULL
] = TARGET_EXFULL
,
525 [ENOANO
] = TARGET_ENOANO
,
526 [EBADRQC
] = TARGET_EBADRQC
,
527 [EBADSLT
] = TARGET_EBADSLT
,
528 [EBFONT
] = TARGET_EBFONT
,
529 [ENOSTR
] = TARGET_ENOSTR
,
530 [ENODATA
] = TARGET_ENODATA
,
531 [ETIME
] = TARGET_ETIME
,
532 [ENOSR
] = TARGET_ENOSR
,
533 [ENONET
] = TARGET_ENONET
,
534 [ENOPKG
] = TARGET_ENOPKG
,
535 [EREMOTE
] = TARGET_EREMOTE
,
536 [ENOLINK
] = TARGET_ENOLINK
,
537 [EADV
] = TARGET_EADV
,
538 [ESRMNT
] = TARGET_ESRMNT
,
539 [ECOMM
] = TARGET_ECOMM
,
540 [EPROTO
] = TARGET_EPROTO
,
541 [EDOTDOT
] = TARGET_EDOTDOT
,
542 [EMULTIHOP
] = TARGET_EMULTIHOP
,
543 [EBADMSG
] = TARGET_EBADMSG
,
544 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
545 [EOVERFLOW
] = TARGET_EOVERFLOW
,
546 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
547 [EBADFD
] = TARGET_EBADFD
,
548 [EREMCHG
] = TARGET_EREMCHG
,
549 [ELIBACC
] = TARGET_ELIBACC
,
550 [ELIBBAD
] = TARGET_ELIBBAD
,
551 [ELIBSCN
] = TARGET_ELIBSCN
,
552 [ELIBMAX
] = TARGET_ELIBMAX
,
553 [ELIBEXEC
] = TARGET_ELIBEXEC
,
554 [EILSEQ
] = TARGET_EILSEQ
,
555 [ENOSYS
] = TARGET_ENOSYS
,
556 [ELOOP
] = TARGET_ELOOP
,
557 [ERESTART
] = TARGET_ERESTART
,
558 [ESTRPIPE
] = TARGET_ESTRPIPE
,
559 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
560 [EUSERS
] = TARGET_EUSERS
,
561 [ENOTSOCK
] = TARGET_ENOTSOCK
,
562 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
563 [EMSGSIZE
] = TARGET_EMSGSIZE
,
564 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
565 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
566 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
567 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
568 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
569 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
570 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
571 [EADDRINUSE
] = TARGET_EADDRINUSE
,
572 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
573 [ENETDOWN
] = TARGET_ENETDOWN
,
574 [ENETUNREACH
] = TARGET_ENETUNREACH
,
575 [ENETRESET
] = TARGET_ENETRESET
,
576 [ECONNABORTED
] = TARGET_ECONNABORTED
,
577 [ECONNRESET
] = TARGET_ECONNRESET
,
578 [ENOBUFS
] = TARGET_ENOBUFS
,
579 [EISCONN
] = TARGET_EISCONN
,
580 [ENOTCONN
] = TARGET_ENOTCONN
,
581 [EUCLEAN
] = TARGET_EUCLEAN
,
582 [ENOTNAM
] = TARGET_ENOTNAM
,
583 [ENAVAIL
] = TARGET_ENAVAIL
,
584 [EISNAM
] = TARGET_EISNAM
,
585 [EREMOTEIO
] = TARGET_EREMOTEIO
,
586 [EDQUOT
] = TARGET_EDQUOT
,
587 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
588 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
589 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
590 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
591 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
592 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
593 [EALREADY
] = TARGET_EALREADY
,
594 [EINPROGRESS
] = TARGET_EINPROGRESS
,
595 [ESTALE
] = TARGET_ESTALE
,
596 [ECANCELED
] = TARGET_ECANCELED
,
597 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
598 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
600 [ENOKEY
] = TARGET_ENOKEY
,
603 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
606 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
609 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
612 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
614 #ifdef ENOTRECOVERABLE
615 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
618 [ENOMSG
] = TARGET_ENOMSG
,
621 [ERFKILL
] = TARGET_ERFKILL
,
624 [EHWPOISON
] = TARGET_EHWPOISON
,
628 static inline int host_to_target_errno(int err
)
630 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
631 host_to_target_errno_table
[err
]) {
632 return host_to_target_errno_table
[err
];
637 static inline int target_to_host_errno(int err
)
639 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
640 target_to_host_errno_table
[err
]) {
641 return target_to_host_errno_table
[err
];
646 static inline abi_long
get_errno(abi_long ret
)
649 return -host_to_target_errno(errno
);
654 const char *target_strerror(int err
)
656 if (err
== TARGET_ERESTARTSYS
) {
657 return "To be restarted";
659 if (err
== TARGET_QEMU_ESIGRETURN
) {
660 return "Successful exit from sigreturn";
663 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
666 return strerror(target_to_host_errno(err
));
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
672 return safe_syscall(__NR_##name); \
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
678 return safe_syscall(__NR_##name, arg1); \
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
684 return safe_syscall(__NR_##name, arg1, arg2); \
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
690 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
697 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701 type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
705 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709 type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711 type5 arg5, type6 arg6) \
713 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
716 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
717 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
718 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
719 int, flags
, mode_t
, mode
)
720 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
721 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
722 struct rusage
*, rusage
)
724 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
725 int, options
, struct rusage
*, rusage
)
726 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
727 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
728 defined(TARGET_NR_pselect6)
729 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
730 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
732 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
733 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
734 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
737 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
738 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
740 #if defined(__NR_futex)
741 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
742 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
744 #if defined(__NR_futex_time64)
745 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
746 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
748 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
749 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
750 safe_syscall2(int, tkill
, int, tid
, int, sig
)
751 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
752 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
753 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
754 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
755 unsigned long, pos_l
, unsigned long, pos_h
)
756 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
757 unsigned long, pos_l
, unsigned long, pos_h
)
758 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
760 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
761 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
762 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
763 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
764 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
765 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
766 safe_syscall2(int, flock
, int, fd
, int, operation
)
767 #ifdef TARGET_NR_rt_sigtimedwait
768 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
769 const struct timespec
*, uts
, size_t, sigsetsize
)
771 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
773 #if defined(TARGET_NR_nanosleep)
774 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
775 struct timespec
*, rem
)
777 #ifdef TARGET_NR_clock_nanosleep
778 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
779 const struct timespec
*, req
, struct timespec
*, rem
)
783 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
786 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
787 void *, ptr
, long, fifth
)
791 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
795 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
796 long, msgtype
, int, flags
)
798 #ifdef __NR_semtimedop
799 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
800 unsigned, nsops
, const struct timespec
*, timeout
)
802 #ifdef TARGET_NR_mq_timedsend
803 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
804 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
806 #ifdef TARGET_NR_mq_timedreceive
807 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
808 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
810 /* We do ioctl like this rather than via safe_syscall3 to preserve the
811 * "third argument might be integer or pointer or not present" behaviour of
814 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
815 /* Similarly for fcntl. Note that callers must always:
816 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
817 * use the flock64 struct rather than unsuffixed flock
818 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
821 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
823 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
826 static inline int host_to_target_sock_type(int host_type
)
830 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
832 target_type
= TARGET_SOCK_DGRAM
;
835 target_type
= TARGET_SOCK_STREAM
;
838 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
842 #if defined(SOCK_CLOEXEC)
843 if (host_type
& SOCK_CLOEXEC
) {
844 target_type
|= TARGET_SOCK_CLOEXEC
;
848 #if defined(SOCK_NONBLOCK)
849 if (host_type
& SOCK_NONBLOCK
) {
850 target_type
|= TARGET_SOCK_NONBLOCK
;
857 static abi_ulong target_brk
;
858 static abi_ulong target_original_brk
;
859 static abi_ulong brk_page
;
861 void target_set_brk(abi_ulong new_brk
)
863 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
864 brk_page
= HOST_PAGE_ALIGN(target_brk
);
867 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
868 #define DEBUGF_BRK(message, args...)
870 /* do_brk() must return target values and target errnos. */
871 abi_long
do_brk(abi_ulong new_brk
)
873 abi_long mapped_addr
;
874 abi_ulong new_alloc_size
;
876 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
879 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
882 if (new_brk
< target_original_brk
) {
883 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
888 /* If the new brk is less than the highest page reserved to the
889 * target heap allocation, set it and we're almost done... */
890 if (new_brk
<= brk_page
) {
891 /* Heap contents are initialized to zero, as for anonymous
893 if (new_brk
> target_brk
) {
894 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
896 target_brk
= new_brk
;
897 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
901 /* We need to allocate more memory after the brk... Note that
902 * we don't use MAP_FIXED because that will map over the top of
903 * any existing mapping (like the one with the host libc or qemu
904 * itself); instead we treat "mapped but at wrong address" as
905 * a failure and unmap again.
907 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
908 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
909 PROT_READ
|PROT_WRITE
,
910 MAP_ANON
|MAP_PRIVATE
, 0, 0));
912 if (mapped_addr
== brk_page
) {
913 /* Heap contents are initialized to zero, as for anonymous
914 * mapped pages. Technically the new pages are already
915 * initialized to zero since they *are* anonymous mapped
916 * pages, however we have to take care with the contents that
917 * come from the remaining part of the previous page: it may
918 * contains garbage data due to a previous heap usage (grown
920 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
922 target_brk
= new_brk
;
923 brk_page
= HOST_PAGE_ALIGN(target_brk
);
924 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
927 } else if (mapped_addr
!= -1) {
928 /* Mapped but at wrong address, meaning there wasn't actually
929 * enough space for this brk.
931 target_munmap(mapped_addr
, new_alloc_size
);
933 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
936 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
939 #if defined(TARGET_ALPHA)
940 /* We (partially) emulate OSF/1 on Alpha, which requires we
941 return a proper errno, not an unchanged brk value. */
942 return -TARGET_ENOMEM
;
944 /* For everything else, return the previous break. */
948 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
949 defined(TARGET_NR_pselect6)
950 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
951 abi_ulong target_fds_addr
,
955 abi_ulong b
, *target_fds
;
957 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
958 if (!(target_fds
= lock_user(VERIFY_READ
,
960 sizeof(abi_ulong
) * nw
,
962 return -TARGET_EFAULT
;
966 for (i
= 0; i
< nw
; i
++) {
967 /* grab the abi_ulong */
968 __get_user(b
, &target_fds
[i
]);
969 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
970 /* check the bit inside the abi_ulong */
977 unlock_user(target_fds
, target_fds_addr
, 0);
982 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
983 abi_ulong target_fds_addr
,
986 if (target_fds_addr
) {
987 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
988 return -TARGET_EFAULT
;
996 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1002 abi_ulong
*target_fds
;
1004 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1005 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1007 sizeof(abi_ulong
) * nw
,
1009 return -TARGET_EFAULT
;
1012 for (i
= 0; i
< nw
; i
++) {
1014 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1015 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1018 __put_user(v
, &target_fds
[i
]);
1021 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1027 #if defined(__alpha__)
1028 #define HOST_HZ 1024
1033 static inline abi_long
host_to_target_clock_t(long ticks
)
1035 #if HOST_HZ == TARGET_HZ
1038 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1042 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1043 const struct rusage
*rusage
)
1045 struct target_rusage
*target_rusage
;
1047 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1048 return -TARGET_EFAULT
;
1049 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1050 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1051 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1052 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1053 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1054 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1055 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1056 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1057 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1058 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1059 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1060 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1061 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1062 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1063 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1064 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1065 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1066 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1067 unlock_user_struct(target_rusage
, target_addr
, 1);
1072 #ifdef TARGET_NR_setrlimit
1073 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1075 abi_ulong target_rlim_swap
;
1078 target_rlim_swap
= tswapal(target_rlim
);
1079 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1080 return RLIM_INFINITY
;
1082 result
= target_rlim_swap
;
1083 if (target_rlim_swap
!= (rlim_t
)result
)
1084 return RLIM_INFINITY
;
1090 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1091 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1093 abi_ulong target_rlim_swap
;
1096 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1097 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1099 target_rlim_swap
= rlim
;
1100 result
= tswapal(target_rlim_swap
);
1106 static inline int target_to_host_resource(int code
)
1109 case TARGET_RLIMIT_AS
:
1111 case TARGET_RLIMIT_CORE
:
1113 case TARGET_RLIMIT_CPU
:
1115 case TARGET_RLIMIT_DATA
:
1117 case TARGET_RLIMIT_FSIZE
:
1118 return RLIMIT_FSIZE
;
1119 case TARGET_RLIMIT_LOCKS
:
1120 return RLIMIT_LOCKS
;
1121 case TARGET_RLIMIT_MEMLOCK
:
1122 return RLIMIT_MEMLOCK
;
1123 case TARGET_RLIMIT_MSGQUEUE
:
1124 return RLIMIT_MSGQUEUE
;
1125 case TARGET_RLIMIT_NICE
:
1127 case TARGET_RLIMIT_NOFILE
:
1128 return RLIMIT_NOFILE
;
1129 case TARGET_RLIMIT_NPROC
:
1130 return RLIMIT_NPROC
;
1131 case TARGET_RLIMIT_RSS
:
1133 case TARGET_RLIMIT_RTPRIO
:
1134 return RLIMIT_RTPRIO
;
1135 case TARGET_RLIMIT_SIGPENDING
:
1136 return RLIMIT_SIGPENDING
;
1137 case TARGET_RLIMIT_STACK
:
1138 return RLIMIT_STACK
;
1144 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1145 abi_ulong target_tv_addr
)
1147 struct target_timeval
*target_tv
;
1149 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1150 return -TARGET_EFAULT
;
1153 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1154 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1156 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1161 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1162 const struct timeval
*tv
)
1164 struct target_timeval
*target_tv
;
1166 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1167 return -TARGET_EFAULT
;
1170 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1171 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1173 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1178 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1179 const struct timeval
*tv
)
1181 struct target__kernel_sock_timeval
*target_tv
;
1183 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1184 return -TARGET_EFAULT
;
1187 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1188 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1190 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1195 #if defined(TARGET_NR_futex) || \
1196 defined(TARGET_NR_rt_sigtimedwait) || \
1197 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1198 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1199 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1200 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1201 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1202 defined(TARGET_NR_timer_settime) || \
1203 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1204 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1205 abi_ulong target_addr
)
1207 struct target_timespec
*target_ts
;
1209 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1210 return -TARGET_EFAULT
;
1212 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1213 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1214 unlock_user_struct(target_ts
, target_addr
, 0);
1219 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1220 defined(TARGET_NR_timer_settime64) || \
1221 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
1222 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1223 abi_ulong target_addr
)
1225 struct target__kernel_timespec
*target_ts
;
1227 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1228 return -TARGET_EFAULT
;
1230 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1231 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1232 /* in 32bit mode, this drops the padding */
1233 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1234 unlock_user_struct(target_ts
, target_addr
, 0);
1239 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1240 struct timespec
*host_ts
)
1242 struct target_timespec
*target_ts
;
1244 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1245 return -TARGET_EFAULT
;
1247 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1248 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1249 unlock_user_struct(target_ts
, target_addr
, 1);
1253 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1254 struct timespec
*host_ts
)
1256 struct target__kernel_timespec
*target_ts
;
1258 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1259 return -TARGET_EFAULT
;
1261 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1262 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1263 unlock_user_struct(target_ts
, target_addr
, 1);
1267 #if defined(TARGET_NR_gettimeofday)
1268 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1269 struct timezone
*tz
)
1271 struct target_timezone
*target_tz
;
1273 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1274 return -TARGET_EFAULT
;
1277 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1278 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1280 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1286 #if defined(TARGET_NR_settimeofday)
1287 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1288 abi_ulong target_tz_addr
)
1290 struct target_timezone
*target_tz
;
1292 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1293 return -TARGET_EFAULT
;
1296 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1297 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1299 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1305 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1308 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1309 abi_ulong target_mq_attr_addr
)
1311 struct target_mq_attr
*target_mq_attr
;
1313 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1314 target_mq_attr_addr
, 1))
1315 return -TARGET_EFAULT
;
1317 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1318 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1319 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1320 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1322 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1327 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1328 const struct mq_attr
*attr
)
1330 struct target_mq_attr
*target_mq_attr
;
1332 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1333 target_mq_attr_addr
, 0))
1334 return -TARGET_EFAULT
;
1336 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1337 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1338 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1339 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1341 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1347 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1348 /* do_select() must return target values and target errnos. */
1349 static abi_long
do_select(int n
,
1350 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1351 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1353 fd_set rfds
, wfds
, efds
;
1354 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1356 struct timespec ts
, *ts_ptr
;
1359 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1363 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1367 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1372 if (target_tv_addr
) {
1373 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1374 return -TARGET_EFAULT
;
1375 ts
.tv_sec
= tv
.tv_sec
;
1376 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1382 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1385 if (!is_error(ret
)) {
1386 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1387 return -TARGET_EFAULT
;
1388 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1389 return -TARGET_EFAULT
;
1390 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1391 return -TARGET_EFAULT
;
1393 if (target_tv_addr
) {
1394 tv
.tv_sec
= ts
.tv_sec
;
1395 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1396 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1397 return -TARGET_EFAULT
;
1405 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1406 static abi_long
do_old_select(abi_ulong arg1
)
1408 struct target_sel_arg_struct
*sel
;
1409 abi_ulong inp
, outp
, exp
, tvp
;
1412 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1413 return -TARGET_EFAULT
;
1416 nsel
= tswapal(sel
->n
);
1417 inp
= tswapal(sel
->inp
);
1418 outp
= tswapal(sel
->outp
);
1419 exp
= tswapal(sel
->exp
);
1420 tvp
= tswapal(sel
->tvp
);
1422 unlock_user_struct(sel
, arg1
, 0);
1424 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1429 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1432 return pipe2(host_pipe
, flags
);
1438 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1439 int flags
, int is_pipe2
)
1443 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1446 return get_errno(ret
);
1448 /* Several targets have special calling conventions for the original
1449 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1451 #if defined(TARGET_ALPHA)
1452 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1453 return host_pipe
[0];
1454 #elif defined(TARGET_MIPS)
1455 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1456 return host_pipe
[0];
1457 #elif defined(TARGET_SH4)
1458 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1459 return host_pipe
[0];
1460 #elif defined(TARGET_SPARC)
1461 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1462 return host_pipe
[0];
1466 if (put_user_s32(host_pipe
[0], pipedes
)
1467 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1468 return -TARGET_EFAULT
;
1469 return get_errno(ret
);
1472 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1473 abi_ulong target_addr
,
1476 struct target_ip_mreqn
*target_smreqn
;
1478 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1480 return -TARGET_EFAULT
;
1481 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1482 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1483 if (len
== sizeof(struct target_ip_mreqn
))
1484 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1485 unlock_user(target_smreqn
, target_addr
, 0);
1490 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1491 abi_ulong target_addr
,
1494 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1495 sa_family_t sa_family
;
1496 struct target_sockaddr
*target_saddr
;
1498 if (fd_trans_target_to_host_addr(fd
)) {
1499 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1502 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1504 return -TARGET_EFAULT
;
1506 sa_family
= tswap16(target_saddr
->sa_family
);
1508 /* Oops. The caller might send a incomplete sun_path; sun_path
1509 * must be terminated by \0 (see the manual page), but
1510 * unfortunately it is quite common to specify sockaddr_un
1511 * length as "strlen(x->sun_path)" while it should be
1512 * "strlen(...) + 1". We'll fix that here if needed.
1513 * Linux kernel has a similar feature.
1516 if (sa_family
== AF_UNIX
) {
1517 if (len
< unix_maxlen
&& len
> 0) {
1518 char *cp
= (char*)target_saddr
;
1520 if ( cp
[len
-1] && !cp
[len
] )
1523 if (len
> unix_maxlen
)
1527 memcpy(addr
, target_saddr
, len
);
1528 addr
->sa_family
= sa_family
;
1529 if (sa_family
== AF_NETLINK
) {
1530 struct sockaddr_nl
*nladdr
;
1532 nladdr
= (struct sockaddr_nl
*)addr
;
1533 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1534 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1535 } else if (sa_family
== AF_PACKET
) {
1536 struct target_sockaddr_ll
*lladdr
;
1538 lladdr
= (struct target_sockaddr_ll
*)addr
;
1539 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1540 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1542 unlock_user(target_saddr
, target_addr
, 0);
1547 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1548 struct sockaddr
*addr
,
1551 struct target_sockaddr
*target_saddr
;
1558 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1560 return -TARGET_EFAULT
;
1561 memcpy(target_saddr
, addr
, len
);
1562 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1563 sizeof(target_saddr
->sa_family
)) {
1564 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1566 if (addr
->sa_family
== AF_NETLINK
&&
1567 len
>= sizeof(struct target_sockaddr_nl
)) {
1568 struct target_sockaddr_nl
*target_nl
=
1569 (struct target_sockaddr_nl
*)target_saddr
;
1570 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1571 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1572 } else if (addr
->sa_family
== AF_PACKET
) {
1573 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1574 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1575 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1576 } else if (addr
->sa_family
== AF_INET6
&&
1577 len
>= sizeof(struct target_sockaddr_in6
)) {
1578 struct target_sockaddr_in6
*target_in6
=
1579 (struct target_sockaddr_in6
*)target_saddr
;
1580 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1582 unlock_user(target_saddr
, target_addr
, len
);
1587 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1588 struct target_msghdr
*target_msgh
)
1590 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1591 abi_long msg_controllen
;
1592 abi_ulong target_cmsg_addr
;
1593 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1594 socklen_t space
= 0;
1596 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1597 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1599 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1600 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1601 target_cmsg_start
= target_cmsg
;
1603 return -TARGET_EFAULT
;
1605 while (cmsg
&& target_cmsg
) {
1606 void *data
= CMSG_DATA(cmsg
);
1607 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1609 int len
= tswapal(target_cmsg
->cmsg_len
)
1610 - sizeof(struct target_cmsghdr
);
1612 space
+= CMSG_SPACE(len
);
1613 if (space
> msgh
->msg_controllen
) {
1614 space
-= CMSG_SPACE(len
);
1615 /* This is a QEMU bug, since we allocated the payload
1616 * area ourselves (unlike overflow in host-to-target
1617 * conversion, which is just the guest giving us a buffer
1618 * that's too small). It can't happen for the payload types
1619 * we currently support; if it becomes an issue in future
1620 * we would need to improve our allocation strategy to
1621 * something more intelligent than "twice the size of the
1622 * target buffer we're reading from".
1624 qemu_log_mask(LOG_UNIMP
,
1625 ("Unsupported ancillary data %d/%d: "
1626 "unhandled msg size\n"),
1627 tswap32(target_cmsg
->cmsg_level
),
1628 tswap32(target_cmsg
->cmsg_type
));
1632 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1633 cmsg
->cmsg_level
= SOL_SOCKET
;
1635 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1637 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1638 cmsg
->cmsg_len
= CMSG_LEN(len
);
1640 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1641 int *fd
= (int *)data
;
1642 int *target_fd
= (int *)target_data
;
1643 int i
, numfds
= len
/ sizeof(int);
1645 for (i
= 0; i
< numfds
; i
++) {
1646 __get_user(fd
[i
], target_fd
+ i
);
1648 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1649 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1650 struct ucred
*cred
= (struct ucred
*)data
;
1651 struct target_ucred
*target_cred
=
1652 (struct target_ucred
*)target_data
;
1654 __get_user(cred
->pid
, &target_cred
->pid
);
1655 __get_user(cred
->uid
, &target_cred
->uid
);
1656 __get_user(cred
->gid
, &target_cred
->gid
);
1658 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1659 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1660 memcpy(data
, target_data
, len
);
1663 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1664 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1667 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1669 msgh
->msg_controllen
= space
;
1673 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1674 struct msghdr
*msgh
)
1676 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1677 abi_long msg_controllen
;
1678 abi_ulong target_cmsg_addr
;
1679 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1680 socklen_t space
= 0;
1682 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1683 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1685 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1686 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1687 target_cmsg_start
= target_cmsg
;
1689 return -TARGET_EFAULT
;
1691 while (cmsg
&& target_cmsg
) {
1692 void *data
= CMSG_DATA(cmsg
);
1693 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1695 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1696 int tgt_len
, tgt_space
;
1698 /* We never copy a half-header but may copy half-data;
1699 * this is Linux's behaviour in put_cmsg(). Note that
1700 * truncation here is a guest problem (which we report
1701 * to the guest via the CTRUNC bit), unlike truncation
1702 * in target_to_host_cmsg, which is a QEMU bug.
1704 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1705 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1709 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1710 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1712 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1714 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1716 /* Payload types which need a different size of payload on
1717 * the target must adjust tgt_len here.
1720 switch (cmsg
->cmsg_level
) {
1722 switch (cmsg
->cmsg_type
) {
1724 tgt_len
= sizeof(struct target_timeval
);
1734 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1735 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1736 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1739 /* We must now copy-and-convert len bytes of payload
1740 * into tgt_len bytes of destination space. Bear in mind
1741 * that in both source and destination we may be dealing
1742 * with a truncated value!
1744 switch (cmsg
->cmsg_level
) {
1746 switch (cmsg
->cmsg_type
) {
1749 int *fd
= (int *)data
;
1750 int *target_fd
= (int *)target_data
;
1751 int i
, numfds
= tgt_len
/ sizeof(int);
1753 for (i
= 0; i
< numfds
; i
++) {
1754 __put_user(fd
[i
], target_fd
+ i
);
1760 struct timeval
*tv
= (struct timeval
*)data
;
1761 struct target_timeval
*target_tv
=
1762 (struct target_timeval
*)target_data
;
1764 if (len
!= sizeof(struct timeval
) ||
1765 tgt_len
!= sizeof(struct target_timeval
)) {
1769 /* copy struct timeval to target */
1770 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1771 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1774 case SCM_CREDENTIALS
:
1776 struct ucred
*cred
= (struct ucred
*)data
;
1777 struct target_ucred
*target_cred
=
1778 (struct target_ucred
*)target_data
;
1780 __put_user(cred
->pid
, &target_cred
->pid
);
1781 __put_user(cred
->uid
, &target_cred
->uid
);
1782 __put_user(cred
->gid
, &target_cred
->gid
);
1791 switch (cmsg
->cmsg_type
) {
1794 uint32_t *v
= (uint32_t *)data
;
1795 uint32_t *t_int
= (uint32_t *)target_data
;
1797 if (len
!= sizeof(uint32_t) ||
1798 tgt_len
!= sizeof(uint32_t)) {
1801 __put_user(*v
, t_int
);
1807 struct sock_extended_err ee
;
1808 struct sockaddr_in offender
;
1810 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1811 struct errhdr_t
*target_errh
=
1812 (struct errhdr_t
*)target_data
;
1814 if (len
!= sizeof(struct errhdr_t
) ||
1815 tgt_len
!= sizeof(struct errhdr_t
)) {
1818 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1819 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1820 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1821 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1822 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1823 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1824 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1825 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1826 (void *) &errh
->offender
, sizeof(errh
->offender
));
1835 switch (cmsg
->cmsg_type
) {
1838 uint32_t *v
= (uint32_t *)data
;
1839 uint32_t *t_int
= (uint32_t *)target_data
;
1841 if (len
!= sizeof(uint32_t) ||
1842 tgt_len
!= sizeof(uint32_t)) {
1845 __put_user(*v
, t_int
);
1851 struct sock_extended_err ee
;
1852 struct sockaddr_in6 offender
;
1854 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1855 struct errhdr6_t
*target_errh
=
1856 (struct errhdr6_t
*)target_data
;
1858 if (len
!= sizeof(struct errhdr6_t
) ||
1859 tgt_len
!= sizeof(struct errhdr6_t
)) {
1862 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1863 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1864 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1865 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1866 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1867 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1868 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1869 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1870 (void *) &errh
->offender
, sizeof(errh
->offender
));
1880 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1881 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1882 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1883 if (tgt_len
> len
) {
1884 memset(target_data
+ len
, 0, tgt_len
- len
);
1888 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1889 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1890 if (msg_controllen
< tgt_space
) {
1891 tgt_space
= msg_controllen
;
1893 msg_controllen
-= tgt_space
;
1895 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1896 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1899 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1901 target_msgh
->msg_controllen
= tswapal(space
);
1905 /* do_setsockopt() Must return target values and target errnos. */
1906 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1907 abi_ulong optval_addr
, socklen_t optlen
)
1911 struct ip_mreqn
*ip_mreq
;
1912 struct ip_mreq_source
*ip_mreq_source
;
1916 /* TCP options all take an 'int' value. */
1917 if (optlen
< sizeof(uint32_t))
1918 return -TARGET_EINVAL
;
1920 if (get_user_u32(val
, optval_addr
))
1921 return -TARGET_EFAULT
;
1922 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1929 case IP_ROUTER_ALERT
:
1933 case IP_MTU_DISCOVER
:
1940 case IP_MULTICAST_TTL
:
1941 case IP_MULTICAST_LOOP
:
1943 if (optlen
>= sizeof(uint32_t)) {
1944 if (get_user_u32(val
, optval_addr
))
1945 return -TARGET_EFAULT
;
1946 } else if (optlen
>= 1) {
1947 if (get_user_u8(val
, optval_addr
))
1948 return -TARGET_EFAULT
;
1950 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1952 case IP_ADD_MEMBERSHIP
:
1953 case IP_DROP_MEMBERSHIP
:
1954 if (optlen
< sizeof (struct target_ip_mreq
) ||
1955 optlen
> sizeof (struct target_ip_mreqn
))
1956 return -TARGET_EINVAL
;
1958 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1959 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1960 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1963 case IP_BLOCK_SOURCE
:
1964 case IP_UNBLOCK_SOURCE
:
1965 case IP_ADD_SOURCE_MEMBERSHIP
:
1966 case IP_DROP_SOURCE_MEMBERSHIP
:
1967 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1968 return -TARGET_EINVAL
;
1970 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1971 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1972 unlock_user (ip_mreq_source
, optval_addr
, 0);
1981 case IPV6_MTU_DISCOVER
:
1984 case IPV6_RECVPKTINFO
:
1985 case IPV6_UNICAST_HOPS
:
1986 case IPV6_MULTICAST_HOPS
:
1987 case IPV6_MULTICAST_LOOP
:
1989 case IPV6_RECVHOPLIMIT
:
1990 case IPV6_2292HOPLIMIT
:
1993 case IPV6_2292PKTINFO
:
1994 case IPV6_RECVTCLASS
:
1995 case IPV6_RECVRTHDR
:
1996 case IPV6_2292RTHDR
:
1997 case IPV6_RECVHOPOPTS
:
1998 case IPV6_2292HOPOPTS
:
1999 case IPV6_RECVDSTOPTS
:
2000 case IPV6_2292DSTOPTS
:
2002 #ifdef IPV6_RECVPATHMTU
2003 case IPV6_RECVPATHMTU
:
2005 #ifdef IPV6_TRANSPARENT
2006 case IPV6_TRANSPARENT
:
2008 #ifdef IPV6_FREEBIND
2011 #ifdef IPV6_RECVORIGDSTADDR
2012 case IPV6_RECVORIGDSTADDR
:
2015 if (optlen
< sizeof(uint32_t)) {
2016 return -TARGET_EINVAL
;
2018 if (get_user_u32(val
, optval_addr
)) {
2019 return -TARGET_EFAULT
;
2021 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2022 &val
, sizeof(val
)));
2026 struct in6_pktinfo pki
;
2028 if (optlen
< sizeof(pki
)) {
2029 return -TARGET_EINVAL
;
2032 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2033 return -TARGET_EFAULT
;
2036 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2038 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2039 &pki
, sizeof(pki
)));
2042 case IPV6_ADD_MEMBERSHIP
:
2043 case IPV6_DROP_MEMBERSHIP
:
2045 struct ipv6_mreq ipv6mreq
;
2047 if (optlen
< sizeof(ipv6mreq
)) {
2048 return -TARGET_EINVAL
;
2051 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2052 return -TARGET_EFAULT
;
2055 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2057 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2058 &ipv6mreq
, sizeof(ipv6mreq
)));
2069 struct icmp6_filter icmp6f
;
2071 if (optlen
> sizeof(icmp6f
)) {
2072 optlen
= sizeof(icmp6f
);
2075 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2076 return -TARGET_EFAULT
;
2079 for (val
= 0; val
< 8; val
++) {
2080 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2083 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2095 /* those take an u32 value */
2096 if (optlen
< sizeof(uint32_t)) {
2097 return -TARGET_EINVAL
;
2100 if (get_user_u32(val
, optval_addr
)) {
2101 return -TARGET_EFAULT
;
2103 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2104 &val
, sizeof(val
)));
2111 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2116 char *alg_key
= g_malloc(optlen
);
2119 return -TARGET_ENOMEM
;
2121 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2123 return -TARGET_EFAULT
;
2125 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2130 case ALG_SET_AEAD_AUTHSIZE
:
2132 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2141 case TARGET_SOL_SOCKET
:
2143 case TARGET_SO_RCVTIMEO
:
2147 optname
= SO_RCVTIMEO
;
2150 if (optlen
!= sizeof(struct target_timeval
)) {
2151 return -TARGET_EINVAL
;
2154 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2155 return -TARGET_EFAULT
;
2158 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2162 case TARGET_SO_SNDTIMEO
:
2163 optname
= SO_SNDTIMEO
;
2165 case TARGET_SO_ATTACH_FILTER
:
2167 struct target_sock_fprog
*tfprog
;
2168 struct target_sock_filter
*tfilter
;
2169 struct sock_fprog fprog
;
2170 struct sock_filter
*filter
;
2173 if (optlen
!= sizeof(*tfprog
)) {
2174 return -TARGET_EINVAL
;
2176 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2177 return -TARGET_EFAULT
;
2179 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2180 tswapal(tfprog
->filter
), 0)) {
2181 unlock_user_struct(tfprog
, optval_addr
, 1);
2182 return -TARGET_EFAULT
;
2185 fprog
.len
= tswap16(tfprog
->len
);
2186 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2187 if (filter
== NULL
) {
2188 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2189 unlock_user_struct(tfprog
, optval_addr
, 1);
2190 return -TARGET_ENOMEM
;
2192 for (i
= 0; i
< fprog
.len
; i
++) {
2193 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2194 filter
[i
].jt
= tfilter
[i
].jt
;
2195 filter
[i
].jf
= tfilter
[i
].jf
;
2196 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2198 fprog
.filter
= filter
;
2200 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2201 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2204 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2205 unlock_user_struct(tfprog
, optval_addr
, 1);
2208 case TARGET_SO_BINDTODEVICE
:
2210 char *dev_ifname
, *addr_ifname
;
2212 if (optlen
> IFNAMSIZ
- 1) {
2213 optlen
= IFNAMSIZ
- 1;
2215 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2217 return -TARGET_EFAULT
;
2219 optname
= SO_BINDTODEVICE
;
2220 addr_ifname
= alloca(IFNAMSIZ
);
2221 memcpy(addr_ifname
, dev_ifname
, optlen
);
2222 addr_ifname
[optlen
] = 0;
2223 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2224 addr_ifname
, optlen
));
2225 unlock_user (dev_ifname
, optval_addr
, 0);
2228 case TARGET_SO_LINGER
:
2231 struct target_linger
*tlg
;
2233 if (optlen
!= sizeof(struct target_linger
)) {
2234 return -TARGET_EINVAL
;
2236 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2237 return -TARGET_EFAULT
;
2239 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2240 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2241 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2243 unlock_user_struct(tlg
, optval_addr
, 0);
2246 /* Options with 'int' argument. */
2247 case TARGET_SO_DEBUG
:
2250 case TARGET_SO_REUSEADDR
:
2251 optname
= SO_REUSEADDR
;
2254 case TARGET_SO_REUSEPORT
:
2255 optname
= SO_REUSEPORT
;
2258 case TARGET_SO_TYPE
:
2261 case TARGET_SO_ERROR
:
2264 case TARGET_SO_DONTROUTE
:
2265 optname
= SO_DONTROUTE
;
2267 case TARGET_SO_BROADCAST
:
2268 optname
= SO_BROADCAST
;
2270 case TARGET_SO_SNDBUF
:
2271 optname
= SO_SNDBUF
;
2273 case TARGET_SO_SNDBUFFORCE
:
2274 optname
= SO_SNDBUFFORCE
;
2276 case TARGET_SO_RCVBUF
:
2277 optname
= SO_RCVBUF
;
2279 case TARGET_SO_RCVBUFFORCE
:
2280 optname
= SO_RCVBUFFORCE
;
2282 case TARGET_SO_KEEPALIVE
:
2283 optname
= SO_KEEPALIVE
;
2285 case TARGET_SO_OOBINLINE
:
2286 optname
= SO_OOBINLINE
;
2288 case TARGET_SO_NO_CHECK
:
2289 optname
= SO_NO_CHECK
;
2291 case TARGET_SO_PRIORITY
:
2292 optname
= SO_PRIORITY
;
2295 case TARGET_SO_BSDCOMPAT
:
2296 optname
= SO_BSDCOMPAT
;
2299 case TARGET_SO_PASSCRED
:
2300 optname
= SO_PASSCRED
;
2302 case TARGET_SO_PASSSEC
:
2303 optname
= SO_PASSSEC
;
2305 case TARGET_SO_TIMESTAMP
:
2306 optname
= SO_TIMESTAMP
;
2308 case TARGET_SO_RCVLOWAT
:
2309 optname
= SO_RCVLOWAT
;
2314 if (optlen
< sizeof(uint32_t))
2315 return -TARGET_EINVAL
;
2317 if (get_user_u32(val
, optval_addr
))
2318 return -TARGET_EFAULT
;
2319 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2324 case NETLINK_PKTINFO
:
2325 case NETLINK_ADD_MEMBERSHIP
:
2326 case NETLINK_DROP_MEMBERSHIP
:
2327 case NETLINK_BROADCAST_ERROR
:
2328 case NETLINK_NO_ENOBUFS
:
2329 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2330 case NETLINK_LISTEN_ALL_NSID
:
2331 case NETLINK_CAP_ACK
:
2332 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2333 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2334 case NETLINK_EXT_ACK
:
2335 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2336 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2337 case NETLINK_GET_STRICT_CHK
:
2338 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2344 if (optlen
< sizeof(uint32_t)) {
2345 return -TARGET_EINVAL
;
2347 if (get_user_u32(val
, optval_addr
)) {
2348 return -TARGET_EFAULT
;
2350 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2353 #endif /* SOL_NETLINK */
2356 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2358 ret
= -TARGET_ENOPROTOOPT
;
2363 /* do_getsockopt() Must return target values and target errnos. */
2364 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2365 abi_ulong optval_addr
, abi_ulong optlen
)
2372 case TARGET_SOL_SOCKET
:
2375 /* These don't just return a single integer */
2376 case TARGET_SO_PEERNAME
:
2378 case TARGET_SO_RCVTIMEO
: {
2382 optname
= SO_RCVTIMEO
;
2385 if (get_user_u32(len
, optlen
)) {
2386 return -TARGET_EFAULT
;
2389 return -TARGET_EINVAL
;
2393 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2398 if (len
> sizeof(struct target_timeval
)) {
2399 len
= sizeof(struct target_timeval
);
2401 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2402 return -TARGET_EFAULT
;
2404 if (put_user_u32(len
, optlen
)) {
2405 return -TARGET_EFAULT
;
2409 case TARGET_SO_SNDTIMEO
:
2410 optname
= SO_SNDTIMEO
;
2412 case TARGET_SO_PEERCRED
: {
2415 struct target_ucred
*tcr
;
2417 if (get_user_u32(len
, optlen
)) {
2418 return -TARGET_EFAULT
;
2421 return -TARGET_EINVAL
;
2425 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2433 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2434 return -TARGET_EFAULT
;
2436 __put_user(cr
.pid
, &tcr
->pid
);
2437 __put_user(cr
.uid
, &tcr
->uid
);
2438 __put_user(cr
.gid
, &tcr
->gid
);
2439 unlock_user_struct(tcr
, optval_addr
, 1);
2440 if (put_user_u32(len
, optlen
)) {
2441 return -TARGET_EFAULT
;
2445 case TARGET_SO_PEERSEC
: {
2448 if (get_user_u32(len
, optlen
)) {
2449 return -TARGET_EFAULT
;
2452 return -TARGET_EINVAL
;
2454 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2456 return -TARGET_EFAULT
;
2459 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2461 if (put_user_u32(lv
, optlen
)) {
2462 ret
= -TARGET_EFAULT
;
2464 unlock_user(name
, optval_addr
, lv
);
2467 case TARGET_SO_LINGER
:
2471 struct target_linger
*tlg
;
2473 if (get_user_u32(len
, optlen
)) {
2474 return -TARGET_EFAULT
;
2477 return -TARGET_EINVAL
;
2481 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2489 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2490 return -TARGET_EFAULT
;
2492 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2493 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2494 unlock_user_struct(tlg
, optval_addr
, 1);
2495 if (put_user_u32(len
, optlen
)) {
2496 return -TARGET_EFAULT
;
2500 /* Options with 'int' argument. */
2501 case TARGET_SO_DEBUG
:
2504 case TARGET_SO_REUSEADDR
:
2505 optname
= SO_REUSEADDR
;
2508 case TARGET_SO_REUSEPORT
:
2509 optname
= SO_REUSEPORT
;
2512 case TARGET_SO_TYPE
:
2515 case TARGET_SO_ERROR
:
2518 case TARGET_SO_DONTROUTE
:
2519 optname
= SO_DONTROUTE
;
2521 case TARGET_SO_BROADCAST
:
2522 optname
= SO_BROADCAST
;
2524 case TARGET_SO_SNDBUF
:
2525 optname
= SO_SNDBUF
;
2527 case TARGET_SO_RCVBUF
:
2528 optname
= SO_RCVBUF
;
2530 case TARGET_SO_KEEPALIVE
:
2531 optname
= SO_KEEPALIVE
;
2533 case TARGET_SO_OOBINLINE
:
2534 optname
= SO_OOBINLINE
;
2536 case TARGET_SO_NO_CHECK
:
2537 optname
= SO_NO_CHECK
;
2539 case TARGET_SO_PRIORITY
:
2540 optname
= SO_PRIORITY
;
2543 case TARGET_SO_BSDCOMPAT
:
2544 optname
= SO_BSDCOMPAT
;
2547 case TARGET_SO_PASSCRED
:
2548 optname
= SO_PASSCRED
;
2550 case TARGET_SO_TIMESTAMP
:
2551 optname
= SO_TIMESTAMP
;
2553 case TARGET_SO_RCVLOWAT
:
2554 optname
= SO_RCVLOWAT
;
2556 case TARGET_SO_ACCEPTCONN
:
2557 optname
= SO_ACCEPTCONN
;
2564 /* TCP options all take an 'int' value. */
2566 if (get_user_u32(len
, optlen
))
2567 return -TARGET_EFAULT
;
2569 return -TARGET_EINVAL
;
2571 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2574 if (optname
== SO_TYPE
) {
2575 val
= host_to_target_sock_type(val
);
2580 if (put_user_u32(val
, optval_addr
))
2581 return -TARGET_EFAULT
;
2583 if (put_user_u8(val
, optval_addr
))
2584 return -TARGET_EFAULT
;
2586 if (put_user_u32(len
, optlen
))
2587 return -TARGET_EFAULT
;
2594 case IP_ROUTER_ALERT
:
2598 case IP_MTU_DISCOVER
:
2604 case IP_MULTICAST_TTL
:
2605 case IP_MULTICAST_LOOP
:
2606 if (get_user_u32(len
, optlen
))
2607 return -TARGET_EFAULT
;
2609 return -TARGET_EINVAL
;
2611 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2614 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2616 if (put_user_u32(len
, optlen
)
2617 || put_user_u8(val
, optval_addr
))
2618 return -TARGET_EFAULT
;
2620 if (len
> sizeof(int))
2622 if (put_user_u32(len
, optlen
)
2623 || put_user_u32(val
, optval_addr
))
2624 return -TARGET_EFAULT
;
2628 ret
= -TARGET_ENOPROTOOPT
;
2634 case IPV6_MTU_DISCOVER
:
2637 case IPV6_RECVPKTINFO
:
2638 case IPV6_UNICAST_HOPS
:
2639 case IPV6_MULTICAST_HOPS
:
2640 case IPV6_MULTICAST_LOOP
:
2642 case IPV6_RECVHOPLIMIT
:
2643 case IPV6_2292HOPLIMIT
:
2646 case IPV6_2292PKTINFO
:
2647 case IPV6_RECVTCLASS
:
2648 case IPV6_RECVRTHDR
:
2649 case IPV6_2292RTHDR
:
2650 case IPV6_RECVHOPOPTS
:
2651 case IPV6_2292HOPOPTS
:
2652 case IPV6_RECVDSTOPTS
:
2653 case IPV6_2292DSTOPTS
:
2655 #ifdef IPV6_RECVPATHMTU
2656 case IPV6_RECVPATHMTU
:
2658 #ifdef IPV6_TRANSPARENT
2659 case IPV6_TRANSPARENT
:
2661 #ifdef IPV6_FREEBIND
2664 #ifdef IPV6_RECVORIGDSTADDR
2665 case IPV6_RECVORIGDSTADDR
:
2667 if (get_user_u32(len
, optlen
))
2668 return -TARGET_EFAULT
;
2670 return -TARGET_EINVAL
;
2672 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2675 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2677 if (put_user_u32(len
, optlen
)
2678 || put_user_u8(val
, optval_addr
))
2679 return -TARGET_EFAULT
;
2681 if (len
> sizeof(int))
2683 if (put_user_u32(len
, optlen
)
2684 || put_user_u32(val
, optval_addr
))
2685 return -TARGET_EFAULT
;
2689 ret
= -TARGET_ENOPROTOOPT
;
2696 case NETLINK_PKTINFO
:
2697 case NETLINK_BROADCAST_ERROR
:
2698 case NETLINK_NO_ENOBUFS
:
2699 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2700 case NETLINK_LISTEN_ALL_NSID
:
2701 case NETLINK_CAP_ACK
:
2702 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2703 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2704 case NETLINK_EXT_ACK
:
2705 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2706 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2707 case NETLINK_GET_STRICT_CHK
:
2708 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2709 if (get_user_u32(len
, optlen
)) {
2710 return -TARGET_EFAULT
;
2712 if (len
!= sizeof(val
)) {
2713 return -TARGET_EINVAL
;
2716 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2720 if (put_user_u32(lv
, optlen
)
2721 || put_user_u32(val
, optval_addr
)) {
2722 return -TARGET_EFAULT
;
2725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2726 case NETLINK_LIST_MEMBERSHIPS
:
2730 if (get_user_u32(len
, optlen
)) {
2731 return -TARGET_EFAULT
;
2734 return -TARGET_EINVAL
;
2736 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2738 return -TARGET_EFAULT
;
2741 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2743 unlock_user(results
, optval_addr
, 0);
2746 /* swap host endianess to target endianess. */
2747 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2748 results
[i
] = tswap32(results
[i
]);
2750 if (put_user_u32(lv
, optlen
)) {
2751 return -TARGET_EFAULT
;
2753 unlock_user(results
, optval_addr
, 0);
2756 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2761 #endif /* SOL_NETLINK */
2764 qemu_log_mask(LOG_UNIMP
,
2765 "getsockopt level=%d optname=%d not yet supported\n",
2767 ret
= -TARGET_EOPNOTSUPP
;
2773 /* Convert target low/high pair representing file offset into the host
2774 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2775 * as the kernel doesn't handle them either.
2777 static void target_to_host_low_high(abi_ulong tlow
,
2779 unsigned long *hlow
,
2780 unsigned long *hhigh
)
2782 uint64_t off
= tlow
|
2783 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2784 TARGET_LONG_BITS
/ 2;
2787 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2790 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2791 abi_ulong count
, int copy
)
2793 struct target_iovec
*target_vec
;
2795 abi_ulong total_len
, max_len
;
2798 bool bad_address
= false;
2804 if (count
> IOV_MAX
) {
2809 vec
= g_try_new0(struct iovec
, count
);
2815 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2816 count
* sizeof(struct target_iovec
), 1);
2817 if (target_vec
== NULL
) {
2822 /* ??? If host page size > target page size, this will result in a
2823 value larger than what we can actually support. */
2824 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2827 for (i
= 0; i
< count
; i
++) {
2828 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2829 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2834 } else if (len
== 0) {
2835 /* Zero length pointer is ignored. */
2836 vec
[i
].iov_base
= 0;
2838 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2839 /* If the first buffer pointer is bad, this is a fault. But
2840 * subsequent bad buffers will result in a partial write; this
2841 * is realized by filling the vector with null pointers and
2843 if (!vec
[i
].iov_base
) {
2854 if (len
> max_len
- total_len
) {
2855 len
= max_len
- total_len
;
2858 vec
[i
].iov_len
= len
;
2862 unlock_user(target_vec
, target_addr
, 0);
2867 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2868 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2871 unlock_user(target_vec
, target_addr
, 0);
2878 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2879 abi_ulong count
, int copy
)
2881 struct target_iovec
*target_vec
;
2884 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2885 count
* sizeof(struct target_iovec
), 1);
2887 for (i
= 0; i
< count
; i
++) {
2888 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2889 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2893 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2895 unlock_user(target_vec
, target_addr
, 0);
2901 static inline int target_to_host_sock_type(int *type
)
2904 int target_type
= *type
;
2906 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2907 case TARGET_SOCK_DGRAM
:
2908 host_type
= SOCK_DGRAM
;
2910 case TARGET_SOCK_STREAM
:
2911 host_type
= SOCK_STREAM
;
2914 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2917 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2918 #if defined(SOCK_CLOEXEC)
2919 host_type
|= SOCK_CLOEXEC
;
2921 return -TARGET_EINVAL
;
2924 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2925 #if defined(SOCK_NONBLOCK)
2926 host_type
|= SOCK_NONBLOCK
;
2927 #elif !defined(O_NONBLOCK)
2928 return -TARGET_EINVAL
;
2935 /* Try to emulate socket type flags after socket creation. */
2936 static int sock_flags_fixup(int fd
, int target_type
)
2938 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2939 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2940 int flags
= fcntl(fd
, F_GETFL
);
2941 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2943 return -TARGET_EINVAL
;
2950 /* do_socket() Must return target values and target errnos. */
2951 static abi_long
do_socket(int domain
, int type
, int protocol
)
2953 int target_type
= type
;
2956 ret
= target_to_host_sock_type(&type
);
2961 if (domain
== PF_NETLINK
&& !(
2962 #ifdef CONFIG_RTNETLINK
2963 protocol
== NETLINK_ROUTE
||
2965 protocol
== NETLINK_KOBJECT_UEVENT
||
2966 protocol
== NETLINK_AUDIT
)) {
2967 return -TARGET_EPROTONOSUPPORT
;
2970 if (domain
== AF_PACKET
||
2971 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2972 protocol
= tswap16(protocol
);
2975 ret
= get_errno(socket(domain
, type
, protocol
));
2977 ret
= sock_flags_fixup(ret
, target_type
);
2978 if (type
== SOCK_PACKET
) {
2979 /* Manage an obsolete case :
2980 * if socket type is SOCK_PACKET, bind by name
2982 fd_trans_register(ret
, &target_packet_trans
);
2983 } else if (domain
== PF_NETLINK
) {
2985 #ifdef CONFIG_RTNETLINK
2987 fd_trans_register(ret
, &target_netlink_route_trans
);
2990 case NETLINK_KOBJECT_UEVENT
:
2991 /* nothing to do: messages are strings */
2994 fd_trans_register(ret
, &target_netlink_audit_trans
);
2997 g_assert_not_reached();
3004 /* do_bind() Must return target values and target errnos. */
3005 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3011 if ((int)addrlen
< 0) {
3012 return -TARGET_EINVAL
;
3015 addr
= alloca(addrlen
+1);
3017 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3021 return get_errno(bind(sockfd
, addr
, addrlen
));
3024 /* do_connect() Must return target values and target errnos. */
3025 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3031 if ((int)addrlen
< 0) {
3032 return -TARGET_EINVAL
;
3035 addr
= alloca(addrlen
+1);
3037 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3041 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3044 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3045 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3046 int flags
, int send
)
3052 abi_ulong target_vec
;
3054 if (msgp
->msg_name
) {
3055 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3056 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3057 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3058 tswapal(msgp
->msg_name
),
3060 if (ret
== -TARGET_EFAULT
) {
3061 /* For connected sockets msg_name and msg_namelen must
3062 * be ignored, so returning EFAULT immediately is wrong.
3063 * Instead, pass a bad msg_name to the host kernel, and
3064 * let it decide whether to return EFAULT or not.
3066 msg
.msg_name
= (void *)-1;
3071 msg
.msg_name
= NULL
;
3072 msg
.msg_namelen
= 0;
3074 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3075 msg
.msg_control
= alloca(msg
.msg_controllen
);
3076 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3078 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3080 count
= tswapal(msgp
->msg_iovlen
);
3081 target_vec
= tswapal(msgp
->msg_iov
);
3083 if (count
> IOV_MAX
) {
3084 /* sendrcvmsg returns a different errno for this condition than
3085 * readv/writev, so we must catch it here before lock_iovec() does.
3087 ret
= -TARGET_EMSGSIZE
;
3091 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3092 target_vec
, count
, send
);
3094 ret
= -host_to_target_errno(errno
);
3097 msg
.msg_iovlen
= count
;
3101 if (fd_trans_target_to_host_data(fd
)) {
3104 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3105 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3106 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3107 msg
.msg_iov
->iov_len
);
3109 msg
.msg_iov
->iov_base
= host_msg
;
3110 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3114 ret
= target_to_host_cmsg(&msg
, msgp
);
3116 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3120 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3121 if (!is_error(ret
)) {
3123 if (fd_trans_host_to_target_data(fd
)) {
3124 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3125 MIN(msg
.msg_iov
->iov_len
, len
));
3127 ret
= host_to_target_cmsg(msgp
, &msg
);
3129 if (!is_error(ret
)) {
3130 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3131 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3132 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3133 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3134 msg
.msg_name
, msg
.msg_namelen
);
3146 unlock_iovec(vec
, target_vec
, count
, !send
);
3151 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3152 int flags
, int send
)
3155 struct target_msghdr
*msgp
;
3157 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3161 return -TARGET_EFAULT
;
3163 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3164 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3168 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3169 * so it might not have this *mmsg-specific flag either.
3171 #ifndef MSG_WAITFORONE
3172 #define MSG_WAITFORONE 0x10000
3175 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3176 unsigned int vlen
, unsigned int flags
,
3179 struct target_mmsghdr
*mmsgp
;
3183 if (vlen
> UIO_MAXIOV
) {
3187 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3189 return -TARGET_EFAULT
;
3192 for (i
= 0; i
< vlen
; i
++) {
3193 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3194 if (is_error(ret
)) {
3197 mmsgp
[i
].msg_len
= tswap32(ret
);
3198 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3199 if (flags
& MSG_WAITFORONE
) {
3200 flags
|= MSG_DONTWAIT
;
3204 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3206 /* Return number of datagrams sent if we sent any at all;
3207 * otherwise return the error.
3215 /* do_accept4() Must return target values and target errnos. */
3216 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3217 abi_ulong target_addrlen_addr
, int flags
)
3219 socklen_t addrlen
, ret_addrlen
;
3224 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3226 if (target_addr
== 0) {
3227 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3230 /* linux returns EINVAL if addrlen pointer is invalid */
3231 if (get_user_u32(addrlen
, target_addrlen_addr
))
3232 return -TARGET_EINVAL
;
3234 if ((int)addrlen
< 0) {
3235 return -TARGET_EINVAL
;
3238 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3239 return -TARGET_EINVAL
;
3241 addr
= alloca(addrlen
);
3243 ret_addrlen
= addrlen
;
3244 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3245 if (!is_error(ret
)) {
3246 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3247 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3248 ret
= -TARGET_EFAULT
;
3254 /* do_getpeername() Must return target values and target errnos. */
3255 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3256 abi_ulong target_addrlen_addr
)
3258 socklen_t addrlen
, ret_addrlen
;
3262 if (get_user_u32(addrlen
, target_addrlen_addr
))
3263 return -TARGET_EFAULT
;
3265 if ((int)addrlen
< 0) {
3266 return -TARGET_EINVAL
;
3269 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3270 return -TARGET_EFAULT
;
3272 addr
= alloca(addrlen
);
3274 ret_addrlen
= addrlen
;
3275 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3276 if (!is_error(ret
)) {
3277 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3278 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3279 ret
= -TARGET_EFAULT
;
3285 /* do_getsockname() Must return target values and target errnos. */
3286 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3287 abi_ulong target_addrlen_addr
)
3289 socklen_t addrlen
, ret_addrlen
;
3293 if (get_user_u32(addrlen
, target_addrlen_addr
))
3294 return -TARGET_EFAULT
;
3296 if ((int)addrlen
< 0) {
3297 return -TARGET_EINVAL
;
3300 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3301 return -TARGET_EFAULT
;
3303 addr
= alloca(addrlen
);
3305 ret_addrlen
= addrlen
;
3306 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3307 if (!is_error(ret
)) {
3308 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3309 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3310 ret
= -TARGET_EFAULT
;
3316 /* do_socketpair() Must return target values and target errnos. */
3317 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3318 abi_ulong target_tab_addr
)
3323 target_to_host_sock_type(&type
);
3325 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3326 if (!is_error(ret
)) {
3327 if (put_user_s32(tab
[0], target_tab_addr
)
3328 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3329 ret
= -TARGET_EFAULT
;
3334 /* do_sendto() Must return target values and target errnos. */
3335 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3336 abi_ulong target_addr
, socklen_t addrlen
)
3340 void *copy_msg
= NULL
;
3343 if ((int)addrlen
< 0) {
3344 return -TARGET_EINVAL
;
3347 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3349 return -TARGET_EFAULT
;
3350 if (fd_trans_target_to_host_data(fd
)) {
3351 copy_msg
= host_msg
;
3352 host_msg
= g_malloc(len
);
3353 memcpy(host_msg
, copy_msg
, len
);
3354 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3360 addr
= alloca(addrlen
+1);
3361 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3365 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3367 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3372 host_msg
= copy_msg
;
3374 unlock_user(host_msg
, msg
, 0);
3378 /* do_recvfrom() Must return target values and target errnos. */
3379 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3380 abi_ulong target_addr
,
3381 abi_ulong target_addrlen
)
3383 socklen_t addrlen
, ret_addrlen
;
3388 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3390 return -TARGET_EFAULT
;
3392 if (get_user_u32(addrlen
, target_addrlen
)) {
3393 ret
= -TARGET_EFAULT
;
3396 if ((int)addrlen
< 0) {
3397 ret
= -TARGET_EINVAL
;
3400 addr
= alloca(addrlen
);
3401 ret_addrlen
= addrlen
;
3402 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3403 addr
, &ret_addrlen
));
3405 addr
= NULL
; /* To keep compiler quiet. */
3406 addrlen
= 0; /* To keep compiler quiet. */
3407 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3409 if (!is_error(ret
)) {
3410 if (fd_trans_host_to_target_data(fd
)) {
3412 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3413 if (is_error(trans
)) {
3419 host_to_target_sockaddr(target_addr
, addr
,
3420 MIN(addrlen
, ret_addrlen
));
3421 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3422 ret
= -TARGET_EFAULT
;
3426 unlock_user(host_msg
, msg
, len
);
3429 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3527 #define N_SHM_REGIONS 32
3529 static struct shm_region
{
3533 } shm_regions
[N_SHM_REGIONS
];
3535 #ifndef TARGET_SEMID64_DS
3536 /* asm-generic version of this struct */
3537 struct target_semid64_ds
3539 struct target_ipc_perm sem_perm
;
3540 abi_ulong sem_otime
;
3541 #if TARGET_ABI_BITS == 32
3542 abi_ulong __unused1
;
3544 abi_ulong sem_ctime
;
3545 #if TARGET_ABI_BITS == 32
3546 abi_ulong __unused2
;
3548 abi_ulong sem_nsems
;
3549 abi_ulong __unused3
;
3550 abi_ulong __unused4
;
3554 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3555 abi_ulong target_addr
)
3557 struct target_ipc_perm
*target_ip
;
3558 struct target_semid64_ds
*target_sd
;
3560 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3561 return -TARGET_EFAULT
;
3562 target_ip
= &(target_sd
->sem_perm
);
3563 host_ip
->__key
= tswap32(target_ip
->__key
);
3564 host_ip
->uid
= tswap32(target_ip
->uid
);
3565 host_ip
->gid
= tswap32(target_ip
->gid
);
3566 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3567 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3568 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3569 host_ip
->mode
= tswap32(target_ip
->mode
);
3571 host_ip
->mode
= tswap16(target_ip
->mode
);
3573 #if defined(TARGET_PPC)
3574 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3576 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3578 unlock_user_struct(target_sd
, target_addr
, 0);
3582 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3583 struct ipc_perm
*host_ip
)
3585 struct target_ipc_perm
*target_ip
;
3586 struct target_semid64_ds
*target_sd
;
3588 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3589 return -TARGET_EFAULT
;
3590 target_ip
= &(target_sd
->sem_perm
);
3591 target_ip
->__key
= tswap32(host_ip
->__key
);
3592 target_ip
->uid
= tswap32(host_ip
->uid
);
3593 target_ip
->gid
= tswap32(host_ip
->gid
);
3594 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3595 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3596 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3597 target_ip
->mode
= tswap32(host_ip
->mode
);
3599 target_ip
->mode
= tswap16(host_ip
->mode
);
3601 #if defined(TARGET_PPC)
3602 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3604 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3606 unlock_user_struct(target_sd
, target_addr
, 1);
3610 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3611 abi_ulong target_addr
)
3613 struct target_semid64_ds
*target_sd
;
3615 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3616 return -TARGET_EFAULT
;
3617 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3618 return -TARGET_EFAULT
;
3619 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3620 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3621 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3622 unlock_user_struct(target_sd
, target_addr
, 0);
3626 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3627 struct semid_ds
*host_sd
)
3629 struct target_semid64_ds
*target_sd
;
3631 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3632 return -TARGET_EFAULT
;
3633 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3634 return -TARGET_EFAULT
;
3635 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3636 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3637 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3638 unlock_user_struct(target_sd
, target_addr
, 1);
3642 struct target_seminfo
{
3655 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3656 struct seminfo
*host_seminfo
)
3658 struct target_seminfo
*target_seminfo
;
3659 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3660 return -TARGET_EFAULT
;
3661 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3662 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3663 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3664 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3665 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3666 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3667 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3668 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3669 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3670 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3671 unlock_user_struct(target_seminfo
, target_addr
, 1);
3677 struct semid_ds
*buf
;
3678 unsigned short *array
;
3679 struct seminfo
*__buf
;
3682 union target_semun
{
3689 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3690 abi_ulong target_addr
)
3693 unsigned short *array
;
3695 struct semid_ds semid_ds
;
3698 semun
.buf
= &semid_ds
;
3700 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3702 return get_errno(ret
);
3704 nsems
= semid_ds
.sem_nsems
;
3706 *host_array
= g_try_new(unsigned short, nsems
);
3708 return -TARGET_ENOMEM
;
3710 array
= lock_user(VERIFY_READ
, target_addr
,
3711 nsems
*sizeof(unsigned short), 1);
3713 g_free(*host_array
);
3714 return -TARGET_EFAULT
;
3717 for(i
=0; i
<nsems
; i
++) {
3718 __get_user((*host_array
)[i
], &array
[i
]);
3720 unlock_user(array
, target_addr
, 0);
3725 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3726 unsigned short **host_array
)
3729 unsigned short *array
;
3731 struct semid_ds semid_ds
;
3734 semun
.buf
= &semid_ds
;
3736 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3738 return get_errno(ret
);
3740 nsems
= semid_ds
.sem_nsems
;
3742 array
= lock_user(VERIFY_WRITE
, target_addr
,
3743 nsems
*sizeof(unsigned short), 0);
3745 return -TARGET_EFAULT
;
3747 for(i
=0; i
<nsems
; i
++) {
3748 __put_user((*host_array
)[i
], &array
[i
]);
3750 g_free(*host_array
);
3751 unlock_user(array
, target_addr
, 1);
3756 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3757 abi_ulong target_arg
)
3759 union target_semun target_su
= { .buf
= target_arg
};
3761 struct semid_ds dsarg
;
3762 unsigned short *array
= NULL
;
3763 struct seminfo seminfo
;
3764 abi_long ret
= -TARGET_EINVAL
;
3771 /* In 64 bit cross-endian situations, we will erroneously pick up
3772 * the wrong half of the union for the "val" element. To rectify
3773 * this, the entire 8-byte structure is byteswapped, followed by
3774 * a swap of the 4 byte val field. In other cases, the data is
3775 * already in proper host byte order. */
3776 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3777 target_su
.buf
= tswapal(target_su
.buf
);
3778 arg
.val
= tswap32(target_su
.val
);
3780 arg
.val
= target_su
.val
;
3782 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3786 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3790 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3791 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3798 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3802 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3803 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3809 arg
.__buf
= &seminfo
;
3810 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3811 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3819 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3826 struct target_sembuf
{
3827 unsigned short sem_num
;
3832 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3833 abi_ulong target_addr
,
3836 struct target_sembuf
*target_sembuf
;
3839 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3840 nsops
*sizeof(struct target_sembuf
), 1);
3842 return -TARGET_EFAULT
;
3844 for(i
=0; i
<nsops
; i
++) {
3845 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3846 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3847 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3850 unlock_user(target_sembuf
, target_addr
, 0);
3855 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3856 defined(TARGET_NR_semtimedop)
3859 * This macro is required to handle the s390 variants, which passes the
3860 * arguments in a different order than default.
3863 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3864 (__nsops), (__timeout), (__sops)
3866 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3867 (__nsops), 0, (__sops), (__timeout)
3870 static inline abi_long
do_semtimedop(int semid
,
3875 struct sembuf
*sops
;
3876 struct timespec ts
, *pts
= NULL
;
3881 if (target_to_host_timespec(pts
, timeout
)) {
3882 return -TARGET_EFAULT
;
3886 if (nsops
> TARGET_SEMOPM
) {
3887 return -TARGET_E2BIG
;
3890 sops
= g_new(struct sembuf
, nsops
);
3892 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
3894 return -TARGET_EFAULT
;
3897 ret
= -TARGET_ENOSYS
;
3898 #ifdef __NR_semtimedop
3899 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
3902 if (ret
== -TARGET_ENOSYS
) {
3903 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
3904 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
3912 struct target_msqid_ds
3914 struct target_ipc_perm msg_perm
;
3915 abi_ulong msg_stime
;
3916 #if TARGET_ABI_BITS == 32
3917 abi_ulong __unused1
;
3919 abi_ulong msg_rtime
;
3920 #if TARGET_ABI_BITS == 32
3921 abi_ulong __unused2
;
3923 abi_ulong msg_ctime
;
3924 #if TARGET_ABI_BITS == 32
3925 abi_ulong __unused3
;
3927 abi_ulong __msg_cbytes
;
3929 abi_ulong msg_qbytes
;
3930 abi_ulong msg_lspid
;
3931 abi_ulong msg_lrpid
;
3932 abi_ulong __unused4
;
3933 abi_ulong __unused5
;
3936 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3937 abi_ulong target_addr
)
3939 struct target_msqid_ds
*target_md
;
3941 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3942 return -TARGET_EFAULT
;
3943 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3944 return -TARGET_EFAULT
;
3945 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3946 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3947 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3948 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3949 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3950 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3951 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3952 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3953 unlock_user_struct(target_md
, target_addr
, 0);
3957 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3958 struct msqid_ds
*host_md
)
3960 struct target_msqid_ds
*target_md
;
3962 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3963 return -TARGET_EFAULT
;
3964 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3965 return -TARGET_EFAULT
;
3966 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3967 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3968 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3969 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3970 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3971 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3972 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3973 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3974 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3989 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3990 struct msginfo
*host_msginfo
)
3992 struct target_msginfo
*target_msginfo
;
3993 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3994 return -TARGET_EFAULT
;
3995 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3996 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3997 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3998 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3999 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4000 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4001 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4002 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4003 unlock_user_struct(target_msginfo
, target_addr
, 1);
4007 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4009 struct msqid_ds dsarg
;
4010 struct msginfo msginfo
;
4011 abi_long ret
= -TARGET_EINVAL
;
4019 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4020 return -TARGET_EFAULT
;
4021 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4022 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4023 return -TARGET_EFAULT
;
4026 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4030 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4031 if (host_to_target_msginfo(ptr
, &msginfo
))
4032 return -TARGET_EFAULT
;
4039 struct target_msgbuf
{
4044 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4045 ssize_t msgsz
, int msgflg
)
4047 struct target_msgbuf
*target_mb
;
4048 struct msgbuf
*host_mb
;
4052 return -TARGET_EINVAL
;
4055 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4056 return -TARGET_EFAULT
;
4057 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4059 unlock_user_struct(target_mb
, msgp
, 0);
4060 return -TARGET_ENOMEM
;
4062 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4063 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4064 ret
= -TARGET_ENOSYS
;
4066 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4069 if (ret
== -TARGET_ENOSYS
) {
4071 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4074 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4080 unlock_user_struct(target_mb
, msgp
, 0);
4086 #if defined(__sparc__)
4087 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4088 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4089 #elif defined(__s390x__)
4090 /* The s390 sys_ipc variant has only five parameters. */
4091 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4092 ((long int[]){(long int)__msgp, __msgtyp})
4094 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4095 ((long int[]){(long int)__msgp, __msgtyp}), 0
4099 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4100 ssize_t msgsz
, abi_long msgtyp
,
4103 struct target_msgbuf
*target_mb
;
4105 struct msgbuf
*host_mb
;
4109 return -TARGET_EINVAL
;
4112 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4113 return -TARGET_EFAULT
;
4115 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4117 ret
= -TARGET_ENOMEM
;
4120 ret
= -TARGET_ENOSYS
;
4122 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4125 if (ret
== -TARGET_ENOSYS
) {
4126 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4127 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4132 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4133 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4134 if (!target_mtext
) {
4135 ret
= -TARGET_EFAULT
;
4138 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4139 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4142 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4146 unlock_user_struct(target_mb
, msgp
, 1);
4151 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4152 abi_ulong target_addr
)
4154 struct target_shmid_ds
*target_sd
;
4156 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4157 return -TARGET_EFAULT
;
4158 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4159 return -TARGET_EFAULT
;
4160 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4161 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4162 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4163 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4164 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4165 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4166 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4167 unlock_user_struct(target_sd
, target_addr
, 0);
4171 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4172 struct shmid_ds
*host_sd
)
4174 struct target_shmid_ds
*target_sd
;
4176 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4177 return -TARGET_EFAULT
;
4178 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4179 return -TARGET_EFAULT
;
4180 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4181 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4182 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4183 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4184 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4185 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4186 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4187 unlock_user_struct(target_sd
, target_addr
, 1);
4191 struct target_shminfo
{
4199 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4200 struct shminfo
*host_shminfo
)
4202 struct target_shminfo
*target_shminfo
;
4203 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4204 return -TARGET_EFAULT
;
4205 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4206 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4207 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4208 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4209 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4210 unlock_user_struct(target_shminfo
, target_addr
, 1);
4214 struct target_shm_info
{
4219 abi_ulong swap_attempts
;
4220 abi_ulong swap_successes
;
4223 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4224 struct shm_info
*host_shm_info
)
4226 struct target_shm_info
*target_shm_info
;
4227 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4228 return -TARGET_EFAULT
;
4229 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4230 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4231 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4232 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4233 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4234 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4235 unlock_user_struct(target_shm_info
, target_addr
, 1);
4239 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4241 struct shmid_ds dsarg
;
4242 struct shminfo shminfo
;
4243 struct shm_info shm_info
;
4244 abi_long ret
= -TARGET_EINVAL
;
4252 if (target_to_host_shmid_ds(&dsarg
, buf
))
4253 return -TARGET_EFAULT
;
4254 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4255 if (host_to_target_shmid_ds(buf
, &dsarg
))
4256 return -TARGET_EFAULT
;
4259 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4260 if (host_to_target_shminfo(buf
, &shminfo
))
4261 return -TARGET_EFAULT
;
4264 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4265 if (host_to_target_shm_info(buf
, &shm_info
))
4266 return -TARGET_EFAULT
;
4271 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4278 #ifndef TARGET_FORCE_SHMLBA
4279 /* For most architectures, SHMLBA is the same as the page size;
4280 * some architectures have larger values, in which case they should
4281 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4282 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4283 * and defining its own value for SHMLBA.
4285 * The kernel also permits SHMLBA to be set by the architecture to a
4286 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4287 * this means that addresses are rounded to the large size if
4288 * SHM_RND is set but addresses not aligned to that size are not rejected
4289 * as long as they are at least page-aligned. Since the only architecture
4290 * which uses this is ia64 this code doesn't provide for that oddity.
4292 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4294 return TARGET_PAGE_SIZE
;
4298 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4299 int shmid
, abi_ulong shmaddr
, int shmflg
)
4303 struct shmid_ds shm_info
;
4307 /* find out the length of the shared memory segment */
4308 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4309 if (is_error(ret
)) {
4310 /* can't get length, bail out */
4314 shmlba
= target_shmlba(cpu_env
);
4316 if (shmaddr
& (shmlba
- 1)) {
4317 if (shmflg
& SHM_RND
) {
4318 shmaddr
&= ~(shmlba
- 1);
4320 return -TARGET_EINVAL
;
4323 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4324 return -TARGET_EINVAL
;
4330 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4332 abi_ulong mmap_start
;
4334 /* In order to use the host shmat, we need to honor host SHMLBA. */
4335 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4337 if (mmap_start
== -1) {
4339 host_raddr
= (void *)-1;
4341 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4344 if (host_raddr
== (void *)-1) {
4346 return get_errno((long)host_raddr
);
4348 raddr
=h2g((unsigned long)host_raddr
);
4350 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4351 PAGE_VALID
| PAGE_READ
|
4352 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4354 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4355 if (!shm_regions
[i
].in_use
) {
4356 shm_regions
[i
].in_use
= true;
4357 shm_regions
[i
].start
= raddr
;
4358 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4368 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4375 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4376 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4377 shm_regions
[i
].in_use
= false;
4378 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4382 rv
= get_errno(shmdt(g2h(shmaddr
)));
4389 #ifdef TARGET_NR_ipc
4390 /* ??? This only works with linear mappings. */
4391 /* do_ipc() must return target values and target errnos. */
4392 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4393 unsigned int call
, abi_long first
,
4394 abi_long second
, abi_long third
,
4395 abi_long ptr
, abi_long fifth
)
4400 version
= call
>> 16;
4405 ret
= do_semtimedop(first
, ptr
, second
, 0);
4407 case IPCOP_semtimedop
:
4409 * The s390 sys_ipc variant has only five parameters instead of six
4410 * (as for default variant) and the only difference is the handling of
4411 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4412 * to a struct timespec where the generic variant uses fifth parameter.
4414 #if defined(TARGET_S390X)
4415 ret
= do_semtimedop(first
, ptr
, second
, third
);
4417 ret
= do_semtimedop(first
, ptr
, second
, fifth
);
4422 ret
= get_errno(semget(first
, second
, third
));
4425 case IPCOP_semctl
: {
4426 /* The semun argument to semctl is passed by value, so dereference the
4429 get_user_ual(atptr
, ptr
);
4430 ret
= do_semctl(first
, second
, third
, atptr
);
4435 ret
= get_errno(msgget(first
, second
));
4439 ret
= do_msgsnd(first
, ptr
, second
, third
);
4443 ret
= do_msgctl(first
, second
, ptr
);
4450 struct target_ipc_kludge
{
4455 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4456 ret
= -TARGET_EFAULT
;
4460 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4462 unlock_user_struct(tmp
, ptr
, 0);
4466 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4475 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4476 if (is_error(raddr
))
4477 return get_errno(raddr
);
4478 if (put_user_ual(raddr
, third
))
4479 return -TARGET_EFAULT
;
4483 ret
= -TARGET_EINVAL
;
4488 ret
= do_shmdt(ptr
);
4492 /* IPC_* flag values are the same on all linux platforms */
4493 ret
= get_errno(shmget(first
, second
, third
));
4496 /* IPC_* and SHM_* command values are the same on all linux platforms */
4498 ret
= do_shmctl(first
, second
, ptr
);
4501 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4503 ret
= -TARGET_ENOSYS
;
4510 /* kernel structure types definitions */
4512 #define STRUCT(name, ...) STRUCT_ ## name,
4513 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4515 #include "syscall_types.h"
4519 #undef STRUCT_SPECIAL
4521 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4522 #define STRUCT_SPECIAL(name)
4523 #include "syscall_types.h"
4525 #undef STRUCT_SPECIAL
4527 #define MAX_STRUCT_SIZE 4096
4529 #ifdef CONFIG_FIEMAP
4530 /* So fiemap access checks don't overflow on 32 bit systems.
4531 * This is very slightly smaller than the limit imposed by
4532 * the underlying kernel.
4534 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4535 / sizeof(struct fiemap_extent))
4537 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4538 int fd
, int cmd
, abi_long arg
)
4540 /* The parameter for this ioctl is a struct fiemap followed
4541 * by an array of struct fiemap_extent whose size is set
4542 * in fiemap->fm_extent_count. The array is filled in by the
4545 int target_size_in
, target_size_out
;
4547 const argtype
*arg_type
= ie
->arg_type
;
4548 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4551 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4555 assert(arg_type
[0] == TYPE_PTR
);
4556 assert(ie
->access
== IOC_RW
);
4558 target_size_in
= thunk_type_size(arg_type
, 0);
4559 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4561 return -TARGET_EFAULT
;
4563 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4564 unlock_user(argptr
, arg
, 0);
4565 fm
= (struct fiemap
*)buf_temp
;
4566 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4567 return -TARGET_EINVAL
;
4570 outbufsz
= sizeof (*fm
) +
4571 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4573 if (outbufsz
> MAX_STRUCT_SIZE
) {
4574 /* We can't fit all the extents into the fixed size buffer.
4575 * Allocate one that is large enough and use it instead.
4577 fm
= g_try_malloc(outbufsz
);
4579 return -TARGET_ENOMEM
;
4581 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4584 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4585 if (!is_error(ret
)) {
4586 target_size_out
= target_size_in
;
4587 /* An extent_count of 0 means we were only counting the extents
4588 * so there are no structs to copy
4590 if (fm
->fm_extent_count
!= 0) {
4591 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4593 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4595 ret
= -TARGET_EFAULT
;
4597 /* Convert the struct fiemap */
4598 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4599 if (fm
->fm_extent_count
!= 0) {
4600 p
= argptr
+ target_size_in
;
4601 /* ...and then all the struct fiemap_extents */
4602 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4603 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4608 unlock_user(argptr
, arg
, target_size_out
);
4618 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4619 int fd
, int cmd
, abi_long arg
)
4621 const argtype
*arg_type
= ie
->arg_type
;
4625 struct ifconf
*host_ifconf
;
4627 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4628 int target_ifreq_size
;
4633 abi_long target_ifc_buf
;
4637 assert(arg_type
[0] == TYPE_PTR
);
4638 assert(ie
->access
== IOC_RW
);
4641 target_size
= thunk_type_size(arg_type
, 0);
4643 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4645 return -TARGET_EFAULT
;
4646 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4647 unlock_user(argptr
, arg
, 0);
4649 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4650 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4651 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4653 if (target_ifc_buf
!= 0) {
4654 target_ifc_len
= host_ifconf
->ifc_len
;
4655 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4656 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4658 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4659 if (outbufsz
> MAX_STRUCT_SIZE
) {
4661 * We can't fit all the extents into the fixed size buffer.
4662 * Allocate one that is large enough and use it instead.
4664 host_ifconf
= malloc(outbufsz
);
4666 return -TARGET_ENOMEM
;
4668 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4671 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4673 host_ifconf
->ifc_len
= host_ifc_len
;
4675 host_ifc_buf
= NULL
;
4677 host_ifconf
->ifc_buf
= host_ifc_buf
;
4679 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4680 if (!is_error(ret
)) {
4681 /* convert host ifc_len to target ifc_len */
4683 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4684 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4685 host_ifconf
->ifc_len
= target_ifc_len
;
4687 /* restore target ifc_buf */
4689 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4691 /* copy struct ifconf to target user */
4693 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4695 return -TARGET_EFAULT
;
4696 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4697 unlock_user(argptr
, arg
, target_size
);
4699 if (target_ifc_buf
!= 0) {
4700 /* copy ifreq[] to target user */
4701 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4702 for (i
= 0; i
< nb_ifreq
; i
++) {
4703 thunk_convert(argptr
+ i
* target_ifreq_size
,
4704 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4705 ifreq_arg_type
, THUNK_TARGET
);
4707 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4718 #if defined(CONFIG_USBFS)
4719 #if HOST_LONG_BITS > 64
4720 #error USBDEVFS thunks do not support >64 bit hosts yet.
4723 uint64_t target_urb_adr
;
4724 uint64_t target_buf_adr
;
4725 char *target_buf_ptr
;
4726 struct usbdevfs_urb host_urb
;
4729 static GHashTable
*usbdevfs_urb_hashtable(void)
4731 static GHashTable
*urb_hashtable
;
4733 if (!urb_hashtable
) {
4734 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4736 return urb_hashtable
;
4739 static void urb_hashtable_insert(struct live_urb
*urb
)
4741 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4742 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4745 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4747 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4748 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4751 static void urb_hashtable_remove(struct live_urb
*urb
)
4753 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4754 g_hash_table_remove(urb_hashtable
, urb
);
4758 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4759 int fd
, int cmd
, abi_long arg
)
4761 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4762 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4763 struct live_urb
*lurb
;
4767 uintptr_t target_urb_adr
;
4770 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4772 memset(buf_temp
, 0, sizeof(uint64_t));
4773 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4774 if (is_error(ret
)) {
4778 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4779 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4780 if (!lurb
->target_urb_adr
) {
4781 return -TARGET_EFAULT
;
4783 urb_hashtable_remove(lurb
);
4784 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4785 lurb
->host_urb
.buffer_length
);
4786 lurb
->target_buf_ptr
= NULL
;
4788 /* restore the guest buffer pointer */
4789 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4791 /* update the guest urb struct */
4792 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4795 return -TARGET_EFAULT
;
4797 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4798 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4800 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4801 /* write back the urb handle */
4802 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4805 return -TARGET_EFAULT
;
4808 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4809 target_urb_adr
= lurb
->target_urb_adr
;
4810 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4811 unlock_user(argptr
, arg
, target_size
);
4818 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4819 uint8_t *buf_temp
__attribute__((unused
)),
4820 int fd
, int cmd
, abi_long arg
)
4822 struct live_urb
*lurb
;
4824 /* map target address back to host URB with metadata. */
4825 lurb
= urb_hashtable_lookup(arg
);
4827 return -TARGET_EFAULT
;
4829 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4833 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4834 int fd
, int cmd
, abi_long arg
)
4836 const argtype
*arg_type
= ie
->arg_type
;
4841 struct live_urb
*lurb
;
4844 * each submitted URB needs to map to a unique ID for the
4845 * kernel, and that unique ID needs to be a pointer to
4846 * host memory. hence, we need to malloc for each URB.
4847 * isochronous transfers have a variable length struct.
4850 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4852 /* construct host copy of urb and metadata */
4853 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4855 return -TARGET_ENOMEM
;
4858 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4861 return -TARGET_EFAULT
;
4863 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4864 unlock_user(argptr
, arg
, 0);
4866 lurb
->target_urb_adr
= arg
;
4867 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4869 /* buffer space used depends on endpoint type so lock the entire buffer */
4870 /* control type urbs should check the buffer contents for true direction */
4871 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4872 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4873 lurb
->host_urb
.buffer_length
, 1);
4874 if (lurb
->target_buf_ptr
== NULL
) {
4876 return -TARGET_EFAULT
;
4879 /* update buffer pointer in host copy */
4880 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4882 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4883 if (is_error(ret
)) {
4884 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4887 urb_hashtable_insert(lurb
);
4892 #endif /* CONFIG_USBFS */
4894 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4895 int cmd
, abi_long arg
)
4898 struct dm_ioctl
*host_dm
;
4899 abi_long guest_data
;
4900 uint32_t guest_data_size
;
4902 const argtype
*arg_type
= ie
->arg_type
;
4904 void *big_buf
= NULL
;
4908 target_size
= thunk_type_size(arg_type
, 0);
4909 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4911 ret
= -TARGET_EFAULT
;
4914 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4915 unlock_user(argptr
, arg
, 0);
4917 /* buf_temp is too small, so fetch things into a bigger buffer */
4918 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4919 memcpy(big_buf
, buf_temp
, target_size
);
4923 guest_data
= arg
+ host_dm
->data_start
;
4924 if ((guest_data
- arg
) < 0) {
4925 ret
= -TARGET_EINVAL
;
4928 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4929 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4931 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4933 ret
= -TARGET_EFAULT
;
4937 switch (ie
->host_cmd
) {
4939 case DM_LIST_DEVICES
:
4942 case DM_DEV_SUSPEND
:
4945 case DM_TABLE_STATUS
:
4946 case DM_TABLE_CLEAR
:
4948 case DM_LIST_VERSIONS
:
4952 case DM_DEV_SET_GEOMETRY
:
4953 /* data contains only strings */
4954 memcpy(host_data
, argptr
, guest_data_size
);
4957 memcpy(host_data
, argptr
, guest_data_size
);
4958 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4962 void *gspec
= argptr
;
4963 void *cur_data
= host_data
;
4964 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4965 int spec_size
= thunk_type_size(arg_type
, 0);
4968 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4969 struct dm_target_spec
*spec
= cur_data
;
4973 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4974 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4976 spec
->next
= sizeof(*spec
) + slen
;
4977 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4979 cur_data
+= spec
->next
;
4984 ret
= -TARGET_EINVAL
;
4985 unlock_user(argptr
, guest_data
, 0);
4988 unlock_user(argptr
, guest_data
, 0);
4990 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4991 if (!is_error(ret
)) {
4992 guest_data
= arg
+ host_dm
->data_start
;
4993 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4994 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4995 switch (ie
->host_cmd
) {
5000 case DM_DEV_SUSPEND
:
5003 case DM_TABLE_CLEAR
:
5005 case DM_DEV_SET_GEOMETRY
:
5006 /* no return data */
5008 case DM_LIST_DEVICES
:
5010 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5011 uint32_t remaining_data
= guest_data_size
;
5012 void *cur_data
= argptr
;
5013 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5014 int nl_size
= 12; /* can't use thunk_size due to alignment */
5017 uint32_t next
= nl
->next
;
5019 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5021 if (remaining_data
< nl
->next
) {
5022 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5025 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5026 strcpy(cur_data
+ nl_size
, nl
->name
);
5027 cur_data
+= nl
->next
;
5028 remaining_data
-= nl
->next
;
5032 nl
= (void*)nl
+ next
;
5037 case DM_TABLE_STATUS
:
5039 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5040 void *cur_data
= argptr
;
5041 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5042 int spec_size
= thunk_type_size(arg_type
, 0);
5045 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5046 uint32_t next
= spec
->next
;
5047 int slen
= strlen((char*)&spec
[1]) + 1;
5048 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5049 if (guest_data_size
< spec
->next
) {
5050 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5053 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5054 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5055 cur_data
= argptr
+ spec
->next
;
5056 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5062 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5063 int count
= *(uint32_t*)hdata
;
5064 uint64_t *hdev
= hdata
+ 8;
5065 uint64_t *gdev
= argptr
+ 8;
5068 *(uint32_t*)argptr
= tswap32(count
);
5069 for (i
= 0; i
< count
; i
++) {
5070 *gdev
= tswap64(*hdev
);
5076 case DM_LIST_VERSIONS
:
5078 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5079 uint32_t remaining_data
= guest_data_size
;
5080 void *cur_data
= argptr
;
5081 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5082 int vers_size
= thunk_type_size(arg_type
, 0);
5085 uint32_t next
= vers
->next
;
5087 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5089 if (remaining_data
< vers
->next
) {
5090 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5093 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5094 strcpy(cur_data
+ vers_size
, vers
->name
);
5095 cur_data
+= vers
->next
;
5096 remaining_data
-= vers
->next
;
5100 vers
= (void*)vers
+ next
;
5105 unlock_user(argptr
, guest_data
, 0);
5106 ret
= -TARGET_EINVAL
;
5109 unlock_user(argptr
, guest_data
, guest_data_size
);
5111 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5113 ret
= -TARGET_EFAULT
;
5116 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5117 unlock_user(argptr
, arg
, target_size
);
5124 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5125 int cmd
, abi_long arg
)
5129 const argtype
*arg_type
= ie
->arg_type
;
5130 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5133 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5134 struct blkpg_partition host_part
;
5136 /* Read and convert blkpg */
5138 target_size
= thunk_type_size(arg_type
, 0);
5139 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5141 ret
= -TARGET_EFAULT
;
5144 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5145 unlock_user(argptr
, arg
, 0);
5147 switch (host_blkpg
->op
) {
5148 case BLKPG_ADD_PARTITION
:
5149 case BLKPG_DEL_PARTITION
:
5150 /* payload is struct blkpg_partition */
5153 /* Unknown opcode */
5154 ret
= -TARGET_EINVAL
;
5158 /* Read and convert blkpg->data */
5159 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5160 target_size
= thunk_type_size(part_arg_type
, 0);
5161 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5163 ret
= -TARGET_EFAULT
;
5166 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5167 unlock_user(argptr
, arg
, 0);
5169 /* Swizzle the data pointer to our local copy and call! */
5170 host_blkpg
->data
= &host_part
;
5171 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5177 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5178 int fd
, int cmd
, abi_long arg
)
5180 const argtype
*arg_type
= ie
->arg_type
;
5181 const StructEntry
*se
;
5182 const argtype
*field_types
;
5183 const int *dst_offsets
, *src_offsets
;
5186 abi_ulong
*target_rt_dev_ptr
= NULL
;
5187 unsigned long *host_rt_dev_ptr
= NULL
;
5191 assert(ie
->access
== IOC_W
);
5192 assert(*arg_type
== TYPE_PTR
);
5194 assert(*arg_type
== TYPE_STRUCT
);
5195 target_size
= thunk_type_size(arg_type
, 0);
5196 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5198 return -TARGET_EFAULT
;
5201 assert(*arg_type
== (int)STRUCT_rtentry
);
5202 se
= struct_entries
+ *arg_type
++;
5203 assert(se
->convert
[0] == NULL
);
5204 /* convert struct here to be able to catch rt_dev string */
5205 field_types
= se
->field_types
;
5206 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5207 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5208 for (i
= 0; i
< se
->nb_fields
; i
++) {
5209 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5210 assert(*field_types
== TYPE_PTRVOID
);
5211 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5212 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5213 if (*target_rt_dev_ptr
!= 0) {
5214 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5215 tswapal(*target_rt_dev_ptr
));
5216 if (!*host_rt_dev_ptr
) {
5217 unlock_user(argptr
, arg
, 0);
5218 return -TARGET_EFAULT
;
5221 *host_rt_dev_ptr
= 0;
5226 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5227 argptr
+ src_offsets
[i
],
5228 field_types
, THUNK_HOST
);
5230 unlock_user(argptr
, arg
, 0);
5232 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5234 assert(host_rt_dev_ptr
!= NULL
);
5235 assert(target_rt_dev_ptr
!= NULL
);
5236 if (*host_rt_dev_ptr
!= 0) {
5237 unlock_user((void *)*host_rt_dev_ptr
,
5238 *target_rt_dev_ptr
, 0);
5243 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5244 int fd
, int cmd
, abi_long arg
)
5246 int sig
= target_to_host_signal(arg
);
5247 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5250 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5251 int fd
, int cmd
, abi_long arg
)
5256 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5257 if (is_error(ret
)) {
5261 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5262 if (copy_to_user_timeval(arg
, &tv
)) {
5263 return -TARGET_EFAULT
;
5266 if (copy_to_user_timeval64(arg
, &tv
)) {
5267 return -TARGET_EFAULT
;
5274 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5275 int fd
, int cmd
, abi_long arg
)
5280 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5281 if (is_error(ret
)) {
5285 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5286 if (host_to_target_timespec(arg
, &ts
)) {
5287 return -TARGET_EFAULT
;
5290 if (host_to_target_timespec64(arg
, &ts
)) {
5291 return -TARGET_EFAULT
;
5299 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5300 int fd
, int cmd
, abi_long arg
)
5302 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5303 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
/*
 * Release the three guest string buffers (name/date/desc) that were
 * locked for a DRM version ioctl.  When 'copy' is true, the number of
 * bytes given by the corresponding host *_len field is copied back to
 * guest memory; when false, the buffers are discarded without
 * copy-back (error path).
 *
 * NOTE(review): the 'copy' parameter line was reconstructed from its
 * use in the ternaries below — confirm its declared type upstream.
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}
5321 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5322 struct target_drm_version
*target_ver
)
5324 memset(host_ver
, 0, sizeof(*host_ver
));
5326 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5327 if (host_ver
->name_len
) {
5328 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5329 target_ver
->name_len
, 0);
5330 if (!host_ver
->name
) {
5335 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5336 if (host_ver
->date_len
) {
5337 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5338 target_ver
->date_len
, 0);
5339 if (!host_ver
->date
) {
5344 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5345 if (host_ver
->desc_len
) {
5346 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5347 target_ver
->desc_len
, 0);
5348 if (!host_ver
->desc
) {
5355 unlock_drm_version(host_ver
, target_ver
, false);
/*
 * Copy the host kernel's DRM version result back to the guest: the
 * fixed integer fields are stored into the target struct with
 * __put_user (which handles byte order), and the name/date/desc
 * buffers previously locked by target_to_host_drmversion() are
 * released with copy-back enabled so the strings the host wrote
 * reach guest memory.
 */
static inline void host_to_target_drmversion(
                          struct target_drm_version *target_ver,
                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5372 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5373 int fd
, int cmd
, abi_long arg
)
5375 struct drm_version
*ver
;
5376 struct target_drm_version
*target_ver
;
5379 switch (ie
->host_cmd
) {
5380 case DRM_IOCTL_VERSION
:
5381 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5382 return -TARGET_EFAULT
;
5384 ver
= (struct drm_version
*)buf_temp
;
5385 ret
= target_to_host_drmversion(ver
, target_ver
);
5386 if (!is_error(ret
)) {
5387 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5388 if (is_error(ret
)) {
5389 unlock_drm_version(ver
, target_ver
, false);
5391 host_to_target_drmversion(target_ver
, ver
);
5394 unlock_user_struct(target_ver
, arg
, 0);
5397 return -TARGET_ENOSYS
;
5400 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5401 struct drm_i915_getparam
*gparam
,
5402 int fd
, abi_long arg
)
5406 struct target_drm_i915_getparam
*target_gparam
;
5408 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5409 return -TARGET_EFAULT
;
5412 __get_user(gparam
->param
, &target_gparam
->param
);
5413 gparam
->value
= &value
;
5414 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5415 put_user_s32(value
, target_gparam
->value
);
5417 unlock_user_struct(target_gparam
, arg
, 0);
5421 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5422 int fd
, int cmd
, abi_long arg
)
5424 switch (ie
->host_cmd
) {
5425 case DRM_IOCTL_I915_GETPARAM
:
5426 return do_ioctl_drm_i915_getparam(ie
,
5427 (struct drm_i915_getparam
*)buf_temp
,
5430 return -TARGET_ENOSYS
;
5436 IOCTLEntry ioctl_entries
[] = {
5437 #define IOCTL(cmd, access, ...) \
5438 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5439 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5440 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5441 #define IOCTL_IGNORE(cmd) \
5442 { TARGET_ ## cmd, 0, #cmd },
5447 /* ??? Implement proper locking for ioctls. */
5448 /* do_ioctl() Must return target values and target errnos. */
5449 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5451 const IOCTLEntry
*ie
;
5452 const argtype
*arg_type
;
5454 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5460 if (ie
->target_cmd
== 0) {
5462 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5463 return -TARGET_ENOSYS
;
5465 if (ie
->target_cmd
== cmd
)
5469 arg_type
= ie
->arg_type
;
5471 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5472 } else if (!ie
->host_cmd
) {
5473 /* Some architectures define BSD ioctls in their headers
5474 that are not implemented in Linux. */
5475 return -TARGET_ENOSYS
;
5478 switch(arg_type
[0]) {
5481 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5487 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5491 target_size
= thunk_type_size(arg_type
, 0);
5492 switch(ie
->access
) {
5494 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5495 if (!is_error(ret
)) {
5496 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5498 return -TARGET_EFAULT
;
5499 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5500 unlock_user(argptr
, arg
, target_size
);
5504 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5506 return -TARGET_EFAULT
;
5507 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5508 unlock_user(argptr
, arg
, 0);
5509 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5513 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5515 return -TARGET_EFAULT
;
5516 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5517 unlock_user(argptr
, arg
, 0);
5518 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5519 if (!is_error(ret
)) {
5520 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5522 return -TARGET_EFAULT
;
5523 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5524 unlock_user(argptr
, arg
, target_size
);
5530 qemu_log_mask(LOG_UNIMP
,
5531 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5532 (long)cmd
, arg_type
[0]);
5533 ret
= -TARGET_ENOSYS
;
5539 static const bitmask_transtbl iflag_tbl
[] = {
5540 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5541 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5542 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5543 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5544 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5545 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5546 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5547 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5548 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5549 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5550 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5551 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5552 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5553 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5554 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5558 static const bitmask_transtbl oflag_tbl
[] = {
5559 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5560 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5561 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5562 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5563 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5564 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5565 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5566 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5567 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5568 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5569 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5570 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5571 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5572 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5573 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5574 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5575 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5576 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5577 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5578 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5579 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5580 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5581 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5582 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5586 static const bitmask_transtbl cflag_tbl
[] = {
5587 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5588 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5589 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5590 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5591 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5592 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5593 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5594 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5595 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5596 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5597 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5598 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5599 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5600 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5601 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5602 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5603 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5604 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5605 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5606 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5607 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5608 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5609 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5610 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5611 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5612 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5613 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5614 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5615 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5616 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5617 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5621 static const bitmask_transtbl lflag_tbl
[] = {
5622 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5623 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5624 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5625 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5626 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5627 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5628 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5629 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5630 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5631 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5632 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5633 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5634 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5635 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5636 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5637 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5641 static void target_to_host_termios (void *dst
, const void *src
)
5643 struct host_termios
*host
= dst
;
5644 const struct target_termios
*target
= src
;
5647 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5649 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5651 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5653 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5654 host
->c_line
= target
->c_line
;
5656 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5657 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5658 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5659 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5660 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5661 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5662 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5663 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5664 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5665 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5666 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5667 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5668 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5669 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5670 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5671 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5672 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5673 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5676 static void host_to_target_termios (void *dst
, const void *src
)
5678 struct target_termios
*target
= dst
;
5679 const struct host_termios
*host
= src
;
5682 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5684 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5686 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5688 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5689 target
->c_line
= host
->c_line
;
5691 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5692 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5693 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5694 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5695 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5696 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5697 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5698 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5699 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5700 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5701 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5702 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5703 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5704 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5705 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5706 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5707 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5708 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
/*
 * Thunk descriptor for struct termios conversion: convert[] supplies
 * the two direction-specific converters defined above, and size/align
 * give the layouts of the target and host structs.  Presumably the
 * array slots are indexed by thunk direction (target vs host) — TODO
 * confirm against the StructEntry definition.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5718 static bitmask_transtbl mmap_flags_tbl
[] = {
5719 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5720 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5721 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5722 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5723 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5724 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5725 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5726 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5727 MAP_DENYWRITE
, MAP_DENYWRITE
},
5728 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5729 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5730 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5731 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5732 MAP_NORESERVE
, MAP_NORESERVE
},
5733 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5734 /* MAP_STACK had been ignored by the kernel for quite some time.
5735 Recognize it for the target insofar as we do not want to pass
5736 it through to the host. */
5737 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5742 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5743 * TARGET_I386 is defined if TARGET_X86_64 is defined
5745 #if defined(TARGET_I386)
5747 /* NOTE: there is really one LDT for all the threads */
5748 static uint8_t *ldt_table
;
5750 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5757 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5758 if (size
> bytecount
)
5760 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5762 return -TARGET_EFAULT
;
5763 /* ??? Should this by byteswapped? */
5764 memcpy(p
, ldt_table
, size
);
5765 unlock_user(p
, ptr
, size
);
5769 /* XXX: add locking support */
5770 static abi_long
write_ldt(CPUX86State
*env
,
5771 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5773 struct target_modify_ldt_ldt_s ldt_info
;
5774 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5775 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5776 int seg_not_present
, useable
, lm
;
5777 uint32_t *lp
, entry_1
, entry_2
;
5779 if (bytecount
!= sizeof(ldt_info
))
5780 return -TARGET_EINVAL
;
5781 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5782 return -TARGET_EFAULT
;
5783 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5784 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5785 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5786 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5787 unlock_user_struct(target_ldt_info
, ptr
, 0);
5789 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5790 return -TARGET_EINVAL
;
5791 seg_32bit
= ldt_info
.flags
& 1;
5792 contents
= (ldt_info
.flags
>> 1) & 3;
5793 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5794 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5795 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5796 useable
= (ldt_info
.flags
>> 6) & 1;
5800 lm
= (ldt_info
.flags
>> 7) & 1;
5802 if (contents
== 3) {
5804 return -TARGET_EINVAL
;
5805 if (seg_not_present
== 0)
5806 return -TARGET_EINVAL
;
5808 /* allocate the LDT */
5810 env
->ldt
.base
= target_mmap(0,
5811 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5812 PROT_READ
|PROT_WRITE
,
5813 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5814 if (env
->ldt
.base
== -1)
5815 return -TARGET_ENOMEM
;
5816 memset(g2h(env
->ldt
.base
), 0,
5817 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5818 env
->ldt
.limit
= 0xffff;
5819 ldt_table
= g2h(env
->ldt
.base
);
5822 /* NOTE: same code as Linux kernel */
5823 /* Allow LDTs to be cleared by the user. */
5824 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5827 read_exec_only
== 1 &&
5829 limit_in_pages
== 0 &&
5830 seg_not_present
== 1 &&
5838 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5839 (ldt_info
.limit
& 0x0ffff);
5840 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5841 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5842 (ldt_info
.limit
& 0xf0000) |
5843 ((read_exec_only
^ 1) << 9) |
5845 ((seg_not_present
^ 1) << 15) |
5847 (limit_in_pages
<< 23) |
5851 entry_2
|= (useable
<< 20);
5853 /* Install the new entry ... */
5855 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5856 lp
[0] = tswap32(entry_1
);
5857 lp
[1] = tswap32(entry_2
);
5861 /* specific and weird i386 syscalls */
5862 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5863 unsigned long bytecount
)
5869 ret
= read_ldt(ptr
, bytecount
);
5872 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5875 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5878 ret
= -TARGET_ENOSYS
;
5884 #if defined(TARGET_ABI32)
5885 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5887 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5888 struct target_modify_ldt_ldt_s ldt_info
;
5889 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5890 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5891 int seg_not_present
, useable
, lm
;
5892 uint32_t *lp
, entry_1
, entry_2
;
5895 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5896 if (!target_ldt_info
)
5897 return -TARGET_EFAULT
;
5898 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5899 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5900 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5901 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5902 if (ldt_info
.entry_number
== -1) {
5903 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5904 if (gdt_table
[i
] == 0) {
5905 ldt_info
.entry_number
= i
;
5906 target_ldt_info
->entry_number
= tswap32(i
);
5911 unlock_user_struct(target_ldt_info
, ptr
, 1);
5913 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5914 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5915 return -TARGET_EINVAL
;
5916 seg_32bit
= ldt_info
.flags
& 1;
5917 contents
= (ldt_info
.flags
>> 1) & 3;
5918 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5919 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5920 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5921 useable
= (ldt_info
.flags
>> 6) & 1;
5925 lm
= (ldt_info
.flags
>> 7) & 1;
5928 if (contents
== 3) {
5929 if (seg_not_present
== 0)
5930 return -TARGET_EINVAL
;
5933 /* NOTE: same code as Linux kernel */
5934 /* Allow LDTs to be cleared by the user. */
5935 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5936 if ((contents
== 0 &&
5937 read_exec_only
== 1 &&
5939 limit_in_pages
== 0 &&
5940 seg_not_present
== 1 &&
5948 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5949 (ldt_info
.limit
& 0x0ffff);
5950 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5951 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5952 (ldt_info
.limit
& 0xf0000) |
5953 ((read_exec_only
^ 1) << 9) |
5955 ((seg_not_present
^ 1) << 15) |
5957 (limit_in_pages
<< 23) |
5962 /* Install the new entry ... */
5964 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5965 lp
[0] = tswap32(entry_1
);
5966 lp
[1] = tswap32(entry_2
);
5970 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5972 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5973 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5974 uint32_t base_addr
, limit
, flags
;
5975 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5976 int seg_not_present
, useable
, lm
;
5977 uint32_t *lp
, entry_1
, entry_2
;
5979 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5980 if (!target_ldt_info
)
5981 return -TARGET_EFAULT
;
5982 idx
= tswap32(target_ldt_info
->entry_number
);
5983 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5984 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5985 unlock_user_struct(target_ldt_info
, ptr
, 1);
5986 return -TARGET_EINVAL
;
5988 lp
= (uint32_t *)(gdt_table
+ idx
);
5989 entry_1
= tswap32(lp
[0]);
5990 entry_2
= tswap32(lp
[1]);
5992 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5993 contents
= (entry_2
>> 10) & 3;
5994 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5995 seg_32bit
= (entry_2
>> 22) & 1;
5996 limit_in_pages
= (entry_2
>> 23) & 1;
5997 useable
= (entry_2
>> 20) & 1;
6001 lm
= (entry_2
>> 21) & 1;
6003 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6004 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6005 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6006 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6007 base_addr
= (entry_1
>> 16) |
6008 (entry_2
& 0xff000000) |
6009 ((entry_2
& 0xff) << 16);
6010 target_ldt_info
->base_addr
= tswapal(base_addr
);
6011 target_ldt_info
->limit
= tswap32(limit
);
6012 target_ldt_info
->flags
= tswap32(flags
);
6013 unlock_user_struct(target_ldt_info
, ptr
, 1);
6017 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6019 return -TARGET_ENOSYS
;
6022 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6029 case TARGET_ARCH_SET_GS
:
6030 case TARGET_ARCH_SET_FS
:
6031 if (code
== TARGET_ARCH_SET_GS
)
6035 cpu_x86_load_seg(env
, idx
, 0);
6036 env
->segs
[idx
].base
= addr
;
6038 case TARGET_ARCH_GET_GS
:
6039 case TARGET_ARCH_GET_FS
:
6040 if (code
== TARGET_ARCH_GET_GS
)
6044 val
= env
->segs
[idx
].base
;
6045 if (put_user(val
, addr
, abi_ulong
))
6046 ret
= -TARGET_EFAULT
;
6049 ret
= -TARGET_EINVAL
;
6054 #endif /* defined(TARGET_ABI32 */
6056 #endif /* defined(TARGET_I386) */
6058 #define NEW_STACK_SIZE 0x40000
6061 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6064 pthread_mutex_t mutex
;
6065 pthread_cond_t cond
;
6068 abi_ulong child_tidptr
;
6069 abi_ulong parent_tidptr
;
6073 static void *clone_func(void *arg
)
6075 new_thread_info
*info
= arg
;
6080 rcu_register_thread();
6081 tcg_register_thread();
6085 ts
= (TaskState
*)cpu
->opaque
;
6086 info
->tid
= sys_gettid();
6088 if (info
->child_tidptr
)
6089 put_user_u32(info
->tid
, info
->child_tidptr
);
6090 if (info
->parent_tidptr
)
6091 put_user_u32(info
->tid
, info
->parent_tidptr
);
6092 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6093 /* Enable signals. */
6094 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6095 /* Signal to the parent that we're ready. */
6096 pthread_mutex_lock(&info
->mutex
);
6097 pthread_cond_broadcast(&info
->cond
);
6098 pthread_mutex_unlock(&info
->mutex
);
6099 /* Wait until the parent has finished initializing the tls state. */
6100 pthread_mutex_lock(&clone_lock
);
6101 pthread_mutex_unlock(&clone_lock
);
6107 /* do_fork() Must return host values and target errnos (unlike most
6108 do_*() functions). */
6109 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6110 abi_ulong parent_tidptr
, target_ulong newtls
,
6111 abi_ulong child_tidptr
)
6113 CPUState
*cpu
= env_cpu(env
);
6117 CPUArchState
*new_env
;
6120 flags
&= ~CLONE_IGNORED_FLAGS
;
6122 /* Emulate vfork() with fork() */
6123 if (flags
& CLONE_VFORK
)
6124 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6126 if (flags
& CLONE_VM
) {
6127 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6128 new_thread_info info
;
6129 pthread_attr_t attr
;
6131 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6132 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6133 return -TARGET_EINVAL
;
6136 ts
= g_new0(TaskState
, 1);
6137 init_task_state(ts
);
6139 /* Grab a mutex so that thread setup appears atomic. */
6140 pthread_mutex_lock(&clone_lock
);
6142 /* we create a new CPU instance. */
6143 new_env
= cpu_copy(env
);
6144 /* Init regs that differ from the parent. */
6145 cpu_clone_regs_child(new_env
, newsp
, flags
);
6146 cpu_clone_regs_parent(env
, flags
);
6147 new_cpu
= env_cpu(new_env
);
6148 new_cpu
->opaque
= ts
;
6149 ts
->bprm
= parent_ts
->bprm
;
6150 ts
->info
= parent_ts
->info
;
6151 ts
->signal_mask
= parent_ts
->signal_mask
;
6153 if (flags
& CLONE_CHILD_CLEARTID
) {
6154 ts
->child_tidptr
= child_tidptr
;
6157 if (flags
& CLONE_SETTLS
) {
6158 cpu_set_tls (new_env
, newtls
);
6161 memset(&info
, 0, sizeof(info
));
6162 pthread_mutex_init(&info
.mutex
, NULL
);
6163 pthread_mutex_lock(&info
.mutex
);
6164 pthread_cond_init(&info
.cond
, NULL
);
6166 if (flags
& CLONE_CHILD_SETTID
) {
6167 info
.child_tidptr
= child_tidptr
;
6169 if (flags
& CLONE_PARENT_SETTID
) {
6170 info
.parent_tidptr
= parent_tidptr
;
6173 ret
= pthread_attr_init(&attr
);
6174 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6175 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6176 /* It is not safe to deliver signals until the child has finished
6177 initializing, so temporarily block all signals. */
6178 sigfillset(&sigmask
);
6179 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6180 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6182 /* If this is our first additional thread, we need to ensure we
6183 * generate code for parallel execution and flush old translations.
6185 if (!parallel_cpus
) {
6186 parallel_cpus
= true;
6190 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6191 /* TODO: Free new CPU state if thread creation failed. */
6193 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6194 pthread_attr_destroy(&attr
);
6196 /* Wait for the child to initialize. */
6197 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6202 pthread_mutex_unlock(&info
.mutex
);
6203 pthread_cond_destroy(&info
.cond
);
6204 pthread_mutex_destroy(&info
.mutex
);
6205 pthread_mutex_unlock(&clone_lock
);
6207 /* if no CLONE_VM, we consider it is a fork */
6208 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6209 return -TARGET_EINVAL
;
6212 /* We can't support custom termination signals */
6213 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6214 return -TARGET_EINVAL
;
6217 if (block_signals()) {
6218 return -TARGET_ERESTARTSYS
;
6224 /* Child Process. */
6225 cpu_clone_regs_child(env
, newsp
, flags
);
6227 /* There is a race condition here. The parent process could
6228 theoretically read the TID in the child process before the child
6229 tid is set. This would require using either ptrace
6230 (not implemented) or having *_tidptr to point at a shared memory
6231 mapping. We can't repeat the spinlock hack used above because
6232 the child process gets its own copy of the lock. */
6233 if (flags
& CLONE_CHILD_SETTID
)
6234 put_user_u32(sys_gettid(), child_tidptr
);
6235 if (flags
& CLONE_PARENT_SETTID
)
6236 put_user_u32(sys_gettid(), parent_tidptr
);
6237 ts
= (TaskState
*)cpu
->opaque
;
6238 if (flags
& CLONE_SETTLS
)
6239 cpu_set_tls (env
, newtls
);
6240 if (flags
& CLONE_CHILD_CLEARTID
)
6241 ts
->child_tidptr
= child_tidptr
;
6243 cpu_clone_regs_parent(env
, flags
);
6250 /* warning : doesn't handle linux specific flags... */
6251 static int target_to_host_fcntl_cmd(int cmd
)
6256 case TARGET_F_DUPFD
:
6257 case TARGET_F_GETFD
:
6258 case TARGET_F_SETFD
:
6259 case TARGET_F_GETFL
:
6260 case TARGET_F_SETFL
:
6261 case TARGET_F_OFD_GETLK
:
6262 case TARGET_F_OFD_SETLK
:
6263 case TARGET_F_OFD_SETLKW
:
6266 case TARGET_F_GETLK
:
6269 case TARGET_F_SETLK
:
6272 case TARGET_F_SETLKW
:
6275 case TARGET_F_GETOWN
:
6278 case TARGET_F_SETOWN
:
6281 case TARGET_F_GETSIG
:
6284 case TARGET_F_SETSIG
:
6287 #if TARGET_ABI_BITS == 32
6288 case TARGET_F_GETLK64
:
6291 case TARGET_F_SETLK64
:
6294 case TARGET_F_SETLKW64
:
6298 case TARGET_F_SETLEASE
:
6301 case TARGET_F_GETLEASE
:
6304 #ifdef F_DUPFD_CLOEXEC
6305 case TARGET_F_DUPFD_CLOEXEC
:
6306 ret
= F_DUPFD_CLOEXEC
;
6309 case TARGET_F_NOTIFY
:
6313 case TARGET_F_GETOWN_EX
:
6318 case TARGET_F_SETOWN_EX
:
6323 case TARGET_F_SETPIPE_SZ
:
6326 case TARGET_F_GETPIPE_SZ
:
6331 ret
= -TARGET_EINVAL
;
6335 #if defined(__powerpc64__)
6336 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6337 * is not supported by kernel. The glibc fcntl call actually adjusts
6338 * them to 5, 6 and 7 before making the syscall(). Since we make the
6339 * syscall directly, adjust to what is supported by the kernel.
6341 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6342 ret
-= F_GETLK64
- 5;
6349 #define FLOCK_TRANSTBL \
6351 TRANSTBL_CONVERT(F_RDLCK); \
6352 TRANSTBL_CONVERT(F_WRLCK); \
6353 TRANSTBL_CONVERT(F_UNLCK); \
6354 TRANSTBL_CONVERT(F_EXLCK); \
6355 TRANSTBL_CONVERT(F_SHLCK); \
6358 static int target_to_host_flock(int type
)
6360 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6362 #undef TRANSTBL_CONVERT
6363 return -TARGET_EINVAL
;
6366 static int host_to_target_flock(int type
)
6368 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6370 #undef TRANSTBL_CONVERT
6371 /* if we don't know how to convert the value coming
6372 * from the host we copy to the target field as-is
6377 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6378 abi_ulong target_flock_addr
)
6380 struct target_flock
*target_fl
;
6383 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6384 return -TARGET_EFAULT
;
6387 __get_user(l_type
, &target_fl
->l_type
);
6388 l_type
= target_to_host_flock(l_type
);
6392 fl
->l_type
= l_type
;
6393 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6394 __get_user(fl
->l_start
, &target_fl
->l_start
);
6395 __get_user(fl
->l_len
, &target_fl
->l_len
);
6396 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6397 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6401 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6402 const struct flock64
*fl
)
6404 struct target_flock
*target_fl
;
6407 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6408 return -TARGET_EFAULT
;
6411 l_type
= host_to_target_flock(fl
->l_type
);
6412 __put_user(l_type
, &target_fl
->l_type
);
6413 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6414 __put_user(fl
->l_start
, &target_fl
->l_start
);
6415 __put_user(fl
->l_len
, &target_fl
->l_len
);
6416 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6417 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6421 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6422 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6424 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6425 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6426 abi_ulong target_flock_addr
)
6428 struct target_oabi_flock64
*target_fl
;
6431 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6432 return -TARGET_EFAULT
;
6435 __get_user(l_type
, &target_fl
->l_type
);
6436 l_type
= target_to_host_flock(l_type
);
6440 fl
->l_type
= l_type
;
6441 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6442 __get_user(fl
->l_start
, &target_fl
->l_start
);
6443 __get_user(fl
->l_len
, &target_fl
->l_len
);
6444 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6445 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6449 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6450 const struct flock64
*fl
)
6452 struct target_oabi_flock64
*target_fl
;
6455 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6456 return -TARGET_EFAULT
;
6459 l_type
= host_to_target_flock(fl
->l_type
);
6460 __put_user(l_type
, &target_fl
->l_type
);
6461 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6462 __put_user(fl
->l_start
, &target_fl
->l_start
);
6463 __put_user(fl
->l_len
, &target_fl
->l_len
);
6464 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6465 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6470 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6471 abi_ulong target_flock_addr
)
6473 struct target_flock64
*target_fl
;
6476 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6477 return -TARGET_EFAULT
;
6480 __get_user(l_type
, &target_fl
->l_type
);
6481 l_type
= target_to_host_flock(l_type
);
6485 fl
->l_type
= l_type
;
6486 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6487 __get_user(fl
->l_start
, &target_fl
->l_start
);
6488 __get_user(fl
->l_len
, &target_fl
->l_len
);
6489 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6490 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6494 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6495 const struct flock64
*fl
)
6497 struct target_flock64
*target_fl
;
6500 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6501 return -TARGET_EFAULT
;
6504 l_type
= host_to_target_flock(fl
->l_type
);
6505 __put_user(l_type
, &target_fl
->l_type
);
6506 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6507 __put_user(fl
->l_start
, &target_fl
->l_start
);
6508 __put_user(fl
->l_len
, &target_fl
->l_len
);
6509 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6510 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6514 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6516 struct flock64 fl64
;
6518 struct f_owner_ex fox
;
6519 struct target_f_owner_ex
*target_fox
;
6522 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6524 if (host_cmd
== -TARGET_EINVAL
)
6528 case TARGET_F_GETLK
:
6529 ret
= copy_from_user_flock(&fl64
, arg
);
6533 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6535 ret
= copy_to_user_flock(arg
, &fl64
);
6539 case TARGET_F_SETLK
:
6540 case TARGET_F_SETLKW
:
6541 ret
= copy_from_user_flock(&fl64
, arg
);
6545 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6548 case TARGET_F_GETLK64
:
6549 case TARGET_F_OFD_GETLK
:
6550 ret
= copy_from_user_flock64(&fl64
, arg
);
6554 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6556 ret
= copy_to_user_flock64(arg
, &fl64
);
6559 case TARGET_F_SETLK64
:
6560 case TARGET_F_SETLKW64
:
6561 case TARGET_F_OFD_SETLK
:
6562 case TARGET_F_OFD_SETLKW
:
6563 ret
= copy_from_user_flock64(&fl64
, arg
);
6567 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6570 case TARGET_F_GETFL
:
6571 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6573 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6577 case TARGET_F_SETFL
:
6578 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6579 target_to_host_bitmask(arg
,
6584 case TARGET_F_GETOWN_EX
:
6585 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6587 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6588 return -TARGET_EFAULT
;
6589 target_fox
->type
= tswap32(fox
.type
);
6590 target_fox
->pid
= tswap32(fox
.pid
);
6591 unlock_user_struct(target_fox
, arg
, 1);
6597 case TARGET_F_SETOWN_EX
:
6598 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6599 return -TARGET_EFAULT
;
6600 fox
.type
= tswap32(target_fox
->type
);
6601 fox
.pid
= tswap32(target_fox
->pid
);
6602 unlock_user_struct(target_fox
, arg
, 0);
6603 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6607 case TARGET_F_SETOWN
:
6608 case TARGET_F_GETOWN
:
6609 case TARGET_F_SETSIG
:
6610 case TARGET_F_GETSIG
:
6611 case TARGET_F_SETLEASE
:
6612 case TARGET_F_GETLEASE
:
6613 case TARGET_F_SETPIPE_SZ
:
6614 case TARGET_F_GETPIPE_SZ
:
6615 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6619 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/* Clamp a 32-bit uid to the 16-bit range for UID16 targets. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65535 : uid;
}
/* Clamp a 32-bit gid to the 16-bit range for UID16 targets. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65535 : gid;
}
/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1) {
        return -1;
    }
    return uid;
}
/* Widen a 16-bit gid, preserving the -1 "no change" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1) {
        return -1;
    }
    return gid;
}
/* Byte-swap a 16-bit uid/gid to guest byte order (UID16 targets). */
static inline int tswapid(int id)
{
    return tswap16(id);
}
6663 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6665 #else /* !USE_UID16 */
/* No UID16 on this target: 32-bit uids pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}
/* No UID16 on this target: 32-bit gids pass through unchanged. */
static inline int high2lowgid(int gid)
{
    return gid;
}
/* No UID16 on this target: uid is already full-width. */
static inline int low2highuid(int uid)
{
    return uid;
}
/* No UID16 on this target: gid is already full-width. */
static inline int low2highgid(int gid)
{
    return gid;
}
/* 32-bit ids are stored via put_user_u32, which swaps; identity here. */
static inline int tswapid(int id)
{
    return id;
}
6687 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6689 #endif /* USE_UID16 */
6691 /* We must do direct syscalls for setting UID/GID, because we want to
6692 * implement the Linux system call semantics of "change only for this thread",
6693 * not the libc/POSIX semantics of "change for all threads in process".
6694 * (See http://ewontfix.com/17/ for more details.)
6695 * We use the 32-bit version of the syscalls if present; if it is not
6696 * then either the host architecture supports 32-bit UIDs natively with
6697 * the standard syscall, or the 16-bit UID is the best we can do.
6699 #ifdef __NR_setuid32
6700 #define __NR_sys_setuid __NR_setuid32
6702 #define __NR_sys_setuid __NR_setuid
6704 #ifdef __NR_setgid32
6705 #define __NR_sys_setgid __NR_setgid32
6707 #define __NR_sys_setgid __NR_setgid
6709 #ifdef __NR_setresuid32
6710 #define __NR_sys_setresuid __NR_setresuid32
6712 #define __NR_sys_setresuid __NR_setresuid
6714 #ifdef __NR_setresgid32
6715 #define __NR_sys_setresgid __NR_setresgid32
6717 #define __NR_sys_setresgid __NR_setresgid
6720 _syscall1(int, sys_setuid
, uid_t
, uid
)
6721 _syscall1(int, sys_setgid
, gid_t
, gid
)
6722 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6723 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6725 void syscall_init(void)
6728 const argtype
*arg_type
;
6732 thunk_init(STRUCT_MAX
);
6734 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6735 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6736 #include "syscall_types.h"
6738 #undef STRUCT_SPECIAL
6740 /* Build target_to_host_errno_table[] table from
6741 * host_to_target_errno_table[]. */
6742 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6743 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6746 /* we patch the ioctl size if necessary. We rely on the fact that
6747 no ioctl has all the bits at '1' in the size field */
6749 while (ie
->target_cmd
!= 0) {
6750 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6751 TARGET_IOC_SIZEMASK
) {
6752 arg_type
= ie
->arg_type
;
6753 if (arg_type
[0] != TYPE_PTR
) {
6754 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6759 size
= thunk_type_size(arg_type
, 0);
6760 ie
->target_cmd
= (ie
->target_cmd
&
6761 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6762 (size
<< TARGET_IOC_SIZESHIFT
);
6765 /* automatic consistency check if same arch */
6766 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6767 (defined(__x86_64__) && defined(TARGET_X86_64))
6768 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6769 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6770 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6777 #ifdef TARGET_NR_truncate64
6778 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6783 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
6787 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6791 #ifdef TARGET_NR_ftruncate64
6792 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6797 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
6801 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6805 #if defined(TARGET_NR_timer_settime) || \
6806 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6807 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
6808 abi_ulong target_addr
)
6810 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
6811 offsetof(struct target_itimerspec
,
6813 target_to_host_timespec(&host_its
->it_value
, target_addr
+
6814 offsetof(struct target_itimerspec
,
6816 return -TARGET_EFAULT
;
6823 #if defined(TARGET_NR_timer_settime64) || \
6824 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6825 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
6826 abi_ulong target_addr
)
6828 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
6829 offsetof(struct target__kernel_itimerspec
,
6831 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
6832 offsetof(struct target__kernel_itimerspec
,
6834 return -TARGET_EFAULT
;
6841 #if ((defined(TARGET_NR_timerfd_gettime) || \
6842 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6843 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6844 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6845 struct itimerspec
*host_its
)
6847 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
6849 &host_its
->it_interval
) ||
6850 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
6852 &host_its
->it_value
)) {
6853 return -TARGET_EFAULT
;
6859 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6860 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6861 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6862 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
6863 struct itimerspec
*host_its
)
6865 if (host_to_target_timespec64(target_addr
+
6866 offsetof(struct target__kernel_itimerspec
,
6868 &host_its
->it_interval
) ||
6869 host_to_target_timespec64(target_addr
+
6870 offsetof(struct target__kernel_itimerspec
,
6872 &host_its
->it_value
)) {
6873 return -TARGET_EFAULT
;
6879 #if defined(TARGET_NR_adjtimex) || \
6880 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6881 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6882 abi_long target_addr
)
6884 struct target_timex
*target_tx
;
6886 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6887 return -TARGET_EFAULT
;
6890 __get_user(host_tx
->modes
, &target_tx
->modes
);
6891 __get_user(host_tx
->offset
, &target_tx
->offset
);
6892 __get_user(host_tx
->freq
, &target_tx
->freq
);
6893 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6894 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6895 __get_user(host_tx
->status
, &target_tx
->status
);
6896 __get_user(host_tx
->constant
, &target_tx
->constant
);
6897 __get_user(host_tx
->precision
, &target_tx
->precision
);
6898 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6899 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6900 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6901 __get_user(host_tx
->tick
, &target_tx
->tick
);
6902 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6903 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6904 __get_user(host_tx
->shift
, &target_tx
->shift
);
6905 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6906 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6907 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6908 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6909 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6910 __get_user(host_tx
->tai
, &target_tx
->tai
);
6912 unlock_user_struct(target_tx
, target_addr
, 0);
6916 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6917 struct timex
*host_tx
)
6919 struct target_timex
*target_tx
;
6921 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6922 return -TARGET_EFAULT
;
6925 __put_user(host_tx
->modes
, &target_tx
->modes
);
6926 __put_user(host_tx
->offset
, &target_tx
->offset
);
6927 __put_user(host_tx
->freq
, &target_tx
->freq
);
6928 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6929 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6930 __put_user(host_tx
->status
, &target_tx
->status
);
6931 __put_user(host_tx
->constant
, &target_tx
->constant
);
6932 __put_user(host_tx
->precision
, &target_tx
->precision
);
6933 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6934 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6935 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6936 __put_user(host_tx
->tick
, &target_tx
->tick
);
6937 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6938 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6939 __put_user(host_tx
->shift
, &target_tx
->shift
);
6940 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6941 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6942 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6943 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6944 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6945 __put_user(host_tx
->tai
, &target_tx
->tai
);
6947 unlock_user_struct(target_tx
, target_addr
, 1);
6952 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6953 abi_ulong target_addr
)
6955 struct target_sigevent
*target_sevp
;
6957 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6958 return -TARGET_EFAULT
;
6961 /* This union is awkward on 64 bit systems because it has a 32 bit
6962 * integer and a pointer in it; we follow the conversion approach
6963 * used for handling sigval types in signal.c so the guest should get
6964 * the correct value back even if we did a 64 bit byteswap and it's
6965 * using the 32 bit integer.
6967 host_sevp
->sigev_value
.sival_ptr
=
6968 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6969 host_sevp
->sigev_signo
=
6970 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6971 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6972 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6974 unlock_user_struct(target_sevp
, target_addr
, 1);
6978 #if defined(TARGET_NR_mlockall)
6979 static inline int target_to_host_mlockall_arg(int arg
)
6983 if (arg
& TARGET_MCL_CURRENT
) {
6984 result
|= MCL_CURRENT
;
6986 if (arg
& TARGET_MCL_FUTURE
) {
6987 result
|= MCL_FUTURE
;
6990 if (arg
& TARGET_MCL_ONFAULT
) {
6991 result
|= MCL_ONFAULT
;
6999 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7000 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7001 defined(TARGET_NR_newfstatat))
7002 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7003 abi_ulong target_addr
,
7004 struct stat
*host_st
)
7006 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7007 if (((CPUARMState
*)cpu_env
)->eabi
) {
7008 struct target_eabi_stat64
*target_st
;
7010 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7011 return -TARGET_EFAULT
;
7012 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7013 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7014 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7015 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7016 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7018 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7019 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7020 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7021 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7022 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7023 __put_user(host_st
->st_size
, &target_st
->st_size
);
7024 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7025 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7026 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7027 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7028 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7029 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7030 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7031 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7032 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7034 unlock_user_struct(target_st
, target_addr
, 1);
7038 #if defined(TARGET_HAS_STRUCT_STAT64)
7039 struct target_stat64
*target_st
;
7041 struct target_stat
*target_st
;
7044 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7045 return -TARGET_EFAULT
;
7046 memset(target_st
, 0, sizeof(*target_st
));
7047 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7048 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7049 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7050 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7052 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7053 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7054 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7055 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7056 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7057 /* XXX: better use of kernel struct */
7058 __put_user(host_st
->st_size
, &target_st
->st_size
);
7059 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7060 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7061 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7062 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7063 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7064 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7065 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7066 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7067 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7069 unlock_user_struct(target_st
, target_addr
, 1);
7076 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7077 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7078 abi_ulong target_addr
)
7080 struct target_statx
*target_stx
;
7082 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7083 return -TARGET_EFAULT
;
7085 memset(target_stx
, 0, sizeof(*target_stx
));
7087 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7088 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7089 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7090 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7091 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7092 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7093 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7094 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7095 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7096 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7097 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7098 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7099 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7100 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7101 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7102 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7103 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7104 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7105 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7106 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7107 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7108 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7109 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7111 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Raw futex syscall dispatcher: choose between __NR_futex and the
 * 32-bit-host __NR_futex_time64 variant based on the size of time_t.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7142 static int do_safe_futex(int *uaddr
, int op
, int val
,
7143 const struct timespec
*timeout
, int *uaddr2
,
7146 #if HOST_LONG_BITS == 64
7147 #if defined(__NR_futex)
7148 /* always a 64-bit time_t, it doesn't define _time64 version */
7149 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7151 #else /* HOST_LONG_BITS == 64 */
7152 #if defined(__NR_futex_time64)
7153 if (sizeof(timeout
->tv_sec
) == 8) {
7154 /* _time64 function on 32bit arch */
7155 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7159 #if defined(__NR_futex)
7160 /* old function on 32bit arch */
7161 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7163 #endif /* HOST_LONG_BITS == 64 */
7164 return -TARGET_ENOSYS
;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
/*
 * Emulate the futex(2) syscall for a guest using 32-bit time_t.
 * Guest pointers (uaddr, uaddr2, timeout) are guest virtual addresses;
 * the timeout, when present, is converted from the guest layout.
 * Returns 0 / positive on success, -TARGET_E* on failure.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /*
             * FIX: the conversion result was previously ignored; a faulting
             * guest pointer would silently hand an uninitialized timespec
             * to the host kernel.
             */
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/*
 * Emulate the futex_time64(2) syscall: identical to do_futex() except the
 * guest timeout uses the 64-bit time_t layout (target_to_host_timespec64).
 * Returns 0 / positive on success, -TARGET_E* on failure.
 */
static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /*
             * FIX: the conversion result was previously ignored; a faulting
             * guest pointer would silently hand an uninitialized timespec
             * to the host kernel.
             */
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).  The guest-supplied struct file_handle at
 * 'handle' is translated to a host one (handle_bytes/handle_type are
 * byte-swapped; the payload is copied opaquely, as the man page requires),
 * and the resulting mount id is written back to 'mount_id'.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest structure */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): duplicate the guest file_handle, fix the
 * byte order of its header fields, and pass it to the host syscall with
 * the open flags translated to host values.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest structure */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Emulate signalfd4(2): convert the guest sigset and flags to host form,
 * create the descriptor, and register the fd translator that converts
 * the signalfd_siginfo records read back by the guest.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* only the nonblock/cloexec bits are valid for signalfd4 */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits carry the terminating signal number */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    return status;
}
7399 static int open_self_cmdline(void *cpu_env
, int fd
)
7401 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7402 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7405 for (i
= 0; i
< bprm
->argc
; i
++) {
7406 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7408 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7416 static int open_self_maps(void *cpu_env
, int fd
)
7418 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7419 TaskState
*ts
= cpu
->opaque
;
7420 GSList
*map_info
= read_self_maps();
7424 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7425 MapInfo
*e
= (MapInfo
*) s
->data
;
7427 if (h2g_valid(e
->start
)) {
7428 unsigned long min
= e
->start
;
7429 unsigned long max
= e
->end
;
7430 int flags
= page_get_flags(h2g(min
));
7433 max
= h2g_valid(max
- 1) ?
7434 max
: (uintptr_t) g2h(GUEST_ADDR_MAX
) + 1;
7436 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7440 if (h2g(min
) == ts
->info
->stack_limit
) {
7446 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7447 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7448 h2g(min
), h2g(max
- 1) + 1,
7449 e
->is_read
? 'r' : '-',
7450 e
->is_write
? 'w' : '-',
7451 e
->is_exec
? 'x' : '-',
7452 e
->is_priv
? 'p' : '-',
7453 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7455 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7462 free_self_maps(map_info
);
7464 #ifdef TARGET_VSYSCALL_PAGE
7466 * We only support execution from the vsyscall page.
7467 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7469 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7470 " --xp 00000000 00:00 0",
7471 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7472 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7478 static int open_self_stat(void *cpu_env
, int fd
)
7480 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7481 TaskState
*ts
= cpu
->opaque
;
7482 g_autoptr(GString
) buf
= g_string_new(NULL
);
7485 for (i
= 0; i
< 44; i
++) {
7488 g_string_printf(buf
, FMT_pid
" ", getpid());
7489 } else if (i
== 1) {
7491 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7492 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7493 g_string_printf(buf
, "(%.15s) ", bin
);
7494 } else if (i
== 27) {
7496 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7498 /* for the rest, there is MasterCard */
7499 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7502 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7510 static int open_self_auxv(void *cpu_env
, int fd
)
7512 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7513 TaskState
*ts
= cpu
->opaque
;
7514 abi_ulong auxv
= ts
->info
->saved_auxv
;
7515 abi_ulong len
= ts
->info
->auxv_len
;
7519 * Auxiliary vector is stored in target process stack.
7520 * read in whole auxv vector and copy it to file
7522 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7526 r
= write(fd
, ptr
, len
);
7533 lseek(fd
, 0, SEEK_SET
);
7534 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if 'filename' names the entry 'entry' of the calling process's
 * own /proc directory — i.e. "/proc/self/<entry>" or "/proc/<mypid>/<entry>"
 * — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7564 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7565 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake-/proc table below. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7572 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7573 static int open_net_route(void *cpu_env
, int fd
)
7580 fp
= fopen("/proc/net/route", "r");
7587 read
= getline(&line
, &len
, fp
);
7588 dprintf(fd
, "%s", line
);
7592 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7594 uint32_t dest
, gw
, mask
;
7595 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7598 fields
= sscanf(line
,
7599 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7600 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7601 &mask
, &mtu
, &window
, &irtt
);
7605 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7606 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7607 metric
, tswap32(mask
), mtu
, window
, irtt
);
7617 #if defined(TARGET_SPARC)
/* Back /proc/cpuinfo for SPARC guests with a minimal sun4u entry. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
7625 #if defined(TARGET_HPPA)
/* Back /proc/cpuinfo for HPPA guests, describing the emulated B160L. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
7637 #if defined(TARGET_M68K)
/* Back /proc/hardware for M68K guests with the emulated model name. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
7645 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7648 const char *filename
;
7649 int (*fill
)(void *cpu_env
, int fd
);
7650 int (*cmp
)(const char *s1
, const char *s2
);
7652 const struct fake_open
*fake_open
;
7653 static const struct fake_open fakes
[] = {
7654 { "maps", open_self_maps
, is_proc_myself
},
7655 { "stat", open_self_stat
, is_proc_myself
},
7656 { "auxv", open_self_auxv
, is_proc_myself
},
7657 { "cmdline", open_self_cmdline
, is_proc_myself
},
7658 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7659 { "/proc/net/route", open_net_route
, is_proc
},
7661 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7662 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
7664 #if defined(TARGET_M68K)
7665 { "/proc/hardware", open_hardware
, is_proc
},
7667 { NULL
, NULL
, NULL
}
7670 if (is_proc_myself(pathname
, "exe")) {
7671 int execfd
= qemu_getauxval(AT_EXECFD
);
7672 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7675 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7676 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7681 if (fake_open
->filename
) {
7683 char filename
[PATH_MAX
];
7686 /* create temporary file to map stat to */
7687 tmpdir
= getenv("TMPDIR");
7690 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7691 fd
= mkstemp(filename
);
7697 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7703 lseek(fd
, 0, SEEK_SET
);
7708 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7711 #define TIMER_MAGIC 0x0caf0000
7712 #define TIMER_MAGIC_MASK 0xffff0000
7714 /* Convert QEMU provided timer ID back to internal 16bit index format */
7715 static target_timer_t
get_timer_id(abi_long arg
)
7717 target_timer_t timerid
= arg
;
7719 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7720 return -TARGET_EINVAL
;
7725 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7726 return -TARGET_EINVAL
;
7732 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7734 abi_ulong target_addr
,
7737 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7738 unsigned host_bits
= sizeof(*host_mask
) * 8;
7739 abi_ulong
*target_mask
;
7742 assert(host_size
>= target_size
);
7744 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7746 return -TARGET_EFAULT
;
7748 memset(host_mask
, 0, host_size
);
7750 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7751 unsigned bit
= i
* target_bits
;
7754 __get_user(val
, &target_mask
[i
]);
7755 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7756 if (val
& (1UL << j
)) {
7757 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7762 unlock_user(target_mask
, target_addr
, 0);
7766 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7768 abi_ulong target_addr
,
7771 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7772 unsigned host_bits
= sizeof(*host_mask
) * 8;
7773 abi_ulong
*target_mask
;
7776 assert(host_size
>= target_size
);
7778 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7780 return -TARGET_EFAULT
;
7783 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7784 unsigned bit
= i
* target_bits
;
7787 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7788 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7792 __put_user(val
, &target_mask
[i
]);
7795 unlock_user(target_mask
, target_addr
, target_size
);
7799 /* This is an internal helper for do_syscall so that it is easier
7800 * to have a single return point, so that actions, such as logging
7801 * of syscall results, can be performed.
7802 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7804 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7805 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7806 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7809 CPUState
*cpu
= env_cpu(cpu_env
);
7811 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7812 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7813 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7814 || defined(TARGET_NR_statx)
7817 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7818 || defined(TARGET_NR_fstatfs)
7824 case TARGET_NR_exit
:
7825 /* In old applications this may be used to implement _exit(2).
7826 However in threaded applictions it is used for thread termination,
7827 and _exit_group is used for application termination.
7828 Do thread termination if we have more then one thread. */
7830 if (block_signals()) {
7831 return -TARGET_ERESTARTSYS
;
7834 pthread_mutex_lock(&clone_lock
);
7836 if (CPU_NEXT(first_cpu
)) {
7837 TaskState
*ts
= cpu
->opaque
;
7839 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
7840 object_unref(OBJECT(cpu
));
7842 * At this point the CPU should be unrealized and removed
7843 * from cpu lists. We can clean-up the rest of the thread
7844 * data without the lock held.
7847 pthread_mutex_unlock(&clone_lock
);
7849 if (ts
->child_tidptr
) {
7850 put_user_u32(0, ts
->child_tidptr
);
7851 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7856 rcu_unregister_thread();
7860 pthread_mutex_unlock(&clone_lock
);
7861 preexit_cleanup(cpu_env
, arg1
);
7863 return 0; /* avoid warning */
7864 case TARGET_NR_read
:
7865 if (arg2
== 0 && arg3
== 0) {
7866 return get_errno(safe_read(arg1
, 0, 0));
7868 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7869 return -TARGET_EFAULT
;
7870 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7872 fd_trans_host_to_target_data(arg1
)) {
7873 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7875 unlock_user(p
, arg2
, ret
);
7878 case TARGET_NR_write
:
7879 if (arg2
== 0 && arg3
== 0) {
7880 return get_errno(safe_write(arg1
, 0, 0));
7882 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7883 return -TARGET_EFAULT
;
7884 if (fd_trans_target_to_host_data(arg1
)) {
7885 void *copy
= g_malloc(arg3
);
7886 memcpy(copy
, p
, arg3
);
7887 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7889 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7893 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7895 unlock_user(p
, arg2
, 0);
7898 #ifdef TARGET_NR_open
7899 case TARGET_NR_open
:
7900 if (!(p
= lock_user_string(arg1
)))
7901 return -TARGET_EFAULT
;
7902 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7903 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7905 fd_trans_unregister(ret
);
7906 unlock_user(p
, arg1
, 0);
7909 case TARGET_NR_openat
:
7910 if (!(p
= lock_user_string(arg2
)))
7911 return -TARGET_EFAULT
;
7912 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7913 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7915 fd_trans_unregister(ret
);
7916 unlock_user(p
, arg2
, 0);
7918 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7919 case TARGET_NR_name_to_handle_at
:
7920 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7923 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7924 case TARGET_NR_open_by_handle_at
:
7925 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7926 fd_trans_unregister(ret
);
7929 case TARGET_NR_close
:
7930 fd_trans_unregister(arg1
);
7931 return get_errno(close(arg1
));
7934 return do_brk(arg1
);
7935 #ifdef TARGET_NR_fork
7936 case TARGET_NR_fork
:
7937 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7939 #ifdef TARGET_NR_waitpid
7940 case TARGET_NR_waitpid
:
7943 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7944 if (!is_error(ret
) && arg2
&& ret
7945 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7946 return -TARGET_EFAULT
;
7950 #ifdef TARGET_NR_waitid
7951 case TARGET_NR_waitid
:
7955 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7956 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7957 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7958 return -TARGET_EFAULT
;
7959 host_to_target_siginfo(p
, &info
);
7960 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7965 #ifdef TARGET_NR_creat /* not on alpha */
7966 case TARGET_NR_creat
:
7967 if (!(p
= lock_user_string(arg1
)))
7968 return -TARGET_EFAULT
;
7969 ret
= get_errno(creat(p
, arg2
));
7970 fd_trans_unregister(ret
);
7971 unlock_user(p
, arg1
, 0);
7974 #ifdef TARGET_NR_link
7975 case TARGET_NR_link
:
7978 p
= lock_user_string(arg1
);
7979 p2
= lock_user_string(arg2
);
7981 ret
= -TARGET_EFAULT
;
7983 ret
= get_errno(link(p
, p2
));
7984 unlock_user(p2
, arg2
, 0);
7985 unlock_user(p
, arg1
, 0);
7989 #if defined(TARGET_NR_linkat)
7990 case TARGET_NR_linkat
:
7994 return -TARGET_EFAULT
;
7995 p
= lock_user_string(arg2
);
7996 p2
= lock_user_string(arg4
);
7998 ret
= -TARGET_EFAULT
;
8000 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8001 unlock_user(p
, arg2
, 0);
8002 unlock_user(p2
, arg4
, 0);
8006 #ifdef TARGET_NR_unlink
8007 case TARGET_NR_unlink
:
8008 if (!(p
= lock_user_string(arg1
)))
8009 return -TARGET_EFAULT
;
8010 ret
= get_errno(unlink(p
));
8011 unlock_user(p
, arg1
, 0);
8014 #if defined(TARGET_NR_unlinkat)
8015 case TARGET_NR_unlinkat
:
8016 if (!(p
= lock_user_string(arg2
)))
8017 return -TARGET_EFAULT
;
8018 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8019 unlock_user(p
, arg2
, 0);
8022 case TARGET_NR_execve
:
8024 char **argp
, **envp
;
8027 abi_ulong guest_argp
;
8028 abi_ulong guest_envp
;
8035 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8036 if (get_user_ual(addr
, gp
))
8037 return -TARGET_EFAULT
;
8044 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8045 if (get_user_ual(addr
, gp
))
8046 return -TARGET_EFAULT
;
8052 argp
= g_new0(char *, argc
+ 1);
8053 envp
= g_new0(char *, envc
+ 1);
8055 for (gp
= guest_argp
, q
= argp
; gp
;
8056 gp
+= sizeof(abi_ulong
), q
++) {
8057 if (get_user_ual(addr
, gp
))
8061 if (!(*q
= lock_user_string(addr
)))
8063 total_size
+= strlen(*q
) + 1;
8067 for (gp
= guest_envp
, q
= envp
; gp
;
8068 gp
+= sizeof(abi_ulong
), q
++) {
8069 if (get_user_ual(addr
, gp
))
8073 if (!(*q
= lock_user_string(addr
)))
8075 total_size
+= strlen(*q
) + 1;
8079 if (!(p
= lock_user_string(arg1
)))
8081 /* Although execve() is not an interruptible syscall it is
8082 * a special case where we must use the safe_syscall wrapper:
8083 * if we allow a signal to happen before we make the host
8084 * syscall then we will 'lose' it, because at the point of
8085 * execve the process leaves QEMU's control. So we use the
8086 * safe syscall wrapper to ensure that we either take the
8087 * signal as a guest signal, or else it does not happen
8088 * before the execve completes and makes it the other
8089 * program's problem.
8091 ret
= get_errno(safe_execve(p
, argp
, envp
));
8092 unlock_user(p
, arg1
, 0);
8097 ret
= -TARGET_EFAULT
;
8100 for (gp
= guest_argp
, q
= argp
; *q
;
8101 gp
+= sizeof(abi_ulong
), q
++) {
8102 if (get_user_ual(addr
, gp
)
8105 unlock_user(*q
, addr
, 0);
8107 for (gp
= guest_envp
, q
= envp
; *q
;
8108 gp
+= sizeof(abi_ulong
), q
++) {
8109 if (get_user_ual(addr
, gp
)
8112 unlock_user(*q
, addr
, 0);
8119 case TARGET_NR_chdir
:
8120 if (!(p
= lock_user_string(arg1
)))
8121 return -TARGET_EFAULT
;
8122 ret
= get_errno(chdir(p
));
8123 unlock_user(p
, arg1
, 0);
8125 #ifdef TARGET_NR_time
8126 case TARGET_NR_time
:
8129 ret
= get_errno(time(&host_time
));
8132 && put_user_sal(host_time
, arg1
))
8133 return -TARGET_EFAULT
;
8137 #ifdef TARGET_NR_mknod
8138 case TARGET_NR_mknod
:
8139 if (!(p
= lock_user_string(arg1
)))
8140 return -TARGET_EFAULT
;
8141 ret
= get_errno(mknod(p
, arg2
, arg3
));
8142 unlock_user(p
, arg1
, 0);
8145 #if defined(TARGET_NR_mknodat)
8146 case TARGET_NR_mknodat
:
8147 if (!(p
= lock_user_string(arg2
)))
8148 return -TARGET_EFAULT
;
8149 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8150 unlock_user(p
, arg2
, 0);
8153 #ifdef TARGET_NR_chmod
8154 case TARGET_NR_chmod
:
8155 if (!(p
= lock_user_string(arg1
)))
8156 return -TARGET_EFAULT
;
8157 ret
= get_errno(chmod(p
, arg2
));
8158 unlock_user(p
, arg1
, 0);
8161 #ifdef TARGET_NR_lseek
8162 case TARGET_NR_lseek
:
8163 return get_errno(lseek(arg1
, arg2
, arg3
));
8165 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8166 /* Alpha specific */
8167 case TARGET_NR_getxpid
:
8168 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8169 return get_errno(getpid());
8171 #ifdef TARGET_NR_getpid
8172 case TARGET_NR_getpid
:
8173 return get_errno(getpid());
8175 case TARGET_NR_mount
:
8177 /* need to look at the data field */
8181 p
= lock_user_string(arg1
);
8183 return -TARGET_EFAULT
;
8189 p2
= lock_user_string(arg2
);
8192 unlock_user(p
, arg1
, 0);
8194 return -TARGET_EFAULT
;
8198 p3
= lock_user_string(arg3
);
8201 unlock_user(p
, arg1
, 0);
8203 unlock_user(p2
, arg2
, 0);
8204 return -TARGET_EFAULT
;
8210 /* FIXME - arg5 should be locked, but it isn't clear how to
8211 * do that since it's not guaranteed to be a NULL-terminated
8215 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8217 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8219 ret
= get_errno(ret
);
8222 unlock_user(p
, arg1
, 0);
8224 unlock_user(p2
, arg2
, 0);
8226 unlock_user(p3
, arg3
, 0);
8230 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8231 #if defined(TARGET_NR_umount)
8232 case TARGET_NR_umount
:
8234 #if defined(TARGET_NR_oldumount)
8235 case TARGET_NR_oldumount
:
8237 if (!(p
= lock_user_string(arg1
)))
8238 return -TARGET_EFAULT
;
8239 ret
= get_errno(umount(p
));
8240 unlock_user(p
, arg1
, 0);
8243 #ifdef TARGET_NR_stime /* not on alpha */
8244 case TARGET_NR_stime
:
8248 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8249 return -TARGET_EFAULT
;
8251 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8254 #ifdef TARGET_NR_alarm /* not on alpha */
8255 case TARGET_NR_alarm
:
8258 #ifdef TARGET_NR_pause /* not on alpha */
8259 case TARGET_NR_pause
:
8260 if (!block_signals()) {
8261 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8263 return -TARGET_EINTR
;
8265 #ifdef TARGET_NR_utime
8266 case TARGET_NR_utime
:
8268 struct utimbuf tbuf
, *host_tbuf
;
8269 struct target_utimbuf
*target_tbuf
;
8271 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8272 return -TARGET_EFAULT
;
8273 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8274 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8275 unlock_user_struct(target_tbuf
, arg2
, 0);
8280 if (!(p
= lock_user_string(arg1
)))
8281 return -TARGET_EFAULT
;
8282 ret
= get_errno(utime(p
, host_tbuf
));
8283 unlock_user(p
, arg1
, 0);
8287 #ifdef TARGET_NR_utimes
8288 case TARGET_NR_utimes
:
8290 struct timeval
*tvp
, tv
[2];
8292 if (copy_from_user_timeval(&tv
[0], arg2
)
8293 || copy_from_user_timeval(&tv
[1],
8294 arg2
+ sizeof(struct target_timeval
)))
8295 return -TARGET_EFAULT
;
8300 if (!(p
= lock_user_string(arg1
)))
8301 return -TARGET_EFAULT
;
8302 ret
= get_errno(utimes(p
, tvp
));
8303 unlock_user(p
, arg1
, 0);
8307 #if defined(TARGET_NR_futimesat)
8308 case TARGET_NR_futimesat
:
8310 struct timeval
*tvp
, tv
[2];
8312 if (copy_from_user_timeval(&tv
[0], arg3
)
8313 || copy_from_user_timeval(&tv
[1],
8314 arg3
+ sizeof(struct target_timeval
)))
8315 return -TARGET_EFAULT
;
8320 if (!(p
= lock_user_string(arg2
))) {
8321 return -TARGET_EFAULT
;
8323 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8324 unlock_user(p
, arg2
, 0);
8328 #ifdef TARGET_NR_access
8329 case TARGET_NR_access
:
8330 if (!(p
= lock_user_string(arg1
))) {
8331 return -TARGET_EFAULT
;
8333 ret
= get_errno(access(path(p
), arg2
));
8334 unlock_user(p
, arg1
, 0);
8337 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8338 case TARGET_NR_faccessat
:
8339 if (!(p
= lock_user_string(arg2
))) {
8340 return -TARGET_EFAULT
;
8342 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8343 unlock_user(p
, arg2
, 0);
8346 #ifdef TARGET_NR_nice /* not on alpha */
8347 case TARGET_NR_nice
:
8348 return get_errno(nice(arg1
));
8350 case TARGET_NR_sync
:
8353 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8354 case TARGET_NR_syncfs
:
8355 return get_errno(syncfs(arg1
));
8357 case TARGET_NR_kill
:
8358 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8359 #ifdef TARGET_NR_rename
8360 case TARGET_NR_rename
:
8363 p
= lock_user_string(arg1
);
8364 p2
= lock_user_string(arg2
);
8366 ret
= -TARGET_EFAULT
;
8368 ret
= get_errno(rename(p
, p2
));
8369 unlock_user(p2
, arg2
, 0);
8370 unlock_user(p
, arg1
, 0);
8374 #if defined(TARGET_NR_renameat)
8375 case TARGET_NR_renameat
:
8378 p
= lock_user_string(arg2
);
8379 p2
= lock_user_string(arg4
);
8381 ret
= -TARGET_EFAULT
;
8383 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8384 unlock_user(p2
, arg4
, 0);
8385 unlock_user(p
, arg2
, 0);
8389 #if defined(TARGET_NR_renameat2)
8390 case TARGET_NR_renameat2
:
8393 p
= lock_user_string(arg2
);
8394 p2
= lock_user_string(arg4
);
8396 ret
= -TARGET_EFAULT
;
8398 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8400 unlock_user(p2
, arg4
, 0);
8401 unlock_user(p
, arg2
, 0);
8405 #ifdef TARGET_NR_mkdir
8406 case TARGET_NR_mkdir
:
8407 if (!(p
= lock_user_string(arg1
)))
8408 return -TARGET_EFAULT
;
8409 ret
= get_errno(mkdir(p
, arg2
));
8410 unlock_user(p
, arg1
, 0);
8413 #if defined(TARGET_NR_mkdirat)
8414 case TARGET_NR_mkdirat
:
8415 if (!(p
= lock_user_string(arg2
)))
8416 return -TARGET_EFAULT
;
8417 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8418 unlock_user(p
, arg2
, 0);
8421 #ifdef TARGET_NR_rmdir
8422 case TARGET_NR_rmdir
:
8423 if (!(p
= lock_user_string(arg1
)))
8424 return -TARGET_EFAULT
;
8425 ret
= get_errno(rmdir(p
));
8426 unlock_user(p
, arg1
, 0);
8430 ret
= get_errno(dup(arg1
));
8432 fd_trans_dup(arg1
, ret
);
8435 #ifdef TARGET_NR_pipe
8436 case TARGET_NR_pipe
:
8437 return do_pipe(cpu_env
, arg1
, 0, 0);
8439 #ifdef TARGET_NR_pipe2
8440 case TARGET_NR_pipe2
:
8441 return do_pipe(cpu_env
, arg1
,
8442 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8444 case TARGET_NR_times
:
8446 struct target_tms
*tmsp
;
8448 ret
= get_errno(times(&tms
));
8450 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8452 return -TARGET_EFAULT
;
8453 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8454 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8455 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8456 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8459 ret
= host_to_target_clock_t(ret
);
8462 case TARGET_NR_acct
:
8464 ret
= get_errno(acct(NULL
));
8466 if (!(p
= lock_user_string(arg1
))) {
8467 return -TARGET_EFAULT
;
8469 ret
= get_errno(acct(path(p
)));
8470 unlock_user(p
, arg1
, 0);
8473 #ifdef TARGET_NR_umount2
8474 case TARGET_NR_umount2
:
8475 if (!(p
= lock_user_string(arg1
)))
8476 return -TARGET_EFAULT
;
8477 ret
= get_errno(umount2(p
, arg2
));
8478 unlock_user(p
, arg1
, 0);
8481 case TARGET_NR_ioctl
:
8482 return do_ioctl(arg1
, arg2
, arg3
);
8483 #ifdef TARGET_NR_fcntl
8484 case TARGET_NR_fcntl
:
8485 return do_fcntl(arg1
, arg2
, arg3
);
8487 case TARGET_NR_setpgid
:
8488 return get_errno(setpgid(arg1
, arg2
));
8489 case TARGET_NR_umask
:
8490 return get_errno(umask(arg1
));
8491 case TARGET_NR_chroot
:
8492 if (!(p
= lock_user_string(arg1
)))
8493 return -TARGET_EFAULT
;
8494 ret
= get_errno(chroot(p
));
8495 unlock_user(p
, arg1
, 0);
8497 #ifdef TARGET_NR_dup2
8498 case TARGET_NR_dup2
:
8499 ret
= get_errno(dup2(arg1
, arg2
));
8501 fd_trans_dup(arg1
, arg2
);
8505 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8506 case TARGET_NR_dup3
:
8510 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8513 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8514 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8516 fd_trans_dup(arg1
, arg2
);
8521 #ifdef TARGET_NR_getppid /* not on alpha */
8522 case TARGET_NR_getppid
:
8523 return get_errno(getppid());
8525 #ifdef TARGET_NR_getpgrp
8526 case TARGET_NR_getpgrp
:
8527 return get_errno(getpgrp());
8529 case TARGET_NR_setsid
:
8530 return get_errno(setsid());
8531 #ifdef TARGET_NR_sigaction
8532 case TARGET_NR_sigaction
:
8534 #if defined(TARGET_ALPHA)
8535 struct target_sigaction act
, oact
, *pact
= 0;
8536 struct target_old_sigaction
*old_act
;
8538 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8539 return -TARGET_EFAULT
;
8540 act
._sa_handler
= old_act
->_sa_handler
;
8541 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8542 act
.sa_flags
= old_act
->sa_flags
;
8543 act
.sa_restorer
= 0;
8544 unlock_user_struct(old_act
, arg2
, 0);
8547 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8548 if (!is_error(ret
) && arg3
) {
8549 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8550 return -TARGET_EFAULT
;
8551 old_act
->_sa_handler
= oact
._sa_handler
;
8552 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8553 old_act
->sa_flags
= oact
.sa_flags
;
8554 unlock_user_struct(old_act
, arg3
, 1);
8556 #elif defined(TARGET_MIPS)
8557 struct target_sigaction act
, oact
, *pact
, *old_act
;
8560 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8561 return -TARGET_EFAULT
;
8562 act
._sa_handler
= old_act
->_sa_handler
;
8563 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8564 act
.sa_flags
= old_act
->sa_flags
;
8565 unlock_user_struct(old_act
, arg2
, 0);
8571 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8573 if (!is_error(ret
) && arg3
) {
8574 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8575 return -TARGET_EFAULT
;
8576 old_act
->_sa_handler
= oact
._sa_handler
;
8577 old_act
->sa_flags
= oact
.sa_flags
;
8578 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8579 old_act
->sa_mask
.sig
[1] = 0;
8580 old_act
->sa_mask
.sig
[2] = 0;
8581 old_act
->sa_mask
.sig
[3] = 0;
8582 unlock_user_struct(old_act
, arg3
, 1);
8585 struct target_old_sigaction
*old_act
;
8586 struct target_sigaction act
, oact
, *pact
;
8588 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8589 return -TARGET_EFAULT
;
8590 act
._sa_handler
= old_act
->_sa_handler
;
8591 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8592 act
.sa_flags
= old_act
->sa_flags
;
8593 act
.sa_restorer
= old_act
->sa_restorer
;
8594 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8595 act
.ka_restorer
= 0;
8597 unlock_user_struct(old_act
, arg2
, 0);
8602 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8603 if (!is_error(ret
) && arg3
) {
8604 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8605 return -TARGET_EFAULT
;
8606 old_act
->_sa_handler
= oact
._sa_handler
;
8607 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8608 old_act
->sa_flags
= oact
.sa_flags
;
8609 old_act
->sa_restorer
= oact
.sa_restorer
;
8610 unlock_user_struct(old_act
, arg3
, 1);
8616 case TARGET_NR_rt_sigaction
:
8618 #if defined(TARGET_ALPHA)
8619 /* For Alpha and SPARC this is a 5 argument syscall, with
8620 * a 'restorer' parameter which must be copied into the
8621 * sa_restorer field of the sigaction struct.
8622 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8623 * and arg5 is the sigsetsize.
8624 * Alpha also has a separate rt_sigaction struct that it uses
8625 * here; SPARC uses the usual sigaction struct.
8627 struct target_rt_sigaction
*rt_act
;
8628 struct target_sigaction act
, oact
, *pact
= 0;
8630 if (arg4
!= sizeof(target_sigset_t
)) {
8631 return -TARGET_EINVAL
;
8634 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8635 return -TARGET_EFAULT
;
8636 act
._sa_handler
= rt_act
->_sa_handler
;
8637 act
.sa_mask
= rt_act
->sa_mask
;
8638 act
.sa_flags
= rt_act
->sa_flags
;
8639 act
.sa_restorer
= arg5
;
8640 unlock_user_struct(rt_act
, arg2
, 0);
8643 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8644 if (!is_error(ret
) && arg3
) {
8645 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8646 return -TARGET_EFAULT
;
8647 rt_act
->_sa_handler
= oact
._sa_handler
;
8648 rt_act
->sa_mask
= oact
.sa_mask
;
8649 rt_act
->sa_flags
= oact
.sa_flags
;
8650 unlock_user_struct(rt_act
, arg3
, 1);
8654 target_ulong restorer
= arg4
;
8655 target_ulong sigsetsize
= arg5
;
8657 target_ulong sigsetsize
= arg4
;
8659 struct target_sigaction
*act
;
8660 struct target_sigaction
*oact
;
8662 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8663 return -TARGET_EINVAL
;
8666 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8667 return -TARGET_EFAULT
;
8669 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8670 act
->ka_restorer
= restorer
;
8676 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8677 ret
= -TARGET_EFAULT
;
8678 goto rt_sigaction_fail
;
8682 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8685 unlock_user_struct(act
, arg2
, 0);
8687 unlock_user_struct(oact
, arg3
, 1);
8691 #ifdef TARGET_NR_sgetmask /* not on alpha */
8692 case TARGET_NR_sgetmask
:
8695 abi_ulong target_set
;
8696 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8698 host_to_target_old_sigset(&target_set
, &cur_set
);
8704 #ifdef TARGET_NR_ssetmask /* not on alpha */
8705 case TARGET_NR_ssetmask
:
8708 abi_ulong target_set
= arg1
;
8709 target_to_host_old_sigset(&set
, &target_set
);
8710 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8712 host_to_target_old_sigset(&target_set
, &oset
);
8718 #ifdef TARGET_NR_sigprocmask
8719 case TARGET_NR_sigprocmask
:
8721 #if defined(TARGET_ALPHA)
8722 sigset_t set
, oldset
;
8727 case TARGET_SIG_BLOCK
:
8730 case TARGET_SIG_UNBLOCK
:
8733 case TARGET_SIG_SETMASK
:
8737 return -TARGET_EINVAL
;
8740 target_to_host_old_sigset(&set
, &mask
);
8742 ret
= do_sigprocmask(how
, &set
, &oldset
);
8743 if (!is_error(ret
)) {
8744 host_to_target_old_sigset(&mask
, &oldset
);
8746 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8749 sigset_t set
, oldset
, *set_ptr
;
8754 case TARGET_SIG_BLOCK
:
8757 case TARGET_SIG_UNBLOCK
:
8760 case TARGET_SIG_SETMASK
:
8764 return -TARGET_EINVAL
;
8766 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8767 return -TARGET_EFAULT
;
8768 target_to_host_old_sigset(&set
, p
);
8769 unlock_user(p
, arg2
, 0);
8775 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8776 if (!is_error(ret
) && arg3
) {
8777 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8778 return -TARGET_EFAULT
;
8779 host_to_target_old_sigset(p
, &oldset
);
8780 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8786 case TARGET_NR_rt_sigprocmask
:
8789 sigset_t set
, oldset
, *set_ptr
;
8791 if (arg4
!= sizeof(target_sigset_t
)) {
8792 return -TARGET_EINVAL
;
8797 case TARGET_SIG_BLOCK
:
8800 case TARGET_SIG_UNBLOCK
:
8803 case TARGET_SIG_SETMASK
:
8807 return -TARGET_EINVAL
;
8809 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8810 return -TARGET_EFAULT
;
8811 target_to_host_sigset(&set
, p
);
8812 unlock_user(p
, arg2
, 0);
8818 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8819 if (!is_error(ret
) && arg3
) {
8820 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8821 return -TARGET_EFAULT
;
8822 host_to_target_sigset(p
, &oldset
);
8823 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8827 #ifdef TARGET_NR_sigpending
8828 case TARGET_NR_sigpending
:
8831 ret
= get_errno(sigpending(&set
));
8832 if (!is_error(ret
)) {
8833 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8834 return -TARGET_EFAULT
;
8835 host_to_target_old_sigset(p
, &set
);
8836 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8841 case TARGET_NR_rt_sigpending
:
8845 /* Yes, this check is >, not != like most. We follow the kernel's
8846 * logic and it does it like this because it implements
8847 * NR_sigpending through the same code path, and in that case
8848 * the old_sigset_t is smaller in size.
8850 if (arg2
> sizeof(target_sigset_t
)) {
8851 return -TARGET_EINVAL
;
8854 ret
= get_errno(sigpending(&set
));
8855 if (!is_error(ret
)) {
8856 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8857 return -TARGET_EFAULT
;
8858 host_to_target_sigset(p
, &set
);
8859 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8863 #ifdef TARGET_NR_sigsuspend
8864 case TARGET_NR_sigsuspend
:
8866 TaskState
*ts
= cpu
->opaque
;
8867 #if defined(TARGET_ALPHA)
8868 abi_ulong mask
= arg1
;
8869 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8871 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8872 return -TARGET_EFAULT
;
8873 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8874 unlock_user(p
, arg1
, 0);
8876 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8878 if (ret
!= -TARGET_ERESTARTSYS
) {
8879 ts
->in_sigsuspend
= 1;
8884 case TARGET_NR_rt_sigsuspend
:
8886 TaskState
*ts
= cpu
->opaque
;
8888 if (arg2
!= sizeof(target_sigset_t
)) {
8889 return -TARGET_EINVAL
;
8891 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8892 return -TARGET_EFAULT
;
8893 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8894 unlock_user(p
, arg1
, 0);
8895 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8897 if (ret
!= -TARGET_ERESTARTSYS
) {
8898 ts
->in_sigsuspend
= 1;
8902 #ifdef TARGET_NR_rt_sigtimedwait
8903 case TARGET_NR_rt_sigtimedwait
:
8906 struct timespec uts
, *puts
;
8909 if (arg4
!= sizeof(target_sigset_t
)) {
8910 return -TARGET_EINVAL
;
8913 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8914 return -TARGET_EFAULT
;
8915 target_to_host_sigset(&set
, p
);
8916 unlock_user(p
, arg1
, 0);
8919 if (target_to_host_timespec(puts
, arg3
)) {
8920 return -TARGET_EFAULT
;
8925 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8927 if (!is_error(ret
)) {
8929 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8932 return -TARGET_EFAULT
;
8934 host_to_target_siginfo(p
, &uinfo
);
8935 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8937 ret
= host_to_target_signal(ret
);
8942 case TARGET_NR_rt_sigqueueinfo
:
8946 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8948 return -TARGET_EFAULT
;
8950 target_to_host_siginfo(&uinfo
, p
);
8951 unlock_user(p
, arg3
, 0);
8952 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8955 case TARGET_NR_rt_tgsigqueueinfo
:
8959 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8961 return -TARGET_EFAULT
;
8963 target_to_host_siginfo(&uinfo
, p
);
8964 unlock_user(p
, arg4
, 0);
8965 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8968 #ifdef TARGET_NR_sigreturn
8969 case TARGET_NR_sigreturn
:
8970 if (block_signals()) {
8971 return -TARGET_ERESTARTSYS
;
8973 return do_sigreturn(cpu_env
);
8975 case TARGET_NR_rt_sigreturn
:
8976 if (block_signals()) {
8977 return -TARGET_ERESTARTSYS
;
8979 return do_rt_sigreturn(cpu_env
);
8980 case TARGET_NR_sethostname
:
8981 if (!(p
= lock_user_string(arg1
)))
8982 return -TARGET_EFAULT
;
8983 ret
= get_errno(sethostname(p
, arg2
));
8984 unlock_user(p
, arg1
, 0);
8986 #ifdef TARGET_NR_setrlimit
8987 case TARGET_NR_setrlimit
:
8989 int resource
= target_to_host_resource(arg1
);
8990 struct target_rlimit
*target_rlim
;
8992 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8993 return -TARGET_EFAULT
;
8994 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8995 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8996 unlock_user_struct(target_rlim
, arg2
, 0);
8998 * If we just passed through resource limit settings for memory then
8999 * they would also apply to QEMU's own allocations, and QEMU will
9000 * crash or hang or die if its allocations fail. Ideally we would
9001 * track the guest allocations in QEMU and apply the limits ourselves.
9002 * For now, just tell the guest the call succeeded but don't actually
9005 if (resource
!= RLIMIT_AS
&&
9006 resource
!= RLIMIT_DATA
&&
9007 resource
!= RLIMIT_STACK
) {
9008 return get_errno(setrlimit(resource
, &rlim
));
9014 #ifdef TARGET_NR_getrlimit
9015 case TARGET_NR_getrlimit
:
9017 int resource
= target_to_host_resource(arg1
);
9018 struct target_rlimit
*target_rlim
;
9021 ret
= get_errno(getrlimit(resource
, &rlim
));
9022 if (!is_error(ret
)) {
9023 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9024 return -TARGET_EFAULT
;
9025 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9026 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9027 unlock_user_struct(target_rlim
, arg2
, 1);
9032 case TARGET_NR_getrusage
:
9034 struct rusage rusage
;
9035 ret
= get_errno(getrusage(arg1
, &rusage
));
9036 if (!is_error(ret
)) {
9037 ret
= host_to_target_rusage(arg2
, &rusage
);
9041 #if defined(TARGET_NR_gettimeofday)
9042 case TARGET_NR_gettimeofday
:
9047 ret
= get_errno(gettimeofday(&tv
, &tz
));
9048 if (!is_error(ret
)) {
9049 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9050 return -TARGET_EFAULT
;
9052 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9053 return -TARGET_EFAULT
;
9059 #if defined(TARGET_NR_settimeofday)
9060 case TARGET_NR_settimeofday
:
9062 struct timeval tv
, *ptv
= NULL
;
9063 struct timezone tz
, *ptz
= NULL
;
9066 if (copy_from_user_timeval(&tv
, arg1
)) {
9067 return -TARGET_EFAULT
;
9073 if (copy_from_user_timezone(&tz
, arg2
)) {
9074 return -TARGET_EFAULT
;
9079 return get_errno(settimeofday(ptv
, ptz
));
9082 #if defined(TARGET_NR_select)
9083 case TARGET_NR_select
:
9084 #if defined(TARGET_WANT_NI_OLD_SELECT)
9085 /* some architectures used to have old_select here
9086 * but now ENOSYS it.
9088 ret
= -TARGET_ENOSYS
;
9089 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9090 ret
= do_old_select(arg1
);
9092 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9096 #ifdef TARGET_NR_pselect6
9097 case TARGET_NR_pselect6
:
9099 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9100 fd_set rfds
, wfds
, efds
;
9101 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9102 struct timespec ts
, *ts_ptr
;
9105 * The 6th arg is actually two args smashed together,
9106 * so we cannot use the C library.
9114 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9115 target_sigset_t
*target_sigset
;
9123 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9127 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9131 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9137 * This takes a timespec, and not a timeval, so we cannot
9138 * use the do_select() helper ...
9141 if (target_to_host_timespec(&ts
, ts_addr
)) {
9142 return -TARGET_EFAULT
;
9149 /* Extract the two packed args for the sigset */
9152 sig
.size
= SIGSET_T_SIZE
;
9154 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9156 return -TARGET_EFAULT
;
9158 arg_sigset
= tswapal(arg7
[0]);
9159 arg_sigsize
= tswapal(arg7
[1]);
9160 unlock_user(arg7
, arg6
, 0);
9164 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9165 /* Like the kernel, we enforce correct size sigsets */
9166 return -TARGET_EINVAL
;
9168 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9169 sizeof(*target_sigset
), 1);
9170 if (!target_sigset
) {
9171 return -TARGET_EFAULT
;
9173 target_to_host_sigset(&set
, target_sigset
);
9174 unlock_user(target_sigset
, arg_sigset
, 0);
9182 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9185 if (!is_error(ret
)) {
9186 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9187 return -TARGET_EFAULT
;
9188 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9189 return -TARGET_EFAULT
;
9190 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9191 return -TARGET_EFAULT
;
9193 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9194 return -TARGET_EFAULT
;
9199 #ifdef TARGET_NR_symlink
9200 case TARGET_NR_symlink
:
9203 p
= lock_user_string(arg1
);
9204 p2
= lock_user_string(arg2
);
9206 ret
= -TARGET_EFAULT
;
9208 ret
= get_errno(symlink(p
, p2
));
9209 unlock_user(p2
, arg2
, 0);
9210 unlock_user(p
, arg1
, 0);
9214 #if defined(TARGET_NR_symlinkat)
9215 case TARGET_NR_symlinkat
:
9218 p
= lock_user_string(arg1
);
9219 p2
= lock_user_string(arg3
);
9221 ret
= -TARGET_EFAULT
;
9223 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9224 unlock_user(p2
, arg3
, 0);
9225 unlock_user(p
, arg1
, 0);
9229 #ifdef TARGET_NR_readlink
9230 case TARGET_NR_readlink
:
9233 p
= lock_user_string(arg1
);
9234 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9236 ret
= -TARGET_EFAULT
;
9238 /* Short circuit this for the magic exe check. */
9239 ret
= -TARGET_EINVAL
;
9240 } else if (is_proc_myself((const char *)p
, "exe")) {
9241 char real
[PATH_MAX
], *temp
;
9242 temp
= realpath(exec_path
, real
);
9243 /* Return value is # of bytes that we wrote to the buffer. */
9245 ret
= get_errno(-1);
9247 /* Don't worry about sign mismatch as earlier mapping
9248 * logic would have thrown a bad address error. */
9249 ret
= MIN(strlen(real
), arg3
);
9250 /* We cannot NUL terminate the string. */
9251 memcpy(p2
, real
, ret
);
9254 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9256 unlock_user(p2
, arg2
, ret
);
9257 unlock_user(p
, arg1
, 0);
9261 #if defined(TARGET_NR_readlinkat)
9262 case TARGET_NR_readlinkat
:
9265 p
= lock_user_string(arg2
);
9266 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9268 ret
= -TARGET_EFAULT
;
9269 } else if (is_proc_myself((const char *)p
, "exe")) {
9270 char real
[PATH_MAX
], *temp
;
9271 temp
= realpath(exec_path
, real
);
9272 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9273 snprintf((char *)p2
, arg4
, "%s", real
);
9275 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9277 unlock_user(p2
, arg3
, ret
);
9278 unlock_user(p
, arg2
, 0);
9282 #ifdef TARGET_NR_swapon
9283 case TARGET_NR_swapon
:
9284 if (!(p
= lock_user_string(arg1
)))
9285 return -TARGET_EFAULT
;
9286 ret
= get_errno(swapon(p
, arg2
));
9287 unlock_user(p
, arg1
, 0);
9290 case TARGET_NR_reboot
:
9291 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9292 /* arg4 must be ignored in all other cases */
9293 p
= lock_user_string(arg4
);
9295 return -TARGET_EFAULT
;
9297 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9298 unlock_user(p
, arg4
, 0);
9300 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9303 #ifdef TARGET_NR_mmap
9304 case TARGET_NR_mmap
:
9305 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9306 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9307 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9308 || defined(TARGET_S390X)
9311 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9312 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9313 return -TARGET_EFAULT
;
9320 unlock_user(v
, arg1
, 0);
9321 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9322 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9326 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9327 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9333 #ifdef TARGET_NR_mmap2
9334 case TARGET_NR_mmap2
:
9336 #define MMAP_SHIFT 12
9338 ret
= target_mmap(arg1
, arg2
, arg3
,
9339 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9340 arg5
, arg6
<< MMAP_SHIFT
);
9341 return get_errno(ret
);
9343 case TARGET_NR_munmap
:
9344 return get_errno(target_munmap(arg1
, arg2
));
9345 case TARGET_NR_mprotect
:
9347 TaskState
*ts
= cpu
->opaque
;
9348 /* Special hack to detect libc making the stack executable. */
9349 if ((arg3
& PROT_GROWSDOWN
)
9350 && arg1
>= ts
->info
->stack_limit
9351 && arg1
<= ts
->info
->start_stack
) {
9352 arg3
&= ~PROT_GROWSDOWN
;
9353 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9354 arg1
= ts
->info
->stack_limit
;
9357 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9358 #ifdef TARGET_NR_mremap
9359 case TARGET_NR_mremap
:
9360 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9362 /* ??? msync/mlock/munlock are broken for softmmu. */
9363 #ifdef TARGET_NR_msync
9364 case TARGET_NR_msync
:
9365 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9367 #ifdef TARGET_NR_mlock
9368 case TARGET_NR_mlock
:
9369 return get_errno(mlock(g2h(arg1
), arg2
));
9371 #ifdef TARGET_NR_munlock
9372 case TARGET_NR_munlock
:
9373 return get_errno(munlock(g2h(arg1
), arg2
));
9375 #ifdef TARGET_NR_mlockall
9376 case TARGET_NR_mlockall
:
9377 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9379 #ifdef TARGET_NR_munlockall
9380 case TARGET_NR_munlockall
:
9381 return get_errno(munlockall());
9383 #ifdef TARGET_NR_truncate
9384 case TARGET_NR_truncate
:
9385 if (!(p
= lock_user_string(arg1
)))
9386 return -TARGET_EFAULT
;
9387 ret
= get_errno(truncate(p
, arg2
));
9388 unlock_user(p
, arg1
, 0);
9391 #ifdef TARGET_NR_ftruncate
9392 case TARGET_NR_ftruncate
:
9393 return get_errno(ftruncate(arg1
, arg2
));
9395 case TARGET_NR_fchmod
:
9396 return get_errno(fchmod(arg1
, arg2
));
9397 #if defined(TARGET_NR_fchmodat)
9398 case TARGET_NR_fchmodat
:
9399 if (!(p
= lock_user_string(arg2
)))
9400 return -TARGET_EFAULT
;
9401 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9402 unlock_user(p
, arg2
, 0);
9405 case TARGET_NR_getpriority
:
9406 /* Note that negative values are valid for getpriority, so we must
9407 differentiate based on errno settings. */
9409 ret
= getpriority(arg1
, arg2
);
9410 if (ret
== -1 && errno
!= 0) {
9411 return -host_to_target_errno(errno
);
9414 /* Return value is the unbiased priority. Signal no error. */
9415 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9417 /* Return value is a biased priority to avoid negative numbers. */
9421 case TARGET_NR_setpriority
:
9422 return get_errno(setpriority(arg1
, arg2
, arg3
));
9423 #ifdef TARGET_NR_statfs
9424 case TARGET_NR_statfs
:
9425 if (!(p
= lock_user_string(arg1
))) {
9426 return -TARGET_EFAULT
;
9428 ret
= get_errno(statfs(path(p
), &stfs
));
9429 unlock_user(p
, arg1
, 0);
9431 if (!is_error(ret
)) {
9432 struct target_statfs
*target_stfs
;
9434 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9435 return -TARGET_EFAULT
;
9436 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9437 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9438 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9439 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9440 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9441 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9442 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9443 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9444 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9445 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9446 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9447 #ifdef _STATFS_F_FLAGS
9448 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9450 __put_user(0, &target_stfs
->f_flags
);
9452 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9453 unlock_user_struct(target_stfs
, arg2
, 1);
9457 #ifdef TARGET_NR_fstatfs
9458 case TARGET_NR_fstatfs
:
9459 ret
= get_errno(fstatfs(arg1
, &stfs
));
9460 goto convert_statfs
;
9462 #ifdef TARGET_NR_statfs64
9463 case TARGET_NR_statfs64
:
9464 if (!(p
= lock_user_string(arg1
))) {
9465 return -TARGET_EFAULT
;
9467 ret
= get_errno(statfs(path(p
), &stfs
));
9468 unlock_user(p
, arg1
, 0);
9470 if (!is_error(ret
)) {
9471 struct target_statfs64
*target_stfs
;
9473 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9474 return -TARGET_EFAULT
;
9475 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9476 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9477 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9478 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9479 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9480 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9481 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9482 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9483 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9484 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9485 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9486 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9487 unlock_user_struct(target_stfs
, arg3
, 1);
9490 case TARGET_NR_fstatfs64
:
9491 ret
= get_errno(fstatfs(arg1
, &stfs
));
9492 goto convert_statfs64
;
9494 #ifdef TARGET_NR_socketcall
9495 case TARGET_NR_socketcall
:
9496 return do_socketcall(arg1
, arg2
);
9498 #ifdef TARGET_NR_accept
9499 case TARGET_NR_accept
:
9500 return do_accept4(arg1
, arg2
, arg3
, 0);
9502 #ifdef TARGET_NR_accept4
9503 case TARGET_NR_accept4
:
9504 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9506 #ifdef TARGET_NR_bind
9507 case TARGET_NR_bind
:
9508 return do_bind(arg1
, arg2
, arg3
);
9510 #ifdef TARGET_NR_connect
9511 case TARGET_NR_connect
:
9512 return do_connect(arg1
, arg2
, arg3
);
9514 #ifdef TARGET_NR_getpeername
9515 case TARGET_NR_getpeername
:
9516 return do_getpeername(arg1
, arg2
, arg3
);
9518 #ifdef TARGET_NR_getsockname
9519 case TARGET_NR_getsockname
:
9520 return do_getsockname(arg1
, arg2
, arg3
);
9522 #ifdef TARGET_NR_getsockopt
9523 case TARGET_NR_getsockopt
:
9524 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9526 #ifdef TARGET_NR_listen
9527 case TARGET_NR_listen
:
9528 return get_errno(listen(arg1
, arg2
));
9530 #ifdef TARGET_NR_recv
9531 case TARGET_NR_recv
:
9532 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9534 #ifdef TARGET_NR_recvfrom
9535 case TARGET_NR_recvfrom
:
9536 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9538 #ifdef TARGET_NR_recvmsg
9539 case TARGET_NR_recvmsg
:
9540 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9542 #ifdef TARGET_NR_send
9543 case TARGET_NR_send
:
9544 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9546 #ifdef TARGET_NR_sendmsg
9547 case TARGET_NR_sendmsg
:
9548 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9550 #ifdef TARGET_NR_sendmmsg
9551 case TARGET_NR_sendmmsg
:
9552 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9554 #ifdef TARGET_NR_recvmmsg
9555 case TARGET_NR_recvmmsg
:
9556 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9558 #ifdef TARGET_NR_sendto
9559 case TARGET_NR_sendto
:
9560 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9562 #ifdef TARGET_NR_shutdown
9563 case TARGET_NR_shutdown
:
9564 return get_errno(shutdown(arg1
, arg2
));
9566 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9567 case TARGET_NR_getrandom
:
9568 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9570 return -TARGET_EFAULT
;
9572 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9573 unlock_user(p
, arg1
, ret
);
9576 #ifdef TARGET_NR_socket
9577 case TARGET_NR_socket
:
9578 return do_socket(arg1
, arg2
, arg3
);
9580 #ifdef TARGET_NR_socketpair
9581 case TARGET_NR_socketpair
:
9582 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9584 #ifdef TARGET_NR_setsockopt
9585 case TARGET_NR_setsockopt
:
9586 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9588 #if defined(TARGET_NR_syslog)
9589 case TARGET_NR_syslog
:
9594 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9595 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9596 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9597 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9598 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9599 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9600 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9601 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9602 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9603 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9604 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9605 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9608 return -TARGET_EINVAL
;
9613 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9615 return -TARGET_EFAULT
;
9617 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9618 unlock_user(p
, arg2
, arg3
);
9622 return -TARGET_EINVAL
;
9627 case TARGET_NR_setitimer
:
9629 struct itimerval value
, ovalue
, *pvalue
;
9633 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9634 || copy_from_user_timeval(&pvalue
->it_value
,
9635 arg2
+ sizeof(struct target_timeval
)))
9636 return -TARGET_EFAULT
;
9640 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9641 if (!is_error(ret
) && arg3
) {
9642 if (copy_to_user_timeval(arg3
,
9643 &ovalue
.it_interval
)
9644 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9646 return -TARGET_EFAULT
;
9650 case TARGET_NR_getitimer
:
9652 struct itimerval value
;
9654 ret
= get_errno(getitimer(arg1
, &value
));
9655 if (!is_error(ret
) && arg2
) {
9656 if (copy_to_user_timeval(arg2
,
9658 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9660 return -TARGET_EFAULT
;
9664 #ifdef TARGET_NR_stat
9665 case TARGET_NR_stat
:
9666 if (!(p
= lock_user_string(arg1
))) {
9667 return -TARGET_EFAULT
;
9669 ret
= get_errno(stat(path(p
), &st
));
9670 unlock_user(p
, arg1
, 0);
9673 #ifdef TARGET_NR_lstat
9674 case TARGET_NR_lstat
:
9675 if (!(p
= lock_user_string(arg1
))) {
9676 return -TARGET_EFAULT
;
9678 ret
= get_errno(lstat(path(p
), &st
));
9679 unlock_user(p
, arg1
, 0);
9682 #ifdef TARGET_NR_fstat
9683 case TARGET_NR_fstat
:
9685 ret
= get_errno(fstat(arg1
, &st
));
9686 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9689 if (!is_error(ret
)) {
9690 struct target_stat
*target_st
;
9692 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9693 return -TARGET_EFAULT
;
9694 memset(target_st
, 0, sizeof(*target_st
));
9695 __put_user(st
.st_dev
, &target_st
->st_dev
);
9696 __put_user(st
.st_ino
, &target_st
->st_ino
);
9697 __put_user(st
.st_mode
, &target_st
->st_mode
);
9698 __put_user(st
.st_uid
, &target_st
->st_uid
);
9699 __put_user(st
.st_gid
, &target_st
->st_gid
);
9700 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9701 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9702 __put_user(st
.st_size
, &target_st
->st_size
);
9703 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9704 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9705 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9706 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9707 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9708 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9709 defined(TARGET_STAT_HAVE_NSEC)
9710 __put_user(st
.st_atim
.tv_nsec
,
9711 &target_st
->target_st_atime_nsec
);
9712 __put_user(st
.st_mtim
.tv_nsec
,
9713 &target_st
->target_st_mtime_nsec
);
9714 __put_user(st
.st_ctim
.tv_nsec
,
9715 &target_st
->target_st_ctime_nsec
);
9717 unlock_user_struct(target_st
, arg2
, 1);
9722 case TARGET_NR_vhangup
:
9723 return get_errno(vhangup());
9724 #ifdef TARGET_NR_syscall
9725 case TARGET_NR_syscall
:
9726 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9727 arg6
, arg7
, arg8
, 0);
9729 #if defined(TARGET_NR_wait4)
9730 case TARGET_NR_wait4
:
9733 abi_long status_ptr
= arg2
;
9734 struct rusage rusage
, *rusage_ptr
;
9735 abi_ulong target_rusage
= arg4
;
9736 abi_long rusage_err
;
9738 rusage_ptr
= &rusage
;
9741 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9742 if (!is_error(ret
)) {
9743 if (status_ptr
&& ret
) {
9744 status
= host_to_target_waitstatus(status
);
9745 if (put_user_s32(status
, status_ptr
))
9746 return -TARGET_EFAULT
;
9748 if (target_rusage
) {
9749 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9758 #ifdef TARGET_NR_swapoff
9759 case TARGET_NR_swapoff
:
9760 if (!(p
= lock_user_string(arg1
)))
9761 return -TARGET_EFAULT
;
9762 ret
= get_errno(swapoff(p
));
9763 unlock_user(p
, arg1
, 0);
9766 case TARGET_NR_sysinfo
:
9768 struct target_sysinfo
*target_value
;
9769 struct sysinfo value
;
9770 ret
= get_errno(sysinfo(&value
));
9771 if (!is_error(ret
) && arg1
)
9773 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9774 return -TARGET_EFAULT
;
9775 __put_user(value
.uptime
, &target_value
->uptime
);
9776 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9777 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9778 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9779 __put_user(value
.totalram
, &target_value
->totalram
);
9780 __put_user(value
.freeram
, &target_value
->freeram
);
9781 __put_user(value
.sharedram
, &target_value
->sharedram
);
9782 __put_user(value
.bufferram
, &target_value
->bufferram
);
9783 __put_user(value
.totalswap
, &target_value
->totalswap
);
9784 __put_user(value
.freeswap
, &target_value
->freeswap
);
9785 __put_user(value
.procs
, &target_value
->procs
);
9786 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9787 __put_user(value
.freehigh
, &target_value
->freehigh
);
9788 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9789 unlock_user_struct(target_value
, arg1
, 1);
9793 #ifdef TARGET_NR_ipc
9795 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9797 #ifdef TARGET_NR_semget
9798 case TARGET_NR_semget
:
9799 return get_errno(semget(arg1
, arg2
, arg3
));
9801 #ifdef TARGET_NR_semop
9802 case TARGET_NR_semop
:
9803 return do_semtimedop(arg1
, arg2
, arg3
, 0);
9805 #ifdef TARGET_NR_semtimedop
9806 case TARGET_NR_semtimedop
:
9807 return do_semtimedop(arg1
, arg2
, arg3
, arg4
);
9809 #ifdef TARGET_NR_semctl
9810 case TARGET_NR_semctl
:
9811 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9813 #ifdef TARGET_NR_msgctl
9814 case TARGET_NR_msgctl
:
9815 return do_msgctl(arg1
, arg2
, arg3
);
9817 #ifdef TARGET_NR_msgget
9818 case TARGET_NR_msgget
:
9819 return get_errno(msgget(arg1
, arg2
));
9821 #ifdef TARGET_NR_msgrcv
9822 case TARGET_NR_msgrcv
:
9823 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9825 #ifdef TARGET_NR_msgsnd
9826 case TARGET_NR_msgsnd
:
9827 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9829 #ifdef TARGET_NR_shmget
9830 case TARGET_NR_shmget
:
9831 return get_errno(shmget(arg1
, arg2
, arg3
));
9833 #ifdef TARGET_NR_shmctl
9834 case TARGET_NR_shmctl
:
9835 return do_shmctl(arg1
, arg2
, arg3
);
9837 #ifdef TARGET_NR_shmat
9838 case TARGET_NR_shmat
:
9839 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9841 #ifdef TARGET_NR_shmdt
9842 case TARGET_NR_shmdt
:
9843 return do_shmdt(arg1
);
9845 case TARGET_NR_fsync
:
9846 return get_errno(fsync(arg1
));
9847 case TARGET_NR_clone
:
9848 /* Linux manages to have three different orderings for its
9849 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9850 * match the kernel's CONFIG_CLONE_* settings.
9851 * Microblaze is further special in that it uses a sixth
9852 * implicit argument to clone for the TLS pointer.
9854 #if defined(TARGET_MICROBLAZE)
9855 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9856 #elif defined(TARGET_CLONE_BACKWARDS)
9857 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9858 #elif defined(TARGET_CLONE_BACKWARDS2)
9859 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9861 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9864 #ifdef __NR_exit_group
9865 /* new thread calls */
9866 case TARGET_NR_exit_group
:
9867 preexit_cleanup(cpu_env
, arg1
);
9868 return get_errno(exit_group(arg1
));
9870 case TARGET_NR_setdomainname
:
9871 if (!(p
= lock_user_string(arg1
)))
9872 return -TARGET_EFAULT
;
9873 ret
= get_errno(setdomainname(p
, arg2
));
9874 unlock_user(p
, arg1
, 0);
9876 case TARGET_NR_uname
:
9877 /* no need to transcode because we use the linux syscall */
9879 struct new_utsname
* buf
;
9881 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9882 return -TARGET_EFAULT
;
9883 ret
= get_errno(sys_uname(buf
));
9884 if (!is_error(ret
)) {
9885 /* Overwrite the native machine name with whatever is being
9887 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9888 sizeof(buf
->machine
));
9889 /* Allow the user to override the reported release. */
9890 if (qemu_uname_release
&& *qemu_uname_release
) {
9891 g_strlcpy(buf
->release
, qemu_uname_release
,
9892 sizeof(buf
->release
));
9895 unlock_user_struct(buf
, arg1
, 1);
9899 case TARGET_NR_modify_ldt
:
9900 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9901 #if !defined(TARGET_X86_64)
9902 case TARGET_NR_vm86
:
9903 return do_vm86(cpu_env
, arg1
, arg2
);
9906 #if defined(TARGET_NR_adjtimex)
9907 case TARGET_NR_adjtimex
:
9909 struct timex host_buf
;
9911 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9912 return -TARGET_EFAULT
;
9914 ret
= get_errno(adjtimex(&host_buf
));
9915 if (!is_error(ret
)) {
9916 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9917 return -TARGET_EFAULT
;
9923 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9924 case TARGET_NR_clock_adjtime
:
9926 struct timex htx
, *phtx
= &htx
;
9928 if (target_to_host_timex(phtx
, arg2
) != 0) {
9929 return -TARGET_EFAULT
;
9931 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9932 if (!is_error(ret
) && phtx
) {
9933 if (host_to_target_timex(arg2
, phtx
) != 0) {
9934 return -TARGET_EFAULT
;
9940 case TARGET_NR_getpgid
:
9941 return get_errno(getpgid(arg1
));
9942 case TARGET_NR_fchdir
:
9943 return get_errno(fchdir(arg1
));
9944 case TARGET_NR_personality
:
9945 return get_errno(personality(arg1
));
9946 #ifdef TARGET_NR__llseek /* Not on alpha */
9947 case TARGET_NR__llseek
:
9950 #if !defined(__NR_llseek)
9951 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9953 ret
= get_errno(res
);
9958 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9960 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9961 return -TARGET_EFAULT
;
9966 #ifdef TARGET_NR_getdents
9967 case TARGET_NR_getdents
:
9968 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9969 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9971 struct target_dirent
*target_dirp
;
9972 struct linux_dirent
*dirp
;
9973 abi_long count
= arg3
;
9975 dirp
= g_try_malloc(count
);
9977 return -TARGET_ENOMEM
;
9980 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9981 if (!is_error(ret
)) {
9982 struct linux_dirent
*de
;
9983 struct target_dirent
*tde
;
9985 int reclen
, treclen
;
9986 int count1
, tnamelen
;
9990 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9991 return -TARGET_EFAULT
;
9994 reclen
= de
->d_reclen
;
9995 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9996 assert(tnamelen
>= 0);
9997 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9998 assert(count1
+ treclen
<= count
);
9999 tde
->d_reclen
= tswap16(treclen
);
10000 tde
->d_ino
= tswapal(de
->d_ino
);
10001 tde
->d_off
= tswapal(de
->d_off
);
10002 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10003 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10005 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10009 unlock_user(target_dirp
, arg2
, ret
);
10015 struct linux_dirent
*dirp
;
10016 abi_long count
= arg3
;
10018 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10019 return -TARGET_EFAULT
;
10020 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10021 if (!is_error(ret
)) {
10022 struct linux_dirent
*de
;
10027 reclen
= de
->d_reclen
;
10030 de
->d_reclen
= tswap16(reclen
);
10031 tswapls(&de
->d_ino
);
10032 tswapls(&de
->d_off
);
10033 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10037 unlock_user(dirp
, arg2
, ret
);
10041 /* Implement getdents in terms of getdents64 */
10043 struct linux_dirent64
*dirp
;
10044 abi_long count
= arg3
;
10046 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10048 return -TARGET_EFAULT
;
10050 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10051 if (!is_error(ret
)) {
10052 /* Convert the dirent64 structs to target dirent. We do this
10053 * in-place, since we can guarantee that a target_dirent is no
10054 * larger than a dirent64; however this means we have to be
10055 * careful to read everything before writing in the new format.
10057 struct linux_dirent64
*de
;
10058 struct target_dirent
*tde
;
10063 tde
= (struct target_dirent
*)dirp
;
10065 int namelen
, treclen
;
10066 int reclen
= de
->d_reclen
;
10067 uint64_t ino
= de
->d_ino
;
10068 int64_t off
= de
->d_off
;
10069 uint8_t type
= de
->d_type
;
10071 namelen
= strlen(de
->d_name
);
10072 treclen
= offsetof(struct target_dirent
, d_name
)
10074 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10076 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10077 tde
->d_ino
= tswapal(ino
);
10078 tde
->d_off
= tswapal(off
);
10079 tde
->d_reclen
= tswap16(treclen
);
10080 /* The target_dirent type is in what was formerly a padding
10081 * byte at the end of the structure:
10083 *(((char *)tde
) + treclen
- 1) = type
;
10085 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10086 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10092 unlock_user(dirp
, arg2
, ret
);
10096 #endif /* TARGET_NR_getdents */
10097 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10098 case TARGET_NR_getdents64
:
10100 struct linux_dirent64
*dirp
;
10101 abi_long count
= arg3
;
10102 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10103 return -TARGET_EFAULT
;
10104 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10105 if (!is_error(ret
)) {
10106 struct linux_dirent64
*de
;
10111 reclen
= de
->d_reclen
;
10114 de
->d_reclen
= tswap16(reclen
);
10115 tswap64s((uint64_t *)&de
->d_ino
);
10116 tswap64s((uint64_t *)&de
->d_off
);
10117 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10121 unlock_user(dirp
, arg2
, ret
);
10124 #endif /* TARGET_NR_getdents64 */
10125 #if defined(TARGET_NR__newselect)
10126 case TARGET_NR__newselect
:
10127 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10129 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10130 # ifdef TARGET_NR_poll
10131 case TARGET_NR_poll
:
10133 # ifdef TARGET_NR_ppoll
10134 case TARGET_NR_ppoll
:
10137 struct target_pollfd
*target_pfd
;
10138 unsigned int nfds
= arg2
;
10139 struct pollfd
*pfd
;
10145 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10146 return -TARGET_EINVAL
;
10149 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10150 sizeof(struct target_pollfd
) * nfds
, 1);
10152 return -TARGET_EFAULT
;
10155 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10156 for (i
= 0; i
< nfds
; i
++) {
10157 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10158 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10163 # ifdef TARGET_NR_ppoll
10164 case TARGET_NR_ppoll
:
10166 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10167 target_sigset_t
*target_set
;
10168 sigset_t _set
, *set
= &_set
;
10171 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10172 unlock_user(target_pfd
, arg1
, 0);
10173 return -TARGET_EFAULT
;
10180 if (arg5
!= sizeof(target_sigset_t
)) {
10181 unlock_user(target_pfd
, arg1
, 0);
10182 return -TARGET_EINVAL
;
10185 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10187 unlock_user(target_pfd
, arg1
, 0);
10188 return -TARGET_EFAULT
;
10190 target_to_host_sigset(set
, target_set
);
10195 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10196 set
, SIGSET_T_SIZE
));
10198 if (!is_error(ret
) && arg3
) {
10199 host_to_target_timespec(arg3
, timeout_ts
);
10202 unlock_user(target_set
, arg4
, 0);
10207 # ifdef TARGET_NR_poll
10208 case TARGET_NR_poll
:
10210 struct timespec ts
, *pts
;
10213 /* Convert ms to secs, ns */
10214 ts
.tv_sec
= arg3
/ 1000;
10215 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10218 /* -ve poll() timeout means "infinite" */
10221 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10226 g_assert_not_reached();
10229 if (!is_error(ret
)) {
10230 for(i
= 0; i
< nfds
; i
++) {
10231 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10234 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10238 case TARGET_NR_flock
:
10239 /* NOTE: the flock constant seems to be the same for every
10241 return get_errno(safe_flock(arg1
, arg2
));
10242 case TARGET_NR_readv
:
10244 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10246 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10247 unlock_iovec(vec
, arg2
, arg3
, 1);
10249 ret
= -host_to_target_errno(errno
);
10253 case TARGET_NR_writev
:
10255 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10257 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10258 unlock_iovec(vec
, arg2
, arg3
, 0);
10260 ret
= -host_to_target_errno(errno
);
10264 #if defined(TARGET_NR_preadv)
10265 case TARGET_NR_preadv
:
10267 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10269 unsigned long low
, high
;
10271 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10272 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10273 unlock_iovec(vec
, arg2
, arg3
, 1);
10275 ret
= -host_to_target_errno(errno
);
10280 #if defined(TARGET_NR_pwritev)
10281 case TARGET_NR_pwritev
:
10283 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10285 unsigned long low
, high
;
10287 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10288 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10289 unlock_iovec(vec
, arg2
, arg3
, 0);
10291 ret
= -host_to_target_errno(errno
);
10296 case TARGET_NR_getsid
:
10297 return get_errno(getsid(arg1
));
10298 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10299 case TARGET_NR_fdatasync
:
10300 return get_errno(fdatasync(arg1
));
10302 #ifdef TARGET_NR__sysctl
10303 case TARGET_NR__sysctl
:
10304 /* We don't implement this, but ENOTDIR is always a safe
10306 return -TARGET_ENOTDIR
;
10308 case TARGET_NR_sched_getaffinity
:
10310 unsigned int mask_size
;
10311 unsigned long *mask
;
10314 * sched_getaffinity needs multiples of ulong, so need to take
10315 * care of mismatches between target ulong and host ulong sizes.
10317 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10318 return -TARGET_EINVAL
;
10320 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10322 mask
= alloca(mask_size
);
10323 memset(mask
, 0, mask_size
);
10324 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10326 if (!is_error(ret
)) {
10328 /* More data returned than the caller's buffer will fit.
10329 * This only happens if sizeof(abi_long) < sizeof(long)
10330 * and the caller passed us a buffer holding an odd number
10331 * of abi_longs. If the host kernel is actually using the
10332 * extra 4 bytes then fail EINVAL; otherwise we can just
10333 * ignore them and only copy the interesting part.
10335 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10336 if (numcpus
> arg2
* 8) {
10337 return -TARGET_EINVAL
;
10342 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10343 return -TARGET_EFAULT
;
10348 case TARGET_NR_sched_setaffinity
:
10350 unsigned int mask_size
;
10351 unsigned long *mask
;
10354 * sched_setaffinity needs multiples of ulong, so need to take
10355 * care of mismatches between target ulong and host ulong sizes.
10357 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10358 return -TARGET_EINVAL
;
10360 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10361 mask
= alloca(mask_size
);
10363 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10368 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10370 case TARGET_NR_getcpu
:
10372 unsigned cpu
, node
;
10373 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10374 arg2
? &node
: NULL
,
10376 if (is_error(ret
)) {
10379 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10380 return -TARGET_EFAULT
;
10382 if (arg2
&& put_user_u32(node
, arg2
)) {
10383 return -TARGET_EFAULT
;
10387 case TARGET_NR_sched_setparam
:
10389 struct sched_param
*target_schp
;
10390 struct sched_param schp
;
10393 return -TARGET_EINVAL
;
10395 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10396 return -TARGET_EFAULT
;
10397 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10398 unlock_user_struct(target_schp
, arg2
, 0);
10399 return get_errno(sched_setparam(arg1
, &schp
));
10401 case TARGET_NR_sched_getparam
:
10403 struct sched_param
*target_schp
;
10404 struct sched_param schp
;
10407 return -TARGET_EINVAL
;
10409 ret
= get_errno(sched_getparam(arg1
, &schp
));
10410 if (!is_error(ret
)) {
10411 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10412 return -TARGET_EFAULT
;
10413 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10414 unlock_user_struct(target_schp
, arg2
, 1);
10418 case TARGET_NR_sched_setscheduler
:
10420 struct sched_param
*target_schp
;
10421 struct sched_param schp
;
10423 return -TARGET_EINVAL
;
10425 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10426 return -TARGET_EFAULT
;
10427 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10428 unlock_user_struct(target_schp
, arg3
, 0);
10429 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10431 case TARGET_NR_sched_getscheduler
:
10432 return get_errno(sched_getscheduler(arg1
));
10433 case TARGET_NR_sched_yield
:
10434 return get_errno(sched_yield());
10435 case TARGET_NR_sched_get_priority_max
:
10436 return get_errno(sched_get_priority_max(arg1
));
10437 case TARGET_NR_sched_get_priority_min
:
10438 return get_errno(sched_get_priority_min(arg1
));
10439 #ifdef TARGET_NR_sched_rr_get_interval
10440 case TARGET_NR_sched_rr_get_interval
:
10442 struct timespec ts
;
10443 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10444 if (!is_error(ret
)) {
10445 ret
= host_to_target_timespec(arg2
, &ts
);
10450 #if defined(TARGET_NR_nanosleep)
10451 case TARGET_NR_nanosleep
:
10453 struct timespec req
, rem
;
10454 target_to_host_timespec(&req
, arg1
);
10455 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10456 if (is_error(ret
) && arg2
) {
10457 host_to_target_timespec(arg2
, &rem
);
10462 case TARGET_NR_prctl
:
10464 case PR_GET_PDEATHSIG
:
10467 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10468 if (!is_error(ret
) && arg2
10469 && put_user_ual(deathsig
, arg2
)) {
10470 return -TARGET_EFAULT
;
10477 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10479 return -TARGET_EFAULT
;
10481 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10482 arg3
, arg4
, arg5
));
10483 unlock_user(name
, arg2
, 16);
10488 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10490 return -TARGET_EFAULT
;
10492 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10493 arg3
, arg4
, arg5
));
10494 unlock_user(name
, arg2
, 0);
10499 case TARGET_PR_GET_FP_MODE
:
10501 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10503 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10504 ret
|= TARGET_PR_FP_MODE_FR
;
10506 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10507 ret
|= TARGET_PR_FP_MODE_FRE
;
10511 case TARGET_PR_SET_FP_MODE
:
10513 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10514 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10515 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10516 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10517 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10519 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10520 TARGET_PR_FP_MODE_FRE
;
10522 /* If nothing to change, return right away, successfully. */
10523 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10526 /* Check the value is valid */
10527 if (arg2
& ~known_bits
) {
10528 return -TARGET_EOPNOTSUPP
;
10530 /* Setting FRE without FR is not supported. */
10531 if (new_fre
&& !new_fr
) {
10532 return -TARGET_EOPNOTSUPP
;
10534 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10535 /* FR1 is not supported */
10536 return -TARGET_EOPNOTSUPP
;
10538 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10539 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10540 /* cannot set FR=0 */
10541 return -TARGET_EOPNOTSUPP
;
10543 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10544 /* Cannot set FRE=1 */
10545 return -TARGET_EOPNOTSUPP
;
10549 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10550 for (i
= 0; i
< 32 ; i
+= 2) {
10551 if (!old_fr
&& new_fr
) {
10552 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10553 } else if (old_fr
&& !new_fr
) {
10554 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10559 env
->CP0_Status
|= (1 << CP0St_FR
);
10560 env
->hflags
|= MIPS_HFLAG_F64
;
10562 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10563 env
->hflags
&= ~MIPS_HFLAG_F64
;
10566 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10567 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10568 env
->hflags
|= MIPS_HFLAG_FRE
;
10571 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10572 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10578 #ifdef TARGET_AARCH64
10579 case TARGET_PR_SVE_SET_VL
:
10581 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10582 * PR_SVE_VL_INHERIT. Note the kernel definition
10583 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10584 * even though the current architectural maximum is VQ=16.
10586 ret
= -TARGET_EINVAL
;
10587 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10588 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10589 CPUARMState
*env
= cpu_env
;
10590 ARMCPU
*cpu
= env_archcpu(env
);
10591 uint32_t vq
, old_vq
;
10593 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10594 vq
= MAX(arg2
/ 16, 1);
10595 vq
= MIN(vq
, cpu
->sve_max_vq
);
10598 aarch64_sve_narrow_vq(env
, vq
);
10600 env
->vfp
.zcr_el
[1] = vq
- 1;
10601 arm_rebuild_hflags(env
);
10605 case TARGET_PR_SVE_GET_VL
:
10606 ret
= -TARGET_EINVAL
;
10608 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10609 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10610 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10614 case TARGET_PR_PAC_RESET_KEYS
:
10616 CPUARMState
*env
= cpu_env
;
10617 ARMCPU
*cpu
= env_archcpu(env
);
10619 if (arg3
|| arg4
|| arg5
) {
10620 return -TARGET_EINVAL
;
10622 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10623 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10624 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10625 TARGET_PR_PAC_APGAKEY
);
10631 } else if (arg2
& ~all
) {
10632 return -TARGET_EINVAL
;
10634 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10635 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10636 sizeof(ARMPACKey
), &err
);
10638 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10639 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10640 sizeof(ARMPACKey
), &err
);
10642 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10643 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10644 sizeof(ARMPACKey
), &err
);
10646 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10647 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10648 sizeof(ARMPACKey
), &err
);
10650 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10651 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10652 sizeof(ARMPACKey
), &err
);
10656 * Some unknown failure in the crypto. The best
10657 * we can do is log it and fail the syscall.
10658 * The real syscall cannot fail this way.
10660 qemu_log_mask(LOG_UNIMP
,
10661 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10662 error_get_pretty(err
));
10664 return -TARGET_EIO
;
10669 return -TARGET_EINVAL
;
10670 #endif /* AARCH64 */
10671 case PR_GET_SECCOMP
:
10672 case PR_SET_SECCOMP
:
10673 /* Disable seccomp to prevent the target disabling syscalls we
10675 return -TARGET_EINVAL
;
10677 /* Most prctl options have no pointer arguments */
10678 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10681 #ifdef TARGET_NR_arch_prctl
10682 case TARGET_NR_arch_prctl
:
10683 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10685 #ifdef TARGET_NR_pread64
10686 case TARGET_NR_pread64
:
10687 if (regpairs_aligned(cpu_env
, num
)) {
10691 if (arg2
== 0 && arg3
== 0) {
10692 /* Special-case NULL buffer and zero length, which should succeed */
10695 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10697 return -TARGET_EFAULT
;
10700 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10701 unlock_user(p
, arg2
, ret
);
10703 case TARGET_NR_pwrite64
:
10704 if (regpairs_aligned(cpu_env
, num
)) {
10708 if (arg2
== 0 && arg3
== 0) {
10709 /* Special-case NULL buffer and zero length, which should succeed */
10712 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10714 return -TARGET_EFAULT
;
10717 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10718 unlock_user(p
, arg2
, 0);
10721 case TARGET_NR_getcwd
:
10722 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10723 return -TARGET_EFAULT
;
10724 ret
= get_errno(sys_getcwd1(p
, arg2
));
10725 unlock_user(p
, arg1
, ret
);
10727 case TARGET_NR_capget
:
10728 case TARGET_NR_capset
:
10730 struct target_user_cap_header
*target_header
;
10731 struct target_user_cap_data
*target_data
= NULL
;
10732 struct __user_cap_header_struct header
;
10733 struct __user_cap_data_struct data
[2];
10734 struct __user_cap_data_struct
*dataptr
= NULL
;
10735 int i
, target_datalen
;
10736 int data_items
= 1;
10738 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10739 return -TARGET_EFAULT
;
10741 header
.version
= tswap32(target_header
->version
);
10742 header
.pid
= tswap32(target_header
->pid
);
10744 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10745 /* Version 2 and up takes pointer to two user_data structs */
10749 target_datalen
= sizeof(*target_data
) * data_items
;
10752 if (num
== TARGET_NR_capget
) {
10753 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10755 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10757 if (!target_data
) {
10758 unlock_user_struct(target_header
, arg1
, 0);
10759 return -TARGET_EFAULT
;
10762 if (num
== TARGET_NR_capset
) {
10763 for (i
= 0; i
< data_items
; i
++) {
10764 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10765 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10766 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10773 if (num
== TARGET_NR_capget
) {
10774 ret
= get_errno(capget(&header
, dataptr
));
10776 ret
= get_errno(capset(&header
, dataptr
));
10779 /* The kernel always updates version for both capget and capset */
10780 target_header
->version
= tswap32(header
.version
);
10781 unlock_user_struct(target_header
, arg1
, 1);
10784 if (num
== TARGET_NR_capget
) {
10785 for (i
= 0; i
< data_items
; i
++) {
10786 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10787 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10788 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10790 unlock_user(target_data
, arg2
, target_datalen
);
10792 unlock_user(target_data
, arg2
, 0);
10797 case TARGET_NR_sigaltstack
:
10798 return do_sigaltstack(arg1
, arg2
,
10799 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10801 #ifdef CONFIG_SENDFILE
10802 #ifdef TARGET_NR_sendfile
10803 case TARGET_NR_sendfile
:
10805 off_t
*offp
= NULL
;
10808 ret
= get_user_sal(off
, arg3
);
10809 if (is_error(ret
)) {
10814 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10815 if (!is_error(ret
) && arg3
) {
10816 abi_long ret2
= put_user_sal(off
, arg3
);
10817 if (is_error(ret2
)) {
10824 #ifdef TARGET_NR_sendfile64
10825 case TARGET_NR_sendfile64
:
10827 off_t
*offp
= NULL
;
10830 ret
= get_user_s64(off
, arg3
);
10831 if (is_error(ret
)) {
10836 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10837 if (!is_error(ret
) && arg3
) {
10838 abi_long ret2
= put_user_s64(off
, arg3
);
10839 if (is_error(ret2
)) {
10847 #ifdef TARGET_NR_vfork
10848 case TARGET_NR_vfork
:
10849 return get_errno(do_fork(cpu_env
,
10850 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10853 #ifdef TARGET_NR_ugetrlimit
10854 case TARGET_NR_ugetrlimit
:
10856 struct rlimit rlim
;
10857 int resource
= target_to_host_resource(arg1
);
10858 ret
= get_errno(getrlimit(resource
, &rlim
));
10859 if (!is_error(ret
)) {
10860 struct target_rlimit
*target_rlim
;
10861 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10862 return -TARGET_EFAULT
;
10863 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10864 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10865 unlock_user_struct(target_rlim
, arg2
, 1);
10870 #ifdef TARGET_NR_truncate64
10871 case TARGET_NR_truncate64
:
10872 if (!(p
= lock_user_string(arg1
)))
10873 return -TARGET_EFAULT
;
10874 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10875 unlock_user(p
, arg1
, 0);
10878 #ifdef TARGET_NR_ftruncate64
10879 case TARGET_NR_ftruncate64
:
10880 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10882 #ifdef TARGET_NR_stat64
10883 case TARGET_NR_stat64
:
10884 if (!(p
= lock_user_string(arg1
))) {
10885 return -TARGET_EFAULT
;
10887 ret
= get_errno(stat(path(p
), &st
));
10888 unlock_user(p
, arg1
, 0);
10889 if (!is_error(ret
))
10890 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10893 #ifdef TARGET_NR_lstat64
10894 case TARGET_NR_lstat64
:
10895 if (!(p
= lock_user_string(arg1
))) {
10896 return -TARGET_EFAULT
;
10898 ret
= get_errno(lstat(path(p
), &st
));
10899 unlock_user(p
, arg1
, 0);
10900 if (!is_error(ret
))
10901 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10904 #ifdef TARGET_NR_fstat64
10905 case TARGET_NR_fstat64
:
10906 ret
= get_errno(fstat(arg1
, &st
));
10907 if (!is_error(ret
))
10908 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10911 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10912 #ifdef TARGET_NR_fstatat64
10913 case TARGET_NR_fstatat64
:
10915 #ifdef TARGET_NR_newfstatat
10916 case TARGET_NR_newfstatat
:
10918 if (!(p
= lock_user_string(arg2
))) {
10919 return -TARGET_EFAULT
;
10921 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10922 unlock_user(p
, arg2
, 0);
10923 if (!is_error(ret
))
10924 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10927 #if defined(TARGET_NR_statx)
10928 case TARGET_NR_statx
:
10930 struct target_statx
*target_stx
;
10934 p
= lock_user_string(arg2
);
10936 return -TARGET_EFAULT
;
10938 #if defined(__NR_statx)
10941 * It is assumed that struct statx is architecture independent.
10943 struct target_statx host_stx
;
10946 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10947 if (!is_error(ret
)) {
10948 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10949 unlock_user(p
, arg2
, 0);
10950 return -TARGET_EFAULT
;
10954 if (ret
!= -TARGET_ENOSYS
) {
10955 unlock_user(p
, arg2
, 0);
10960 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10961 unlock_user(p
, arg2
, 0);
10963 if (!is_error(ret
)) {
10964 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10965 return -TARGET_EFAULT
;
10967 memset(target_stx
, 0, sizeof(*target_stx
));
10968 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10969 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10970 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10971 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10972 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10973 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10974 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10975 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10976 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10977 __put_user(st
.st_size
, &target_stx
->stx_size
);
10978 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10979 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10980 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10981 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10982 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10983 unlock_user_struct(target_stx
, arg5
, 1);
10988 #ifdef TARGET_NR_lchown
10989 case TARGET_NR_lchown
:
10990 if (!(p
= lock_user_string(arg1
)))
10991 return -TARGET_EFAULT
;
10992 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10993 unlock_user(p
, arg1
, 0);
10996 #ifdef TARGET_NR_getuid
10997 case TARGET_NR_getuid
:
10998 return get_errno(high2lowuid(getuid()));
11000 #ifdef TARGET_NR_getgid
11001 case TARGET_NR_getgid
:
11002 return get_errno(high2lowgid(getgid()));
11004 #ifdef TARGET_NR_geteuid
11005 case TARGET_NR_geteuid
:
11006 return get_errno(high2lowuid(geteuid()));
11008 #ifdef TARGET_NR_getegid
11009 case TARGET_NR_getegid
:
11010 return get_errno(high2lowgid(getegid()));
11012 case TARGET_NR_setreuid
:
11013 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11014 case TARGET_NR_setregid
:
11015 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11016 case TARGET_NR_getgroups
:
11018 int gidsetsize
= arg1
;
11019 target_id
*target_grouplist
;
11023 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11024 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11025 if (gidsetsize
== 0)
11027 if (!is_error(ret
)) {
11028 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11029 if (!target_grouplist
)
11030 return -TARGET_EFAULT
;
11031 for(i
= 0;i
< ret
; i
++)
11032 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11033 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11037 case TARGET_NR_setgroups
:
11039 int gidsetsize
= arg1
;
11040 target_id
*target_grouplist
;
11041 gid_t
*grouplist
= NULL
;
11044 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11045 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11046 if (!target_grouplist
) {
11047 return -TARGET_EFAULT
;
11049 for (i
= 0; i
< gidsetsize
; i
++) {
11050 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11052 unlock_user(target_grouplist
, arg2
, 0);
11054 return get_errno(setgroups(gidsetsize
, grouplist
));
11056 case TARGET_NR_fchown
:
11057 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11058 #if defined(TARGET_NR_fchownat)
11059 case TARGET_NR_fchownat
:
11060 if (!(p
= lock_user_string(arg2
)))
11061 return -TARGET_EFAULT
;
11062 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11063 low2highgid(arg4
), arg5
));
11064 unlock_user(p
, arg2
, 0);
11067 #ifdef TARGET_NR_setresuid
11068 case TARGET_NR_setresuid
:
11069 return get_errno(sys_setresuid(low2highuid(arg1
),
11071 low2highuid(arg3
)));
11073 #ifdef TARGET_NR_getresuid
11074 case TARGET_NR_getresuid
:
11076 uid_t ruid
, euid
, suid
;
11077 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11078 if (!is_error(ret
)) {
11079 if (put_user_id(high2lowuid(ruid
), arg1
)
11080 || put_user_id(high2lowuid(euid
), arg2
)
11081 || put_user_id(high2lowuid(suid
), arg3
))
11082 return -TARGET_EFAULT
;
11087 #ifdef TARGET_NR_getresgid
11088 case TARGET_NR_setresgid
:
11089 return get_errno(sys_setresgid(low2highgid(arg1
),
11091 low2highgid(arg3
)));
11093 #ifdef TARGET_NR_getresgid
11094 case TARGET_NR_getresgid
:
11096 gid_t rgid
, egid
, sgid
;
11097 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11098 if (!is_error(ret
)) {
11099 if (put_user_id(high2lowgid(rgid
), arg1
)
11100 || put_user_id(high2lowgid(egid
), arg2
)
11101 || put_user_id(high2lowgid(sgid
), arg3
))
11102 return -TARGET_EFAULT
;
11107 #ifdef TARGET_NR_chown
11108 case TARGET_NR_chown
:
11109 if (!(p
= lock_user_string(arg1
)))
11110 return -TARGET_EFAULT
;
11111 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11112 unlock_user(p
, arg1
, 0);
11115 case TARGET_NR_setuid
:
11116 return get_errno(sys_setuid(low2highuid(arg1
)));
11117 case TARGET_NR_setgid
:
11118 return get_errno(sys_setgid(low2highgid(arg1
)));
11119 case TARGET_NR_setfsuid
:
11120 return get_errno(setfsuid(arg1
));
11121 case TARGET_NR_setfsgid
:
11122 return get_errno(setfsgid(arg1
));
11124 #ifdef TARGET_NR_lchown32
11125 case TARGET_NR_lchown32
:
11126 if (!(p
= lock_user_string(arg1
)))
11127 return -TARGET_EFAULT
;
11128 ret
= get_errno(lchown(p
, arg2
, arg3
));
11129 unlock_user(p
, arg1
, 0);
11132 #ifdef TARGET_NR_getuid32
11133 case TARGET_NR_getuid32
:
11134 return get_errno(getuid());
11137 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11138 /* Alpha specific */
11139 case TARGET_NR_getxuid
:
11143 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11145 return get_errno(getuid());
11147 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11148 /* Alpha specific */
11149 case TARGET_NR_getxgid
:
11153 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11155 return get_errno(getgid());
11157 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11158 /* Alpha specific */
11159 case TARGET_NR_osf_getsysinfo
:
11160 ret
= -TARGET_EOPNOTSUPP
;
11162 case TARGET_GSI_IEEE_FP_CONTROL
:
11164 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11165 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11167 swcr
&= ~SWCR_STATUS_MASK
;
11168 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11170 if (put_user_u64 (swcr
, arg2
))
11171 return -TARGET_EFAULT
;
11176 /* case GSI_IEEE_STATE_AT_SIGNAL:
11177 -- Not implemented in linux kernel.
11179 -- Retrieves current unaligned access state; not much used.
11180 case GSI_PROC_TYPE:
11181 -- Retrieves implver information; surely not used.
11182 case GSI_GET_HWRPB:
11183 -- Grabs a copy of the HWRPB; surely not used.
11188 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11189 /* Alpha specific */
11190 case TARGET_NR_osf_setsysinfo
:
11191 ret
= -TARGET_EOPNOTSUPP
;
11193 case TARGET_SSI_IEEE_FP_CONTROL
:
11195 uint64_t swcr
, fpcr
;
11197 if (get_user_u64 (swcr
, arg2
)) {
11198 return -TARGET_EFAULT
;
11202 * The kernel calls swcr_update_status to update the
11203 * status bits from the fpcr at every point that it
11204 * could be queried. Therefore, we store the status
11205 * bits only in FPCR.
11207 ((CPUAlphaState
*)cpu_env
)->swcr
11208 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11210 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11211 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11212 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11213 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11218 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11220 uint64_t exc
, fpcr
, fex
;
11222 if (get_user_u64(exc
, arg2
)) {
11223 return -TARGET_EFAULT
;
11225 exc
&= SWCR_STATUS_MASK
;
11226 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11228 /* Old exceptions are not signaled. */
11229 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11231 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11232 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11234 /* Update the hardware fpcr. */
11235 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11236 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11239 int si_code
= TARGET_FPE_FLTUNK
;
11240 target_siginfo_t info
;
11242 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11243 si_code
= TARGET_FPE_FLTUND
;
11245 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11246 si_code
= TARGET_FPE_FLTRES
;
11248 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11249 si_code
= TARGET_FPE_FLTUND
;
11251 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11252 si_code
= TARGET_FPE_FLTOVF
;
11254 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11255 si_code
= TARGET_FPE_FLTDIV
;
11257 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11258 si_code
= TARGET_FPE_FLTINV
;
11261 info
.si_signo
= SIGFPE
;
11263 info
.si_code
= si_code
;
11264 info
._sifields
._sigfault
._addr
11265 = ((CPUArchState
*)cpu_env
)->pc
;
11266 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11267 QEMU_SI_FAULT
, &info
);
11273 /* case SSI_NVPAIRS:
11274 -- Used with SSIN_UACPROC to enable unaligned accesses.
11275 case SSI_IEEE_STATE_AT_SIGNAL:
11276 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11277 -- Not implemented in linux kernel
11282 #ifdef TARGET_NR_osf_sigprocmask
11283 /* Alpha specific. */
11284 case TARGET_NR_osf_sigprocmask
:
11288 sigset_t set
, oldset
;
11291 case TARGET_SIG_BLOCK
:
11294 case TARGET_SIG_UNBLOCK
:
11297 case TARGET_SIG_SETMASK
:
11301 return -TARGET_EINVAL
;
11304 target_to_host_old_sigset(&set
, &mask
);
11305 ret
= do_sigprocmask(how
, &set
, &oldset
);
11307 host_to_target_old_sigset(&mask
, &oldset
);
11314 #ifdef TARGET_NR_getgid32
11315 case TARGET_NR_getgid32
:
11316 return get_errno(getgid());
11318 #ifdef TARGET_NR_geteuid32
11319 case TARGET_NR_geteuid32
:
11320 return get_errno(geteuid());
11322 #ifdef TARGET_NR_getegid32
11323 case TARGET_NR_getegid32
:
11324 return get_errno(getegid());
11326 #ifdef TARGET_NR_setreuid32
11327 case TARGET_NR_setreuid32
:
11328 return get_errno(setreuid(arg1
, arg2
));
11330 #ifdef TARGET_NR_setregid32
11331 case TARGET_NR_setregid32
:
11332 return get_errno(setregid(arg1
, arg2
));
11334 #ifdef TARGET_NR_getgroups32
11335 case TARGET_NR_getgroups32
:
11337 int gidsetsize
= arg1
;
11338 uint32_t *target_grouplist
;
11342 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11343 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11344 if (gidsetsize
== 0)
11346 if (!is_error(ret
)) {
11347 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11348 if (!target_grouplist
) {
11349 return -TARGET_EFAULT
;
11351 for(i
= 0;i
< ret
; i
++)
11352 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11353 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11358 #ifdef TARGET_NR_setgroups32
11359 case TARGET_NR_setgroups32
:
11361 int gidsetsize
= arg1
;
11362 uint32_t *target_grouplist
;
11366 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11367 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11368 if (!target_grouplist
) {
11369 return -TARGET_EFAULT
;
11371 for(i
= 0;i
< gidsetsize
; i
++)
11372 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11373 unlock_user(target_grouplist
, arg2
, 0);
11374 return get_errno(setgroups(gidsetsize
, grouplist
));
11377 #ifdef TARGET_NR_fchown32
11378 case TARGET_NR_fchown32
:
11379 return get_errno(fchown(arg1
, arg2
, arg3
));
11381 #ifdef TARGET_NR_setresuid32
11382 case TARGET_NR_setresuid32
:
11383 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11385 #ifdef TARGET_NR_getresuid32
11386 case TARGET_NR_getresuid32
:
11388 uid_t ruid
, euid
, suid
;
11389 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11390 if (!is_error(ret
)) {
11391 if (put_user_u32(ruid
, arg1
)
11392 || put_user_u32(euid
, arg2
)
11393 || put_user_u32(suid
, arg3
))
11394 return -TARGET_EFAULT
;
11399 #ifdef TARGET_NR_setresgid32
11400 case TARGET_NR_setresgid32
:
11401 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11403 #ifdef TARGET_NR_getresgid32
11404 case TARGET_NR_getresgid32
:
11406 gid_t rgid
, egid
, sgid
;
11407 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11408 if (!is_error(ret
)) {
11409 if (put_user_u32(rgid
, arg1
)
11410 || put_user_u32(egid
, arg2
)
11411 || put_user_u32(sgid
, arg3
))
11412 return -TARGET_EFAULT
;
11417 #ifdef TARGET_NR_chown32
11418 case TARGET_NR_chown32
:
11419 if (!(p
= lock_user_string(arg1
)))
11420 return -TARGET_EFAULT
;
11421 ret
= get_errno(chown(p
, arg2
, arg3
));
11422 unlock_user(p
, arg1
, 0);
11425 #ifdef TARGET_NR_setuid32
11426 case TARGET_NR_setuid32
:
11427 return get_errno(sys_setuid(arg1
));
11429 #ifdef TARGET_NR_setgid32
11430 case TARGET_NR_setgid32
:
11431 return get_errno(sys_setgid(arg1
));
11433 #ifdef TARGET_NR_setfsuid32
11434 case TARGET_NR_setfsuid32
:
11435 return get_errno(setfsuid(arg1
));
11437 #ifdef TARGET_NR_setfsgid32
11438 case TARGET_NR_setfsgid32
:
11439 return get_errno(setfsgid(arg1
));
11441 #ifdef TARGET_NR_mincore
11442 case TARGET_NR_mincore
:
11444 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11446 return -TARGET_ENOMEM
;
11448 p
= lock_user_string(arg3
);
11450 ret
= -TARGET_EFAULT
;
11452 ret
= get_errno(mincore(a
, arg2
, p
));
11453 unlock_user(p
, arg3
, ret
);
11455 unlock_user(a
, arg1
, 0);
11459 #ifdef TARGET_NR_arm_fadvise64_64
11460 case TARGET_NR_arm_fadvise64_64
:
11461 /* arm_fadvise64_64 looks like fadvise64_64 but
11462 * with different argument order: fd, advice, offset, len
11463 * rather than the usual fd, offset, len, advice.
11464 * Note that offset and len are both 64-bit so appear as
11465 * pairs of 32-bit registers.
11467 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11468 target_offset64(arg5
, arg6
), arg2
);
11469 return -host_to_target_errno(ret
);
11472 #if TARGET_ABI_BITS == 32
11474 #ifdef TARGET_NR_fadvise64_64
11475 case TARGET_NR_fadvise64_64
:
11476 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11477 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11485 /* 6 args: fd, offset (high, low), len (high, low), advice */
11486 if (regpairs_aligned(cpu_env
, num
)) {
11487 /* offset is in (3,4), len in (5,6) and advice in 7 */
11495 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11496 target_offset64(arg4
, arg5
), arg6
);
11497 return -host_to_target_errno(ret
);
11500 #ifdef TARGET_NR_fadvise64
11501 case TARGET_NR_fadvise64
:
11502 /* 5 args: fd, offset (high, low), len, advice */
11503 if (regpairs_aligned(cpu_env
, num
)) {
11504 /* offset is in (3,4), len in 5 and advice in 6 */
11510 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11511 return -host_to_target_errno(ret
);
11514 #else /* not a 32-bit ABI */
11515 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11516 #ifdef TARGET_NR_fadvise64_64
11517 case TARGET_NR_fadvise64_64
:
11519 #ifdef TARGET_NR_fadvise64
11520 case TARGET_NR_fadvise64
:
11522 #ifdef TARGET_S390X
11524 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11525 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11526 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11527 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11531 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11533 #endif /* end of 64-bit ABI fadvise handling */
11535 #ifdef TARGET_NR_madvise
11536 case TARGET_NR_madvise
:
11537 /* A straight passthrough may not be safe because qemu sometimes
11538 turns private file-backed mappings into anonymous mappings.
11539 This will break MADV_DONTNEED.
11540 This is a hint, so ignoring and returning success is ok. */
11543 #ifdef TARGET_NR_fcntl64
11544 case TARGET_NR_fcntl64
:
11548 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11549 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11552 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11553 copyfrom
= copy_from_user_oabi_flock64
;
11554 copyto
= copy_to_user_oabi_flock64
;
11558 cmd
= target_to_host_fcntl_cmd(arg2
);
11559 if (cmd
== -TARGET_EINVAL
) {
11564 case TARGET_F_GETLK64
:
11565 ret
= copyfrom(&fl
, arg3
);
11569 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11571 ret
= copyto(arg3
, &fl
);
11575 case TARGET_F_SETLK64
:
11576 case TARGET_F_SETLKW64
:
11577 ret
= copyfrom(&fl
, arg3
);
11581 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11584 ret
= do_fcntl(arg1
, arg2
, arg3
);
11590 #ifdef TARGET_NR_cacheflush
11591 case TARGET_NR_cacheflush
:
11592 /* self-modifying code is handled automatically, so nothing needed */
11595 #ifdef TARGET_NR_getpagesize
11596 case TARGET_NR_getpagesize
:
11597 return TARGET_PAGE_SIZE
;
11599 case TARGET_NR_gettid
:
11600 return get_errno(sys_gettid());
11601 #ifdef TARGET_NR_readahead
11602 case TARGET_NR_readahead
:
11603 #if TARGET_ABI_BITS == 32
11604 if (regpairs_aligned(cpu_env
, num
)) {
11609 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11611 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11616 #ifdef TARGET_NR_setxattr
11617 case TARGET_NR_listxattr
:
11618 case TARGET_NR_llistxattr
:
11622 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11624 return -TARGET_EFAULT
;
11627 p
= lock_user_string(arg1
);
11629 if (num
== TARGET_NR_listxattr
) {
11630 ret
= get_errno(listxattr(p
, b
, arg3
));
11632 ret
= get_errno(llistxattr(p
, b
, arg3
));
11635 ret
= -TARGET_EFAULT
;
11637 unlock_user(p
, arg1
, 0);
11638 unlock_user(b
, arg2
, arg3
);
11641 case TARGET_NR_flistxattr
:
11645 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11647 return -TARGET_EFAULT
;
11650 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11651 unlock_user(b
, arg2
, arg3
);
11654 case TARGET_NR_setxattr
:
11655 case TARGET_NR_lsetxattr
:
11657 void *p
, *n
, *v
= 0;
11659 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11661 return -TARGET_EFAULT
;
11664 p
= lock_user_string(arg1
);
11665 n
= lock_user_string(arg2
);
11667 if (num
== TARGET_NR_setxattr
) {
11668 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11670 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11673 ret
= -TARGET_EFAULT
;
11675 unlock_user(p
, arg1
, 0);
11676 unlock_user(n
, arg2
, 0);
11677 unlock_user(v
, arg3
, 0);
11680 case TARGET_NR_fsetxattr
:
11684 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11686 return -TARGET_EFAULT
;
11689 n
= lock_user_string(arg2
);
11691 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11693 ret
= -TARGET_EFAULT
;
11695 unlock_user(n
, arg2
, 0);
11696 unlock_user(v
, arg3
, 0);
11699 case TARGET_NR_getxattr
:
11700 case TARGET_NR_lgetxattr
:
11702 void *p
, *n
, *v
= 0;
11704 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11706 return -TARGET_EFAULT
;
11709 p
= lock_user_string(arg1
);
11710 n
= lock_user_string(arg2
);
11712 if (num
== TARGET_NR_getxattr
) {
11713 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11715 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11718 ret
= -TARGET_EFAULT
;
11720 unlock_user(p
, arg1
, 0);
11721 unlock_user(n
, arg2
, 0);
11722 unlock_user(v
, arg3
, arg4
);
11725 case TARGET_NR_fgetxattr
:
11729 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11731 return -TARGET_EFAULT
;
11734 n
= lock_user_string(arg2
);
11736 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11738 ret
= -TARGET_EFAULT
;
11740 unlock_user(n
, arg2
, 0);
11741 unlock_user(v
, arg3
, arg4
);
11744 case TARGET_NR_removexattr
:
11745 case TARGET_NR_lremovexattr
:
11748 p
= lock_user_string(arg1
);
11749 n
= lock_user_string(arg2
);
11751 if (num
== TARGET_NR_removexattr
) {
11752 ret
= get_errno(removexattr(p
, n
));
11754 ret
= get_errno(lremovexattr(p
, n
));
11757 ret
= -TARGET_EFAULT
;
11759 unlock_user(p
, arg1
, 0);
11760 unlock_user(n
, arg2
, 0);
11763 case TARGET_NR_fremovexattr
:
11766 n
= lock_user_string(arg2
);
11768 ret
= get_errno(fremovexattr(arg1
, n
));
11770 ret
= -TARGET_EFAULT
;
11772 unlock_user(n
, arg2
, 0);
11776 #endif /* CONFIG_ATTR */
11777 #ifdef TARGET_NR_set_thread_area
11778 case TARGET_NR_set_thread_area
:
11779 #if defined(TARGET_MIPS)
11780 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11782 #elif defined(TARGET_CRIS)
11784 ret
= -TARGET_EINVAL
;
11786 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11790 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11791 return do_set_thread_area(cpu_env
, arg1
);
11792 #elif defined(TARGET_M68K)
11794 TaskState
*ts
= cpu
->opaque
;
11795 ts
->tp_value
= arg1
;
11799 return -TARGET_ENOSYS
;
11802 #ifdef TARGET_NR_get_thread_area
11803 case TARGET_NR_get_thread_area
:
11804 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11805 return do_get_thread_area(cpu_env
, arg1
);
11806 #elif defined(TARGET_M68K)
11808 TaskState
*ts
= cpu
->opaque
;
11809 return ts
->tp_value
;
11812 return -TARGET_ENOSYS
;
11815 #ifdef TARGET_NR_getdomainname
11816 case TARGET_NR_getdomainname
:
11817 return -TARGET_ENOSYS
;
11820 #ifdef TARGET_NR_clock_settime
11821 case TARGET_NR_clock_settime
:
11823 struct timespec ts
;
11825 ret
= target_to_host_timespec(&ts
, arg2
);
11826 if (!is_error(ret
)) {
11827 ret
= get_errno(clock_settime(arg1
, &ts
));
11832 #ifdef TARGET_NR_clock_settime64
11833 case TARGET_NR_clock_settime64
:
11835 struct timespec ts
;
11837 ret
= target_to_host_timespec64(&ts
, arg2
);
11838 if (!is_error(ret
)) {
11839 ret
= get_errno(clock_settime(arg1
, &ts
));
11844 #ifdef TARGET_NR_clock_gettime
11845 case TARGET_NR_clock_gettime
:
11847 struct timespec ts
;
11848 ret
= get_errno(clock_gettime(arg1
, &ts
));
11849 if (!is_error(ret
)) {
11850 ret
= host_to_target_timespec(arg2
, &ts
);
11855 #ifdef TARGET_NR_clock_gettime64
11856 case TARGET_NR_clock_gettime64
:
11858 struct timespec ts
;
11859 ret
= get_errno(clock_gettime(arg1
, &ts
));
11860 if (!is_error(ret
)) {
11861 ret
= host_to_target_timespec64(arg2
, &ts
);
11866 #ifdef TARGET_NR_clock_getres
11867 case TARGET_NR_clock_getres
:
11869 struct timespec ts
;
11870 ret
= get_errno(clock_getres(arg1
, &ts
));
11871 if (!is_error(ret
)) {
11872 host_to_target_timespec(arg2
, &ts
);
11877 #ifdef TARGET_NR_clock_getres_time64
11878 case TARGET_NR_clock_getres_time64
:
11880 struct timespec ts
;
11881 ret
= get_errno(clock_getres(arg1
, &ts
));
11882 if (!is_error(ret
)) {
11883 host_to_target_timespec64(arg2
, &ts
);
11888 #ifdef TARGET_NR_clock_nanosleep
11889 case TARGET_NR_clock_nanosleep
:
11891 struct timespec ts
;
11892 if (target_to_host_timespec(&ts
, arg3
)) {
11893 return -TARGET_EFAULT
;
11895 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11896 &ts
, arg4
? &ts
: NULL
));
11898 * if the call is interrupted by a signal handler, it fails
11899 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
11900 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
11902 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
11903 host_to_target_timespec(arg4
, &ts
)) {
11904 return -TARGET_EFAULT
;
11911 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11912 case TARGET_NR_set_tid_address
:
11913 return get_errno(set_tid_address((int *)g2h(arg1
)));
11916 case TARGET_NR_tkill
:
11917 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11919 case TARGET_NR_tgkill
:
11920 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11921 target_to_host_signal(arg3
)));
11923 #ifdef TARGET_NR_set_robust_list
11924 case TARGET_NR_set_robust_list
:
11925 case TARGET_NR_get_robust_list
:
11926 /* The ABI for supporting robust futexes has userspace pass
11927 * the kernel a pointer to a linked list which is updated by
11928 * userspace after the syscall; the list is walked by the kernel
11929 * when the thread exits. Since the linked list in QEMU guest
11930 * memory isn't a valid linked list for the host and we have
11931 * no way to reliably intercept the thread-death event, we can't
11932 * support these. Silently return ENOSYS so that guest userspace
11933 * falls back to a non-robust futex implementation (which should
11934 * be OK except in the corner case of the guest crashing while
11935 * holding a mutex that is shared with another process via
11938 return -TARGET_ENOSYS
;
11941 #if defined(TARGET_NR_utimensat)
11942 case TARGET_NR_utimensat
:
11944 struct timespec
*tsp
, ts
[2];
11948 if (target_to_host_timespec(ts
, arg3
)) {
11949 return -TARGET_EFAULT
;
11951 if (target_to_host_timespec(ts
+ 1, arg3
+
11952 sizeof(struct target_timespec
))) {
11953 return -TARGET_EFAULT
;
11958 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11960 if (!(p
= lock_user_string(arg2
))) {
11961 return -TARGET_EFAULT
;
11963 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11964 unlock_user(p
, arg2
, 0);
11969 #ifdef TARGET_NR_futex
11970 case TARGET_NR_futex
:
11971 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11973 #ifdef TARGET_NR_futex_time64
11974 case TARGET_NR_futex_time64
:
11975 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11977 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11978 case TARGET_NR_inotify_init
:
11979 ret
= get_errno(sys_inotify_init());
11981 fd_trans_register(ret
, &target_inotify_trans
);
11985 #ifdef CONFIG_INOTIFY1
11986 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11987 case TARGET_NR_inotify_init1
:
11988 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11989 fcntl_flags_tbl
)));
11991 fd_trans_register(ret
, &target_inotify_trans
);
11996 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11997 case TARGET_NR_inotify_add_watch
:
11998 p
= lock_user_string(arg2
);
11999 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12000 unlock_user(p
, arg2
, 0);
12003 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12004 case TARGET_NR_inotify_rm_watch
:
12005 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12008 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12009 case TARGET_NR_mq_open
:
12011 struct mq_attr posix_mq_attr
;
12012 struct mq_attr
*pposix_mq_attr
;
12015 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12016 pposix_mq_attr
= NULL
;
12018 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12019 return -TARGET_EFAULT
;
12021 pposix_mq_attr
= &posix_mq_attr
;
12023 p
= lock_user_string(arg1
- 1);
12025 return -TARGET_EFAULT
;
12027 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12028 unlock_user (p
, arg1
, 0);
12032 case TARGET_NR_mq_unlink
:
12033 p
= lock_user_string(arg1
- 1);
12035 return -TARGET_EFAULT
;
12037 ret
= get_errno(mq_unlink(p
));
12038 unlock_user (p
, arg1
, 0);
12041 #ifdef TARGET_NR_mq_timedsend
12042 case TARGET_NR_mq_timedsend
:
12044 struct timespec ts
;
12046 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12048 if (target_to_host_timespec(&ts
, arg5
)) {
12049 return -TARGET_EFAULT
;
12051 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12052 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12053 return -TARGET_EFAULT
;
12056 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12058 unlock_user (p
, arg2
, arg3
);
12063 #ifdef TARGET_NR_mq_timedreceive
12064 case TARGET_NR_mq_timedreceive
:
12066 struct timespec ts
;
12069 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12071 if (target_to_host_timespec(&ts
, arg5
)) {
12072 return -TARGET_EFAULT
;
12074 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12076 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12077 return -TARGET_EFAULT
;
12080 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12083 unlock_user (p
, arg2
, arg3
);
12085 put_user_u32(prio
, arg4
);
12090 /* Not implemented for now... */
12091 /* case TARGET_NR_mq_notify: */
12094 case TARGET_NR_mq_getsetattr
:
12096 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12099 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12100 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12101 &posix_mq_attr_out
));
12102 } else if (arg3
!= 0) {
12103 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12105 if (ret
== 0 && arg3
!= 0) {
12106 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12112 #ifdef CONFIG_SPLICE
12113 #ifdef TARGET_NR_tee
12114 case TARGET_NR_tee
:
12116 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12120 #ifdef TARGET_NR_splice
12121 case TARGET_NR_splice
:
12123 loff_t loff_in
, loff_out
;
12124 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12126 if (get_user_u64(loff_in
, arg2
)) {
12127 return -TARGET_EFAULT
;
12129 ploff_in
= &loff_in
;
12132 if (get_user_u64(loff_out
, arg4
)) {
12133 return -TARGET_EFAULT
;
12135 ploff_out
= &loff_out
;
12137 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12139 if (put_user_u64(loff_in
, arg2
)) {
12140 return -TARGET_EFAULT
;
12144 if (put_user_u64(loff_out
, arg4
)) {
12145 return -TARGET_EFAULT
;
12151 #ifdef TARGET_NR_vmsplice
12152 case TARGET_NR_vmsplice
:
12154 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12156 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12157 unlock_iovec(vec
, arg2
, arg3
, 0);
12159 ret
= -host_to_target_errno(errno
);
12164 #endif /* CONFIG_SPLICE */
12165 #ifdef CONFIG_EVENTFD
12166 #if defined(TARGET_NR_eventfd)
12167 case TARGET_NR_eventfd
:
12168 ret
= get_errno(eventfd(arg1
, 0));
12170 fd_trans_register(ret
, &target_eventfd_trans
);
12174 #if defined(TARGET_NR_eventfd2)
12175 case TARGET_NR_eventfd2
:
12177 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12178 if (arg2
& TARGET_O_NONBLOCK
) {
12179 host_flags
|= O_NONBLOCK
;
12181 if (arg2
& TARGET_O_CLOEXEC
) {
12182 host_flags
|= O_CLOEXEC
;
12184 ret
= get_errno(eventfd(arg1
, host_flags
));
12186 fd_trans_register(ret
, &target_eventfd_trans
);
12191 #endif /* CONFIG_EVENTFD */
12192 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12193 case TARGET_NR_fallocate
:
12194 #if TARGET_ABI_BITS == 32
12195 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12196 target_offset64(arg5
, arg6
)));
12198 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12202 #if defined(CONFIG_SYNC_FILE_RANGE)
12203 #if defined(TARGET_NR_sync_file_range)
12204 case TARGET_NR_sync_file_range
:
12205 #if TARGET_ABI_BITS == 32
12206 #if defined(TARGET_MIPS)
12207 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12208 target_offset64(arg5
, arg6
), arg7
));
12210 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12211 target_offset64(arg4
, arg5
), arg6
));
12212 #endif /* !TARGET_MIPS */
12214 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12218 #if defined(TARGET_NR_sync_file_range2) || \
12219 defined(TARGET_NR_arm_sync_file_range)
12220 #if defined(TARGET_NR_sync_file_range2)
12221 case TARGET_NR_sync_file_range2
:
12223 #if defined(TARGET_NR_arm_sync_file_range)
12224 case TARGET_NR_arm_sync_file_range
:
12226 /* This is like sync_file_range but the arguments are reordered */
12227 #if TARGET_ABI_BITS == 32
12228 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12229 target_offset64(arg5
, arg6
), arg2
));
12231 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12236 #if defined(TARGET_NR_signalfd4)
12237 case TARGET_NR_signalfd4
:
12238 return do_signalfd4(arg1
, arg2
, arg4
);
12240 #if defined(TARGET_NR_signalfd)
12241 case TARGET_NR_signalfd
:
12242 return do_signalfd4(arg1
, arg2
, 0);
12244 #if defined(CONFIG_EPOLL)
12245 #if defined(TARGET_NR_epoll_create)
12246 case TARGET_NR_epoll_create
:
12247 return get_errno(epoll_create(arg1
));
12249 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12250 case TARGET_NR_epoll_create1
:
12251 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12253 #if defined(TARGET_NR_epoll_ctl)
12254 case TARGET_NR_epoll_ctl
:
12256 struct epoll_event ep
;
12257 struct epoll_event
*epp
= 0;
12259 struct target_epoll_event
*target_ep
;
12260 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12261 return -TARGET_EFAULT
;
12263 ep
.events
= tswap32(target_ep
->events
);
12264 /* The epoll_data_t union is just opaque data to the kernel,
12265 * so we transfer all 64 bits across and need not worry what
12266 * actual data type it is.
12268 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12269 unlock_user_struct(target_ep
, arg4
, 0);
12272 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12276 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12277 #if defined(TARGET_NR_epoll_wait)
12278 case TARGET_NR_epoll_wait
:
12280 #if defined(TARGET_NR_epoll_pwait)
12281 case TARGET_NR_epoll_pwait
:
12284 struct target_epoll_event
*target_ep
;
12285 struct epoll_event
*ep
;
12287 int maxevents
= arg3
;
12288 int timeout
= arg4
;
12290 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12291 return -TARGET_EINVAL
;
12294 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12295 maxevents
* sizeof(struct target_epoll_event
), 1);
12297 return -TARGET_EFAULT
;
12300 ep
= g_try_new(struct epoll_event
, maxevents
);
12302 unlock_user(target_ep
, arg2
, 0);
12303 return -TARGET_ENOMEM
;
12307 #if defined(TARGET_NR_epoll_pwait)
12308 case TARGET_NR_epoll_pwait
:
12310 target_sigset_t
*target_set
;
12311 sigset_t _set
, *set
= &_set
;
12314 if (arg6
!= sizeof(target_sigset_t
)) {
12315 ret
= -TARGET_EINVAL
;
12319 target_set
= lock_user(VERIFY_READ
, arg5
,
12320 sizeof(target_sigset_t
), 1);
12322 ret
= -TARGET_EFAULT
;
12325 target_to_host_sigset(set
, target_set
);
12326 unlock_user(target_set
, arg5
, 0);
12331 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12332 set
, SIGSET_T_SIZE
));
12336 #if defined(TARGET_NR_epoll_wait)
12337 case TARGET_NR_epoll_wait
:
12338 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12343 ret
= -TARGET_ENOSYS
;
12345 if (!is_error(ret
)) {
12347 for (i
= 0; i
< ret
; i
++) {
12348 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12349 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12351 unlock_user(target_ep
, arg2
,
12352 ret
* sizeof(struct target_epoll_event
));
12354 unlock_user(target_ep
, arg2
, 0);
12361 #ifdef TARGET_NR_prlimit64
12362 case TARGET_NR_prlimit64
:
12364 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12365 struct target_rlimit64
*target_rnew
, *target_rold
;
12366 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12367 int resource
= target_to_host_resource(arg2
);
12369 if (arg3
&& (resource
!= RLIMIT_AS
&&
12370 resource
!= RLIMIT_DATA
&&
12371 resource
!= RLIMIT_STACK
)) {
12372 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12373 return -TARGET_EFAULT
;
12375 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12376 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12377 unlock_user_struct(target_rnew
, arg3
, 0);
12381 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12382 if (!is_error(ret
) && arg4
) {
12383 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12384 return -TARGET_EFAULT
;
12386 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12387 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12388 unlock_user_struct(target_rold
, arg4
, 1);
12393 #ifdef TARGET_NR_gethostname
12394 case TARGET_NR_gethostname
:
12396 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12398 ret
= get_errno(gethostname(name
, arg2
));
12399 unlock_user(name
, arg1
, arg2
);
12401 ret
= -TARGET_EFAULT
;
12406 #ifdef TARGET_NR_atomic_cmpxchg_32
12407 case TARGET_NR_atomic_cmpxchg_32
:
12409 /* should use start_exclusive from main.c */
12410 abi_ulong mem_value
;
12411 if (get_user_u32(mem_value
, arg6
)) {
12412 target_siginfo_t info
;
12413 info
.si_signo
= SIGSEGV
;
12415 info
.si_code
= TARGET_SEGV_MAPERR
;
12416 info
._sifields
._sigfault
._addr
= arg6
;
12417 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12418 QEMU_SI_FAULT
, &info
);
12422 if (mem_value
== arg2
)
12423 put_user_u32(arg1
, arg6
);
12427 #ifdef TARGET_NR_atomic_barrier
12428 case TARGET_NR_atomic_barrier
:
12429 /* Like the kernel implementation and the
12430 qemu arm barrier, no-op this? */
12434 #ifdef TARGET_NR_timer_create
12435 case TARGET_NR_timer_create
:
12437 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12439 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12442 int timer_index
= next_free_host_timer();
12444 if (timer_index
< 0) {
12445 ret
= -TARGET_EAGAIN
;
12447 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12450 phost_sevp
= &host_sevp
;
12451 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12457 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12461 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12462 return -TARGET_EFAULT
;
12470 #ifdef TARGET_NR_timer_settime
12471 case TARGET_NR_timer_settime
:
12473 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12474 * struct itimerspec * old_value */
12475 target_timer_t timerid
= get_timer_id(arg1
);
12479 } else if (arg3
== 0) {
12480 ret
= -TARGET_EINVAL
;
12482 timer_t htimer
= g_posix_timers
[timerid
];
12483 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12485 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12486 return -TARGET_EFAULT
;
12489 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12490 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12491 return -TARGET_EFAULT
;
12498 #ifdef TARGET_NR_timer_settime64
12499 case TARGET_NR_timer_settime64
:
12501 target_timer_t timerid
= get_timer_id(arg1
);
12505 } else if (arg3
== 0) {
12506 ret
= -TARGET_EINVAL
;
12508 timer_t htimer
= g_posix_timers
[timerid
];
12509 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12511 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12512 return -TARGET_EFAULT
;
12515 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12516 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12517 return -TARGET_EFAULT
;
12524 #ifdef TARGET_NR_timer_gettime
12525 case TARGET_NR_timer_gettime
:
12527 /* args: timer_t timerid, struct itimerspec *curr_value */
12528 target_timer_t timerid
= get_timer_id(arg1
);
12532 } else if (!arg2
) {
12533 ret
= -TARGET_EFAULT
;
12535 timer_t htimer
= g_posix_timers
[timerid
];
12536 struct itimerspec hspec
;
12537 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12539 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12540 ret
= -TARGET_EFAULT
;
12547 #ifdef TARGET_NR_timer_gettime64
12548 case TARGET_NR_timer_gettime64
:
12550 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12551 target_timer_t timerid
= get_timer_id(arg1
);
12555 } else if (!arg2
) {
12556 ret
= -TARGET_EFAULT
;
12558 timer_t htimer
= g_posix_timers
[timerid
];
12559 struct itimerspec hspec
;
12560 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12562 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12563 ret
= -TARGET_EFAULT
;
12570 #ifdef TARGET_NR_timer_getoverrun
12571 case TARGET_NR_timer_getoverrun
:
12573 /* args: timer_t timerid */
12574 target_timer_t timerid
= get_timer_id(arg1
);
12579 timer_t htimer
= g_posix_timers
[timerid
];
12580 ret
= get_errno(timer_getoverrun(htimer
));
12586 #ifdef TARGET_NR_timer_delete
12587 case TARGET_NR_timer_delete
:
12589 /* args: timer_t timerid */
12590 target_timer_t timerid
= get_timer_id(arg1
);
12595 timer_t htimer
= g_posix_timers
[timerid
];
12596 ret
= get_errno(timer_delete(htimer
));
12597 g_posix_timers
[timerid
] = 0;
12603 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12604 case TARGET_NR_timerfd_create
:
12605 return get_errno(timerfd_create(arg1
,
12606 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12609 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12610 case TARGET_NR_timerfd_gettime
:
12612 struct itimerspec its_curr
;
12614 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12616 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12617 return -TARGET_EFAULT
;
12623 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12624 case TARGET_NR_timerfd_gettime64
:
12626 struct itimerspec its_curr
;
12628 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12630 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
12631 return -TARGET_EFAULT
;
12637 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12638 case TARGET_NR_timerfd_settime
:
12640 struct itimerspec its_new
, its_old
, *p_new
;
12643 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12644 return -TARGET_EFAULT
;
12651 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12653 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12654 return -TARGET_EFAULT
;
12660 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12661 case TARGET_NR_timerfd_settime64
:
12663 struct itimerspec its_new
, its_old
, *p_new
;
12666 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
12667 return -TARGET_EFAULT
;
12674 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12676 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
12677 return -TARGET_EFAULT
;
12683 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12684 case TARGET_NR_ioprio_get
:
12685 return get_errno(ioprio_get(arg1
, arg2
));
12688 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12689 case TARGET_NR_ioprio_set
:
12690 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12693 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12694 case TARGET_NR_setns
:
12695 return get_errno(setns(arg1
, arg2
));
12697 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12698 case TARGET_NR_unshare
:
12699 return get_errno(unshare(arg1
));
12701 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12702 case TARGET_NR_kcmp
:
12703 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12705 #ifdef TARGET_NR_swapcontext
12706 case TARGET_NR_swapcontext
:
12707 /* PowerPC specific. */
12708 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12710 #ifdef TARGET_NR_memfd_create
12711 case TARGET_NR_memfd_create
:
12712 p
= lock_user_string(arg1
);
12714 return -TARGET_EFAULT
;
12716 ret
= get_errno(memfd_create(p
, arg2
));
12717 fd_trans_unregister(ret
);
12718 unlock_user(p
, arg1
, 0);
12721 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12722 case TARGET_NR_membarrier
:
12723 return get_errno(membarrier(arg1
, arg2
));
12727 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12728 return -TARGET_ENOSYS
;
12733 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12734 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12735 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12738 CPUState
*cpu
= env_cpu(cpu_env
);
12741 #ifdef DEBUG_ERESTARTSYS
12742 /* Debug-only code for exercising the syscall-restart code paths
12743 * in the per-architecture cpu main loops: restart every syscall
12744 * the guest makes once before letting it through.
12750 return -TARGET_ERESTARTSYS
;
12755 record_syscall_start(cpu
, num
, arg1
,
12756 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
12758 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12759 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12762 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12763 arg5
, arg6
, arg7
, arg8
);
12765 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12766 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
12767 arg3
, arg4
, arg5
, arg6
);
12770 record_syscall_return(cpu
, num
, ret
);