4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
119 #include "linux_loop.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
131 #define CLONE_IO 0x80000000 /* Clone io context */
134 /* We can't directly call the host clone syscall, because this will
135 * badly confuse libc (breaking mutexes, for example). So we must
136 * divide clone flags into:
137 * * flag combinations that look like pthread_create()
138 * * flag combinations that look like fork()
139 * * flags we can implement within QEMU itself
140 * * flags we can't support and will return an error for
142 /* For thread creation, all these flags must be present; for
143 * fork, none must be present.
145 #define CLONE_THREAD_FLAGS \
146 (CLONE_VM | CLONE_FS | CLONE_FILES | \
147 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
149 /* These flags are ignored:
150 * CLONE_DETACHED is now ignored by the kernel;
151 * CLONE_IO is just an optimisation hint to the I/O scheduler
153 #define CLONE_IGNORED_FLAGS \
154 (CLONE_DETACHED | CLONE_IO)
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS \
158 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
159 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS \
163 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
164 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
166 #define CLONE_INVALID_FORK_FLAGS \
167 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
169 #define CLONE_INVALID_THREAD_FLAGS \
170 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
171 CLONE_IGNORED_FLAGS))
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174 * have almost all been allocated. We cannot support any of
175 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177 * The checks against the invalid thread masks above will catch these.
178 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182 * once. This exercises the codepaths for restart.
184 //#define DEBUG_ERESTARTSYS
186 //#include <linux/msdos_fs.h>
187 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
188 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
198 #define _syscall0(type,name) \
199 static type name (void) \
201 return syscall(__NR_##name); \
204 #define _syscall1(type,name,type1,arg1) \
205 static type name (type1 arg1) \
207 return syscall(__NR_##name, arg1); \
210 #define _syscall2(type,name,type1,arg1,type2,arg2) \
211 static type name (type1 arg1,type2 arg2) \
213 return syscall(__NR_##name, arg1, arg2); \
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
217 static type name (type1 arg1,type2 arg2,type3 arg3) \
219 return syscall(__NR_##name, arg1, arg2, arg3); \
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
225 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
237 type5,arg5,type6,arg6) \
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
241 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid
)
276 /* For the 64-bit guest on 32-bit host case we must emulate
277 * getdents using getdents64, because otherwise the host
278 * might hand us back more dirent records than we can fit
279 * into the guest buffer after structure format conversion.
280 * Otherwise we emulate getdents with getdents if the host has it.
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
289 #if (defined(TARGET_NR_getdents) && \
290 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
296 loff_t
*, res
, uint
, wh
);
298 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
299 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
301 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group
,int,error_code
)
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address
,int *,tidptr
)
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
310 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
314 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
318 unsigned long *, user_mask_ptr
);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
321 unsigned long *, user_mask_ptr
);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
324 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
326 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
327 struct __user_cap_data_struct
*, data
);
328 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
329 struct __user_cap_data_struct
*, data
);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get
, int, which
, int, who
)
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
342 unsigned long, idx1
, unsigned long, idx2
)
346 * It is assumed that struct statx is architecture independent.
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
350 unsigned int, mask
, struct target_statx
*, statxbuf
)
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier
, int, cmd
, int, flags
)
356 static bitmask_transtbl fcntl_flags_tbl
[] = {
357 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
358 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
359 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
360 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
361 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
362 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
363 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
364 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
365 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
366 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
367 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
368 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
369 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
370 #if defined(O_DIRECT)
371 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
373 #if defined(O_NOATIME)
374 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
376 #if defined(O_CLOEXEC)
377 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
380 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
382 #if defined(O_TMPFILE)
383 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
385 /* Don't terminate the list prematurely on 64-bit host+guest. */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
392 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
394 #ifdef TARGET_NR_utimensat
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
398 const struct timespec
*,tsp
,int,flags
)
400 static int sys_utimensat(int dirfd
, const char *pathname
,
401 const struct timespec times
[2], int flags
)
407 #endif /* TARGET_NR_utimensat */
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
413 const char *, new, unsigned int, flags
)
415 static int sys_renameat2(int oldfd
, const char *old
,
416 int newfd
, const char *new, int flags
)
419 return renameat(oldfd
, old
, newfd
, new);
425 #endif /* TARGET_NR_renameat2 */
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
431 static int sys_inotify_init(void)
433 return (inotify_init());
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
437 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
439 return (inotify_add_watch(fd
, pathname
, mask
));
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
443 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
445 return (inotify_rm_watch(fd
, wd
));
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
450 static int sys_inotify_init1(int flags
)
452 return (inotify_init1(flags
));
457 /* Userspace can usually survive runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY */
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not be that used by the underlying syscall */
470 struct host_rlimit64
{
474 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
475 const struct host_rlimit64
*, new_limit
,
476 struct host_rlimit64
*, old_limit
)
480 #if defined(TARGET_NR_timer_create)
481 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers
[32] = { 0, } ;
484 static inline int next_free_host_timer(void)
487 /* FIXME: Does finding the next free slot require a lock? */
488 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
489 if (g_posix_timers
[k
] == 0) {
490 g_posix_timers
[k
] = (timer_t
) 1;
498 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
500 static inline int regpairs_aligned(void *cpu_env
, int num
)
502 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
504 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
505 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
506 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
507 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
508 * of registers which translates to the same as ARM/MIPS, because we start with
510 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
511 #elif defined(TARGET_SH4)
512 /* SH4 doesn't align register pairs, except for p{read,write}64 */
513 static inline int regpairs_aligned(void *cpu_env
, int num
)
516 case TARGET_NR_pread64
:
517 case TARGET_NR_pwrite64
:
524 #elif defined(TARGET_XTENSA)
525 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
527 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
530 #define ERRNO_TABLE_SIZE 1200
532 /* target_to_host_errno_table[] is initialized from
533 * host_to_target_errno_table[] in syscall_init(). */
534 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
538 * This list is the union of errno values overridden in asm-<arch>/errno.h
539 * minus the errnos that are not actually generic to all archs.
541 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
542 [EAGAIN
] = TARGET_EAGAIN
,
543 [EIDRM
] = TARGET_EIDRM
,
544 [ECHRNG
] = TARGET_ECHRNG
,
545 [EL2NSYNC
] = TARGET_EL2NSYNC
,
546 [EL3HLT
] = TARGET_EL3HLT
,
547 [EL3RST
] = TARGET_EL3RST
,
548 [ELNRNG
] = TARGET_ELNRNG
,
549 [EUNATCH
] = TARGET_EUNATCH
,
550 [ENOCSI
] = TARGET_ENOCSI
,
551 [EL2HLT
] = TARGET_EL2HLT
,
552 [EDEADLK
] = TARGET_EDEADLK
,
553 [ENOLCK
] = TARGET_ENOLCK
,
554 [EBADE
] = TARGET_EBADE
,
555 [EBADR
] = TARGET_EBADR
,
556 [EXFULL
] = TARGET_EXFULL
,
557 [ENOANO
] = TARGET_ENOANO
,
558 [EBADRQC
] = TARGET_EBADRQC
,
559 [EBADSLT
] = TARGET_EBADSLT
,
560 [EBFONT
] = TARGET_EBFONT
,
561 [ENOSTR
] = TARGET_ENOSTR
,
562 [ENODATA
] = TARGET_ENODATA
,
563 [ETIME
] = TARGET_ETIME
,
564 [ENOSR
] = TARGET_ENOSR
,
565 [ENONET
] = TARGET_ENONET
,
566 [ENOPKG
] = TARGET_ENOPKG
,
567 [EREMOTE
] = TARGET_EREMOTE
,
568 [ENOLINK
] = TARGET_ENOLINK
,
569 [EADV
] = TARGET_EADV
,
570 [ESRMNT
] = TARGET_ESRMNT
,
571 [ECOMM
] = TARGET_ECOMM
,
572 [EPROTO
] = TARGET_EPROTO
,
573 [EDOTDOT
] = TARGET_EDOTDOT
,
574 [EMULTIHOP
] = TARGET_EMULTIHOP
,
575 [EBADMSG
] = TARGET_EBADMSG
,
576 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
577 [EOVERFLOW
] = TARGET_EOVERFLOW
,
578 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
579 [EBADFD
] = TARGET_EBADFD
,
580 [EREMCHG
] = TARGET_EREMCHG
,
581 [ELIBACC
] = TARGET_ELIBACC
,
582 [ELIBBAD
] = TARGET_ELIBBAD
,
583 [ELIBSCN
] = TARGET_ELIBSCN
,
584 [ELIBMAX
] = TARGET_ELIBMAX
,
585 [ELIBEXEC
] = TARGET_ELIBEXEC
,
586 [EILSEQ
] = TARGET_EILSEQ
,
587 [ENOSYS
] = TARGET_ENOSYS
,
588 [ELOOP
] = TARGET_ELOOP
,
589 [ERESTART
] = TARGET_ERESTART
,
590 [ESTRPIPE
] = TARGET_ESTRPIPE
,
591 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
592 [EUSERS
] = TARGET_EUSERS
,
593 [ENOTSOCK
] = TARGET_ENOTSOCK
,
594 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
595 [EMSGSIZE
] = TARGET_EMSGSIZE
,
596 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
597 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
598 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
599 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
600 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
601 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
602 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
603 [EADDRINUSE
] = TARGET_EADDRINUSE
,
604 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
605 [ENETDOWN
] = TARGET_ENETDOWN
,
606 [ENETUNREACH
] = TARGET_ENETUNREACH
,
607 [ENETRESET
] = TARGET_ENETRESET
,
608 [ECONNABORTED
] = TARGET_ECONNABORTED
,
609 [ECONNRESET
] = TARGET_ECONNRESET
,
610 [ENOBUFS
] = TARGET_ENOBUFS
,
611 [EISCONN
] = TARGET_EISCONN
,
612 [ENOTCONN
] = TARGET_ENOTCONN
,
613 [EUCLEAN
] = TARGET_EUCLEAN
,
614 [ENOTNAM
] = TARGET_ENOTNAM
,
615 [ENAVAIL
] = TARGET_ENAVAIL
,
616 [EISNAM
] = TARGET_EISNAM
,
617 [EREMOTEIO
] = TARGET_EREMOTEIO
,
618 [EDQUOT
] = TARGET_EDQUOT
,
619 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
620 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
621 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
622 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
623 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
624 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
625 [EALREADY
] = TARGET_EALREADY
,
626 [EINPROGRESS
] = TARGET_EINPROGRESS
,
627 [ESTALE
] = TARGET_ESTALE
,
628 [ECANCELED
] = TARGET_ECANCELED
,
629 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
630 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
632 [ENOKEY
] = TARGET_ENOKEY
,
635 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
638 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
641 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
644 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
646 #ifdef ENOTRECOVERABLE
647 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
650 [ENOMSG
] = TARGET_ENOMSG
,
653 [ERFKILL
] = TARGET_ERFKILL
,
656 [EHWPOISON
] = TARGET_EHWPOISON
,
660 static inline int host_to_target_errno(int err
)
662 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
663 host_to_target_errno_table
[err
]) {
664 return host_to_target_errno_table
[err
];
669 static inline int target_to_host_errno(int err
)
671 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
672 target_to_host_errno_table
[err
]) {
673 return target_to_host_errno_table
[err
];
678 static inline abi_long
get_errno(abi_long ret
)
681 return -host_to_target_errno(errno
);
686 const char *target_strerror(int err
)
688 if (err
== TARGET_ERESTARTSYS
) {
689 return "To be restarted";
691 if (err
== TARGET_QEMU_ESIGRETURN
) {
692 return "Successful exit from sigreturn";
695 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
698 return strerror(target_to_host_errno(err
));
701 #define safe_syscall0(type, name) \
702 static type safe_##name(void) \
704 return safe_syscall(__NR_##name); \
707 #define safe_syscall1(type, name, type1, arg1) \
708 static type safe_##name(type1 arg1) \
710 return safe_syscall(__NR_##name, arg1); \
713 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
714 static type safe_##name(type1 arg1, type2 arg2) \
716 return safe_syscall(__NR_##name, arg1, arg2); \
719 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
722 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
725 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
727 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
729 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
732 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
733 type4, arg4, type5, arg5) \
734 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
737 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
740 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
741 type4, arg4, type5, arg5, type6, arg6) \
742 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
743 type5 arg5, type6 arg6) \
745 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
748 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
749 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
750 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
751 int, flags
, mode_t
, mode
)
752 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
753 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
754 struct rusage
*, rusage
)
756 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
757 int, options
, struct rusage
*, rusage
)
758 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
759 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
760 defined(TARGET_NR_pselect6)
761 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
762 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
764 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
765 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
766 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
769 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
770 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
772 #if defined(__NR_futex)
773 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
774 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
776 #if defined(__NR_futex_time64)
777 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
778 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
780 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
781 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
782 safe_syscall2(int, tkill
, int, tid
, int, sig
)
783 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
784 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
785 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
786 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
787 unsigned long, pos_l
, unsigned long, pos_h
)
788 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
789 unsigned long, pos_l
, unsigned long, pos_h
)
790 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
792 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
793 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
794 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
795 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
796 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
797 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
798 safe_syscall2(int, flock
, int, fd
, int, operation
)
799 #ifdef TARGET_NR_rt_sigtimedwait
800 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
801 const struct timespec
*, uts
, size_t, sigsetsize
)
803 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
805 #if defined(TARGET_NR_nanosleep)
806 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
807 struct timespec
*, rem
)
809 #ifdef TARGET_NR_clock_nanosleep
810 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
811 const struct timespec
*, req
, struct timespec
*, rem
)
815 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
818 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
819 void *, ptr
, long, fifth
)
823 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
827 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
828 long, msgtype
, int, flags
)
830 #ifdef __NR_semtimedop
831 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
832 unsigned, nsops
, const struct timespec
*, timeout
)
834 #ifdef TARGET_NR_mq_timedsend
835 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
836 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
838 #ifdef TARGET_NR_mq_timedreceive
839 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
840 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
842 /* We do ioctl like this rather than via safe_syscall3 to preserve the
843 * "third argument might be integer or pointer or not present" behaviour of
846 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
847 /* Similarly for fcntl. Note that callers must always:
848 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
849 * use the flock64 struct rather than unsuffixed flock
850 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
853 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
855 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
858 static inline int host_to_target_sock_type(int host_type
)
862 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
864 target_type
= TARGET_SOCK_DGRAM
;
867 target_type
= TARGET_SOCK_STREAM
;
870 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
874 #if defined(SOCK_CLOEXEC)
875 if (host_type
& SOCK_CLOEXEC
) {
876 target_type
|= TARGET_SOCK_CLOEXEC
;
880 #if defined(SOCK_NONBLOCK)
881 if (host_type
& SOCK_NONBLOCK
) {
882 target_type
|= TARGET_SOCK_NONBLOCK
;
889 static abi_ulong target_brk
;
890 static abi_ulong target_original_brk
;
891 static abi_ulong brk_page
;
893 void target_set_brk(abi_ulong new_brk
)
895 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
896 brk_page
= HOST_PAGE_ALIGN(target_brk
);
899 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
900 #define DEBUGF_BRK(message, args...)
902 /* do_brk() must return target values and target errnos. */
903 abi_long
do_brk(abi_ulong new_brk
)
905 abi_long mapped_addr
;
906 abi_ulong new_alloc_size
;
908 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
911 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
914 if (new_brk
< target_original_brk
) {
915 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
920 /* If the new brk is less than the highest page reserved to the
921 * target heap allocation, set it and we're almost done... */
922 if (new_brk
<= brk_page
) {
923 /* Heap contents are initialized to zero, as for anonymous
925 if (new_brk
> target_brk
) {
926 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
928 target_brk
= new_brk
;
929 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
933 /* We need to allocate more memory after the brk... Note that
934 * we don't use MAP_FIXED because that will map over the top of
935 * any existing mapping (like the one with the host libc or qemu
936 * itself); instead we treat "mapped but at wrong address" as
937 * a failure and unmap again.
939 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
940 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
941 PROT_READ
|PROT_WRITE
,
942 MAP_ANON
|MAP_PRIVATE
, 0, 0));
944 if (mapped_addr
== brk_page
) {
945 /* Heap contents are initialized to zero, as for anonymous
946 * mapped pages. Technically the new pages are already
947 * initialized to zero since they *are* anonymous mapped
948 * pages, however we have to take care with the contents that
949 * come from the remaining part of the previous page: it may
950 * contains garbage data due to a previous heap usage (grown
952 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
954 target_brk
= new_brk
;
955 brk_page
= HOST_PAGE_ALIGN(target_brk
);
956 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
959 } else if (mapped_addr
!= -1) {
960 /* Mapped but at wrong address, meaning there wasn't actually
961 * enough space for this brk.
963 target_munmap(mapped_addr
, new_alloc_size
);
965 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
968 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
971 #if defined(TARGET_ALPHA)
972 /* We (partially) emulate OSF/1 on Alpha, which requires we
973 return a proper errno, not an unchanged brk value. */
974 return -TARGET_ENOMEM
;
976 /* For everything else, return the previous break. */
980 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
981 defined(TARGET_NR_pselect6)
982 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
983 abi_ulong target_fds_addr
,
987 abi_ulong b
, *target_fds
;
989 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
990 if (!(target_fds
= lock_user(VERIFY_READ
,
992 sizeof(abi_ulong
) * nw
,
994 return -TARGET_EFAULT
;
998 for (i
= 0; i
< nw
; i
++) {
999 /* grab the abi_ulong */
1000 __get_user(b
, &target_fds
[i
]);
1001 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1002 /* check the bit inside the abi_ulong */
1009 unlock_user(target_fds
, target_fds_addr
, 0);
1014 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1015 abi_ulong target_fds_addr
,
1018 if (target_fds_addr
) {
1019 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1020 return -TARGET_EFAULT
;
1028 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1034 abi_ulong
*target_fds
;
1036 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1037 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1039 sizeof(abi_ulong
) * nw
,
1041 return -TARGET_EFAULT
;
1044 for (i
= 0; i
< nw
; i
++) {
1046 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1047 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1050 __put_user(v
, &target_fds
[i
]);
1053 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1059 #if defined(__alpha__)
1060 #define HOST_HZ 1024
1065 static inline abi_long
host_to_target_clock_t(long ticks
)
1067 #if HOST_HZ == TARGET_HZ
1070 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1074 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1075 const struct rusage
*rusage
)
1077 struct target_rusage
*target_rusage
;
1079 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1080 return -TARGET_EFAULT
;
1081 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1082 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1083 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1084 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1085 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1086 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1087 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1088 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1089 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1090 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1091 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1092 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1093 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1094 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1095 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1096 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1097 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1098 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1099 unlock_user_struct(target_rusage
, target_addr
, 1);
1104 #ifdef TARGET_NR_setrlimit
1105 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1107 abi_ulong target_rlim_swap
;
1110 target_rlim_swap
= tswapal(target_rlim
);
1111 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1112 return RLIM_INFINITY
;
1114 result
= target_rlim_swap
;
1115 if (target_rlim_swap
!= (rlim_t
)result
)
1116 return RLIM_INFINITY
;
1122 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1123 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1125 abi_ulong target_rlim_swap
;
1128 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1129 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1131 target_rlim_swap
= rlim
;
1132 result
= tswapal(target_rlim_swap
);
1138 static inline int target_to_host_resource(int code
)
1141 case TARGET_RLIMIT_AS
:
1143 case TARGET_RLIMIT_CORE
:
1145 case TARGET_RLIMIT_CPU
:
1147 case TARGET_RLIMIT_DATA
:
1149 case TARGET_RLIMIT_FSIZE
:
1150 return RLIMIT_FSIZE
;
1151 case TARGET_RLIMIT_LOCKS
:
1152 return RLIMIT_LOCKS
;
1153 case TARGET_RLIMIT_MEMLOCK
:
1154 return RLIMIT_MEMLOCK
;
1155 case TARGET_RLIMIT_MSGQUEUE
:
1156 return RLIMIT_MSGQUEUE
;
1157 case TARGET_RLIMIT_NICE
:
1159 case TARGET_RLIMIT_NOFILE
:
1160 return RLIMIT_NOFILE
;
1161 case TARGET_RLIMIT_NPROC
:
1162 return RLIMIT_NPROC
;
1163 case TARGET_RLIMIT_RSS
:
1165 case TARGET_RLIMIT_RTPRIO
:
1166 return RLIMIT_RTPRIO
;
1167 case TARGET_RLIMIT_SIGPENDING
:
1168 return RLIMIT_SIGPENDING
;
1169 case TARGET_RLIMIT_STACK
:
1170 return RLIMIT_STACK
;
1176 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1177 abi_ulong target_tv_addr
)
1179 struct target_timeval
*target_tv
;
1181 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1182 return -TARGET_EFAULT
;
1185 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1186 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1188 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1193 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1194 const struct timeval
*tv
)
1196 struct target_timeval
*target_tv
;
1198 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1199 return -TARGET_EFAULT
;
1202 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1203 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1205 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1210 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1211 const struct timeval
*tv
)
1213 struct target__kernel_sock_timeval
*target_tv
;
1215 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1216 return -TARGET_EFAULT
;
1219 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1220 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1222 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1227 #if defined(TARGET_NR_futex) || \
1228 defined(TARGET_NR_rt_sigtimedwait) || \
1229 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1230 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1231 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1232 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1233 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1234 defined(TARGET_NR_timer_settime) || \
1235 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1236 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1237 abi_ulong target_addr
)
1239 struct target_timespec
*target_ts
;
1241 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1242 return -TARGET_EFAULT
;
1244 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1245 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1246 unlock_user_struct(target_ts
, target_addr
, 0);
1251 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1252 defined(TARGET_NR_timer_settime64) || \
1253 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
1254 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1255 abi_ulong target_addr
)
1257 struct target__kernel_timespec
*target_ts
;
1259 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1260 return -TARGET_EFAULT
;
1262 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1263 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1264 unlock_user_struct(target_ts
, target_addr
, 0);
1269 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1270 struct timespec
*host_ts
)
1272 struct target_timespec
*target_ts
;
1274 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1275 return -TARGET_EFAULT
;
1277 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1278 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1279 unlock_user_struct(target_ts
, target_addr
, 1);
1283 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1284 struct timespec
*host_ts
)
1286 struct target__kernel_timespec
*target_ts
;
1288 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1289 return -TARGET_EFAULT
;
1291 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1292 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1293 unlock_user_struct(target_ts
, target_addr
, 1);
1297 #if defined(TARGET_NR_gettimeofday)
1298 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1299 struct timezone
*tz
)
1301 struct target_timezone
*target_tz
;
1303 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1304 return -TARGET_EFAULT
;
1307 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1308 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1310 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1316 #if defined(TARGET_NR_settimeofday)
1317 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1318 abi_ulong target_tz_addr
)
1320 struct target_timezone
*target_tz
;
1322 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1323 return -TARGET_EFAULT
;
1326 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1327 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1329 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1335 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1338 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1339 abi_ulong target_mq_attr_addr
)
1341 struct target_mq_attr
*target_mq_attr
;
1343 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1344 target_mq_attr_addr
, 1))
1345 return -TARGET_EFAULT
;
1347 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1348 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1349 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1350 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1352 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1357 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1358 const struct mq_attr
*attr
)
1360 struct target_mq_attr
*target_mq_attr
;
1362 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1363 target_mq_attr_addr
, 0))
1364 return -TARGET_EFAULT
;
1366 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1367 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1368 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1369 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1371 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1377 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1378 /* do_select() must return target values and target errnos. */
1379 static abi_long
do_select(int n
,
1380 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1381 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1383 fd_set rfds
, wfds
, efds
;
1384 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1386 struct timespec ts
, *ts_ptr
;
1389 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1393 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1397 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1402 if (target_tv_addr
) {
1403 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1404 return -TARGET_EFAULT
;
1405 ts
.tv_sec
= tv
.tv_sec
;
1406 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1412 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1415 if (!is_error(ret
)) {
1416 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1417 return -TARGET_EFAULT
;
1418 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1419 return -TARGET_EFAULT
;
1420 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1421 return -TARGET_EFAULT
;
1423 if (target_tv_addr
) {
1424 tv
.tv_sec
= ts
.tv_sec
;
1425 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1426 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1427 return -TARGET_EFAULT
;
1435 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1436 static abi_long
do_old_select(abi_ulong arg1
)
1438 struct target_sel_arg_struct
*sel
;
1439 abi_ulong inp
, outp
, exp
, tvp
;
1442 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1443 return -TARGET_EFAULT
;
1446 nsel
= tswapal(sel
->n
);
1447 inp
= tswapal(sel
->inp
);
1448 outp
= tswapal(sel
->outp
);
1449 exp
= tswapal(sel
->exp
);
1450 tvp
= tswapal(sel
->tvp
);
1452 unlock_user_struct(sel
, arg1
, 0);
1454 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1459 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1462 return pipe2(host_pipe
, flags
);
1468 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1469 int flags
, int is_pipe2
)
1473 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1476 return get_errno(ret
);
1478 /* Several targets have special calling conventions for the original
1479 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1481 #if defined(TARGET_ALPHA)
1482 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1483 return host_pipe
[0];
1484 #elif defined(TARGET_MIPS)
1485 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1486 return host_pipe
[0];
1487 #elif defined(TARGET_SH4)
1488 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1489 return host_pipe
[0];
1490 #elif defined(TARGET_SPARC)
1491 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1492 return host_pipe
[0];
1496 if (put_user_s32(host_pipe
[0], pipedes
)
1497 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1498 return -TARGET_EFAULT
;
1499 return get_errno(ret
);
1502 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1503 abi_ulong target_addr
,
1506 struct target_ip_mreqn
*target_smreqn
;
1508 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1510 return -TARGET_EFAULT
;
1511 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1512 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1513 if (len
== sizeof(struct target_ip_mreqn
))
1514 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1515 unlock_user(target_smreqn
, target_addr
, 0);
1520 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1521 abi_ulong target_addr
,
1524 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1525 sa_family_t sa_family
;
1526 struct target_sockaddr
*target_saddr
;
1528 if (fd_trans_target_to_host_addr(fd
)) {
1529 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1532 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1534 return -TARGET_EFAULT
;
1536 sa_family
= tswap16(target_saddr
->sa_family
);
1538 /* Oops. The caller might send a incomplete sun_path; sun_path
1539 * must be terminated by \0 (see the manual page), but
1540 * unfortunately it is quite common to specify sockaddr_un
1541 * length as "strlen(x->sun_path)" while it should be
1542 * "strlen(...) + 1". We'll fix that here if needed.
1543 * Linux kernel has a similar feature.
1546 if (sa_family
== AF_UNIX
) {
1547 if (len
< unix_maxlen
&& len
> 0) {
1548 char *cp
= (char*)target_saddr
;
1550 if ( cp
[len
-1] && !cp
[len
] )
1553 if (len
> unix_maxlen
)
1557 memcpy(addr
, target_saddr
, len
);
1558 addr
->sa_family
= sa_family
;
1559 if (sa_family
== AF_NETLINK
) {
1560 struct sockaddr_nl
*nladdr
;
1562 nladdr
= (struct sockaddr_nl
*)addr
;
1563 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1564 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1565 } else if (sa_family
== AF_PACKET
) {
1566 struct target_sockaddr_ll
*lladdr
;
1568 lladdr
= (struct target_sockaddr_ll
*)addr
;
1569 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1570 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1572 unlock_user(target_saddr
, target_addr
, 0);
1577 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1578 struct sockaddr
*addr
,
1581 struct target_sockaddr
*target_saddr
;
1588 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1590 return -TARGET_EFAULT
;
1591 memcpy(target_saddr
, addr
, len
);
1592 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1593 sizeof(target_saddr
->sa_family
)) {
1594 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1596 if (addr
->sa_family
== AF_NETLINK
&&
1597 len
>= sizeof(struct target_sockaddr_nl
)) {
1598 struct target_sockaddr_nl
*target_nl
=
1599 (struct target_sockaddr_nl
*)target_saddr
;
1600 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1601 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1602 } else if (addr
->sa_family
== AF_PACKET
) {
1603 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1604 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1605 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1606 } else if (addr
->sa_family
== AF_INET6
&&
1607 len
>= sizeof(struct target_sockaddr_in6
)) {
1608 struct target_sockaddr_in6
*target_in6
=
1609 (struct target_sockaddr_in6
*)target_saddr
;
1610 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1612 unlock_user(target_saddr
, target_addr
, len
);
1617 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1618 struct target_msghdr
*target_msgh
)
1620 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1621 abi_long msg_controllen
;
1622 abi_ulong target_cmsg_addr
;
1623 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1624 socklen_t space
= 0;
1626 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1627 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1629 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1630 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1631 target_cmsg_start
= target_cmsg
;
1633 return -TARGET_EFAULT
;
1635 while (cmsg
&& target_cmsg
) {
1636 void *data
= CMSG_DATA(cmsg
);
1637 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1639 int len
= tswapal(target_cmsg
->cmsg_len
)
1640 - sizeof(struct target_cmsghdr
);
1642 space
+= CMSG_SPACE(len
);
1643 if (space
> msgh
->msg_controllen
) {
1644 space
-= CMSG_SPACE(len
);
1645 /* This is a QEMU bug, since we allocated the payload
1646 * area ourselves (unlike overflow in host-to-target
1647 * conversion, which is just the guest giving us a buffer
1648 * that's too small). It can't happen for the payload types
1649 * we currently support; if it becomes an issue in future
1650 * we would need to improve our allocation strategy to
1651 * something more intelligent than "twice the size of the
1652 * target buffer we're reading from".
1654 qemu_log_mask(LOG_UNIMP
,
1655 ("Unsupported ancillary data %d/%d: "
1656 "unhandled msg size\n"),
1657 tswap32(target_cmsg
->cmsg_level
),
1658 tswap32(target_cmsg
->cmsg_type
));
1662 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1663 cmsg
->cmsg_level
= SOL_SOCKET
;
1665 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1667 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1668 cmsg
->cmsg_len
= CMSG_LEN(len
);
1670 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1671 int *fd
= (int *)data
;
1672 int *target_fd
= (int *)target_data
;
1673 int i
, numfds
= len
/ sizeof(int);
1675 for (i
= 0; i
< numfds
; i
++) {
1676 __get_user(fd
[i
], target_fd
+ i
);
1678 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1679 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1680 struct ucred
*cred
= (struct ucred
*)data
;
1681 struct target_ucred
*target_cred
=
1682 (struct target_ucred
*)target_data
;
1684 __get_user(cred
->pid
, &target_cred
->pid
);
1685 __get_user(cred
->uid
, &target_cred
->uid
);
1686 __get_user(cred
->gid
, &target_cred
->gid
);
1688 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1689 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1690 memcpy(data
, target_data
, len
);
1693 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1694 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1697 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1699 msgh
->msg_controllen
= space
;
1703 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1704 struct msghdr
*msgh
)
1706 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1707 abi_long msg_controllen
;
1708 abi_ulong target_cmsg_addr
;
1709 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1710 socklen_t space
= 0;
1712 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1713 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1715 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1716 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1717 target_cmsg_start
= target_cmsg
;
1719 return -TARGET_EFAULT
;
1721 while (cmsg
&& target_cmsg
) {
1722 void *data
= CMSG_DATA(cmsg
);
1723 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1725 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1726 int tgt_len
, tgt_space
;
1728 /* We never copy a half-header but may copy half-data;
1729 * this is Linux's behaviour in put_cmsg(). Note that
1730 * truncation here is a guest problem (which we report
1731 * to the guest via the CTRUNC bit), unlike truncation
1732 * in target_to_host_cmsg, which is a QEMU bug.
1734 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1735 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1739 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1740 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1742 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1744 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1746 /* Payload types which need a different size of payload on
1747 * the target must adjust tgt_len here.
1750 switch (cmsg
->cmsg_level
) {
1752 switch (cmsg
->cmsg_type
) {
1754 tgt_len
= sizeof(struct target_timeval
);
1764 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1765 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1766 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1769 /* We must now copy-and-convert len bytes of payload
1770 * into tgt_len bytes of destination space. Bear in mind
1771 * that in both source and destination we may be dealing
1772 * with a truncated value!
1774 switch (cmsg
->cmsg_level
) {
1776 switch (cmsg
->cmsg_type
) {
1779 int *fd
= (int *)data
;
1780 int *target_fd
= (int *)target_data
;
1781 int i
, numfds
= tgt_len
/ sizeof(int);
1783 for (i
= 0; i
< numfds
; i
++) {
1784 __put_user(fd
[i
], target_fd
+ i
);
1790 struct timeval
*tv
= (struct timeval
*)data
;
1791 struct target_timeval
*target_tv
=
1792 (struct target_timeval
*)target_data
;
1794 if (len
!= sizeof(struct timeval
) ||
1795 tgt_len
!= sizeof(struct target_timeval
)) {
1799 /* copy struct timeval to target */
1800 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1801 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1804 case SCM_CREDENTIALS
:
1806 struct ucred
*cred
= (struct ucred
*)data
;
1807 struct target_ucred
*target_cred
=
1808 (struct target_ucred
*)target_data
;
1810 __put_user(cred
->pid
, &target_cred
->pid
);
1811 __put_user(cred
->uid
, &target_cred
->uid
);
1812 __put_user(cred
->gid
, &target_cred
->gid
);
1821 switch (cmsg
->cmsg_type
) {
1824 uint32_t *v
= (uint32_t *)data
;
1825 uint32_t *t_int
= (uint32_t *)target_data
;
1827 if (len
!= sizeof(uint32_t) ||
1828 tgt_len
!= sizeof(uint32_t)) {
1831 __put_user(*v
, t_int
);
1837 struct sock_extended_err ee
;
1838 struct sockaddr_in offender
;
1840 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1841 struct errhdr_t
*target_errh
=
1842 (struct errhdr_t
*)target_data
;
1844 if (len
!= sizeof(struct errhdr_t
) ||
1845 tgt_len
!= sizeof(struct errhdr_t
)) {
1848 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1849 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1850 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1851 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1852 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1853 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1854 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1855 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1856 (void *) &errh
->offender
, sizeof(errh
->offender
));
1865 switch (cmsg
->cmsg_type
) {
1868 uint32_t *v
= (uint32_t *)data
;
1869 uint32_t *t_int
= (uint32_t *)target_data
;
1871 if (len
!= sizeof(uint32_t) ||
1872 tgt_len
!= sizeof(uint32_t)) {
1875 __put_user(*v
, t_int
);
1881 struct sock_extended_err ee
;
1882 struct sockaddr_in6 offender
;
1884 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1885 struct errhdr6_t
*target_errh
=
1886 (struct errhdr6_t
*)target_data
;
1888 if (len
!= sizeof(struct errhdr6_t
) ||
1889 tgt_len
!= sizeof(struct errhdr6_t
)) {
1892 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1893 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1894 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1895 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1896 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1897 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1898 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1899 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1900 (void *) &errh
->offender
, sizeof(errh
->offender
));
1910 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1911 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1912 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1913 if (tgt_len
> len
) {
1914 memset(target_data
+ len
, 0, tgt_len
- len
);
1918 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1919 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1920 if (msg_controllen
< tgt_space
) {
1921 tgt_space
= msg_controllen
;
1923 msg_controllen
-= tgt_space
;
1925 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1926 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1929 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1931 target_msgh
->msg_controllen
= tswapal(space
);
1935 /* do_setsockopt() Must return target values and target errnos. */
1936 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1937 abi_ulong optval_addr
, socklen_t optlen
)
1941 struct ip_mreqn
*ip_mreq
;
1942 struct ip_mreq_source
*ip_mreq_source
;
1946 /* TCP options all take an 'int' value. */
1947 if (optlen
< sizeof(uint32_t))
1948 return -TARGET_EINVAL
;
1950 if (get_user_u32(val
, optval_addr
))
1951 return -TARGET_EFAULT
;
1952 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1959 case IP_ROUTER_ALERT
:
1963 case IP_MTU_DISCOVER
:
1970 case IP_MULTICAST_TTL
:
1971 case IP_MULTICAST_LOOP
:
1973 if (optlen
>= sizeof(uint32_t)) {
1974 if (get_user_u32(val
, optval_addr
))
1975 return -TARGET_EFAULT
;
1976 } else if (optlen
>= 1) {
1977 if (get_user_u8(val
, optval_addr
))
1978 return -TARGET_EFAULT
;
1980 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1982 case IP_ADD_MEMBERSHIP
:
1983 case IP_DROP_MEMBERSHIP
:
1984 if (optlen
< sizeof (struct target_ip_mreq
) ||
1985 optlen
> sizeof (struct target_ip_mreqn
))
1986 return -TARGET_EINVAL
;
1988 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1989 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1990 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1993 case IP_BLOCK_SOURCE
:
1994 case IP_UNBLOCK_SOURCE
:
1995 case IP_ADD_SOURCE_MEMBERSHIP
:
1996 case IP_DROP_SOURCE_MEMBERSHIP
:
1997 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1998 return -TARGET_EINVAL
;
2000 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2001 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2002 unlock_user (ip_mreq_source
, optval_addr
, 0);
2011 case IPV6_MTU_DISCOVER
:
2014 case IPV6_RECVPKTINFO
:
2015 case IPV6_UNICAST_HOPS
:
2016 case IPV6_MULTICAST_HOPS
:
2017 case IPV6_MULTICAST_LOOP
:
2019 case IPV6_RECVHOPLIMIT
:
2020 case IPV6_2292HOPLIMIT
:
2023 case IPV6_2292PKTINFO
:
2024 case IPV6_RECVTCLASS
:
2025 case IPV6_RECVRTHDR
:
2026 case IPV6_2292RTHDR
:
2027 case IPV6_RECVHOPOPTS
:
2028 case IPV6_2292HOPOPTS
:
2029 case IPV6_RECVDSTOPTS
:
2030 case IPV6_2292DSTOPTS
:
2032 #ifdef IPV6_RECVPATHMTU
2033 case IPV6_RECVPATHMTU
:
2035 #ifdef IPV6_TRANSPARENT
2036 case IPV6_TRANSPARENT
:
2038 #ifdef IPV6_FREEBIND
2041 #ifdef IPV6_RECVORIGDSTADDR
2042 case IPV6_RECVORIGDSTADDR
:
2045 if (optlen
< sizeof(uint32_t)) {
2046 return -TARGET_EINVAL
;
2048 if (get_user_u32(val
, optval_addr
)) {
2049 return -TARGET_EFAULT
;
2051 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2052 &val
, sizeof(val
)));
2056 struct in6_pktinfo pki
;
2058 if (optlen
< sizeof(pki
)) {
2059 return -TARGET_EINVAL
;
2062 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2063 return -TARGET_EFAULT
;
2066 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2068 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2069 &pki
, sizeof(pki
)));
2072 case IPV6_ADD_MEMBERSHIP
:
2073 case IPV6_DROP_MEMBERSHIP
:
2075 struct ipv6_mreq ipv6mreq
;
2077 if (optlen
< sizeof(ipv6mreq
)) {
2078 return -TARGET_EINVAL
;
2081 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2082 return -TARGET_EFAULT
;
2085 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2087 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2088 &ipv6mreq
, sizeof(ipv6mreq
)));
2099 struct icmp6_filter icmp6f
;
2101 if (optlen
> sizeof(icmp6f
)) {
2102 optlen
= sizeof(icmp6f
);
2105 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2106 return -TARGET_EFAULT
;
2109 for (val
= 0; val
< 8; val
++) {
2110 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2113 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2125 /* those take an u32 value */
2126 if (optlen
< sizeof(uint32_t)) {
2127 return -TARGET_EINVAL
;
2130 if (get_user_u32(val
, optval_addr
)) {
2131 return -TARGET_EFAULT
;
2133 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2134 &val
, sizeof(val
)));
2141 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2146 char *alg_key
= g_malloc(optlen
);
2149 return -TARGET_ENOMEM
;
2151 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2153 return -TARGET_EFAULT
;
2155 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2160 case ALG_SET_AEAD_AUTHSIZE
:
2162 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2171 case TARGET_SOL_SOCKET
:
2173 case TARGET_SO_RCVTIMEO
:
2177 optname
= SO_RCVTIMEO
;
2180 if (optlen
!= sizeof(struct target_timeval
)) {
2181 return -TARGET_EINVAL
;
2184 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2185 return -TARGET_EFAULT
;
2188 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2192 case TARGET_SO_SNDTIMEO
:
2193 optname
= SO_SNDTIMEO
;
2195 case TARGET_SO_ATTACH_FILTER
:
2197 struct target_sock_fprog
*tfprog
;
2198 struct target_sock_filter
*tfilter
;
2199 struct sock_fprog fprog
;
2200 struct sock_filter
*filter
;
2203 if (optlen
!= sizeof(*tfprog
)) {
2204 return -TARGET_EINVAL
;
2206 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2207 return -TARGET_EFAULT
;
2209 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2210 tswapal(tfprog
->filter
), 0)) {
2211 unlock_user_struct(tfprog
, optval_addr
, 1);
2212 return -TARGET_EFAULT
;
2215 fprog
.len
= tswap16(tfprog
->len
);
2216 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2217 if (filter
== NULL
) {
2218 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2219 unlock_user_struct(tfprog
, optval_addr
, 1);
2220 return -TARGET_ENOMEM
;
2222 for (i
= 0; i
< fprog
.len
; i
++) {
2223 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2224 filter
[i
].jt
= tfilter
[i
].jt
;
2225 filter
[i
].jf
= tfilter
[i
].jf
;
2226 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2228 fprog
.filter
= filter
;
2230 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2231 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2234 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2235 unlock_user_struct(tfprog
, optval_addr
, 1);
2238 case TARGET_SO_BINDTODEVICE
:
2240 char *dev_ifname
, *addr_ifname
;
2242 if (optlen
> IFNAMSIZ
- 1) {
2243 optlen
= IFNAMSIZ
- 1;
2245 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2247 return -TARGET_EFAULT
;
2249 optname
= SO_BINDTODEVICE
;
2250 addr_ifname
= alloca(IFNAMSIZ
);
2251 memcpy(addr_ifname
, dev_ifname
, optlen
);
2252 addr_ifname
[optlen
] = 0;
2253 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2254 addr_ifname
, optlen
));
2255 unlock_user (dev_ifname
, optval_addr
, 0);
2258 case TARGET_SO_LINGER
:
2261 struct target_linger
*tlg
;
2263 if (optlen
!= sizeof(struct target_linger
)) {
2264 return -TARGET_EINVAL
;
2266 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2267 return -TARGET_EFAULT
;
2269 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2270 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2271 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2273 unlock_user_struct(tlg
, optval_addr
, 0);
2276 /* Options with 'int' argument. */
2277 case TARGET_SO_DEBUG
:
2280 case TARGET_SO_REUSEADDR
:
2281 optname
= SO_REUSEADDR
;
2284 case TARGET_SO_REUSEPORT
:
2285 optname
= SO_REUSEPORT
;
2288 case TARGET_SO_TYPE
:
2291 case TARGET_SO_ERROR
:
2294 case TARGET_SO_DONTROUTE
:
2295 optname
= SO_DONTROUTE
;
2297 case TARGET_SO_BROADCAST
:
2298 optname
= SO_BROADCAST
;
2300 case TARGET_SO_SNDBUF
:
2301 optname
= SO_SNDBUF
;
2303 case TARGET_SO_SNDBUFFORCE
:
2304 optname
= SO_SNDBUFFORCE
;
2306 case TARGET_SO_RCVBUF
:
2307 optname
= SO_RCVBUF
;
2309 case TARGET_SO_RCVBUFFORCE
:
2310 optname
= SO_RCVBUFFORCE
;
2312 case TARGET_SO_KEEPALIVE
:
2313 optname
= SO_KEEPALIVE
;
2315 case TARGET_SO_OOBINLINE
:
2316 optname
= SO_OOBINLINE
;
2318 case TARGET_SO_NO_CHECK
:
2319 optname
= SO_NO_CHECK
;
2321 case TARGET_SO_PRIORITY
:
2322 optname
= SO_PRIORITY
;
2325 case TARGET_SO_BSDCOMPAT
:
2326 optname
= SO_BSDCOMPAT
;
2329 case TARGET_SO_PASSCRED
:
2330 optname
= SO_PASSCRED
;
2332 case TARGET_SO_PASSSEC
:
2333 optname
= SO_PASSSEC
;
2335 case TARGET_SO_TIMESTAMP
:
2336 optname
= SO_TIMESTAMP
;
2338 case TARGET_SO_RCVLOWAT
:
2339 optname
= SO_RCVLOWAT
;
2344 if (optlen
< sizeof(uint32_t))
2345 return -TARGET_EINVAL
;
2347 if (get_user_u32(val
, optval_addr
))
2348 return -TARGET_EFAULT
;
2349 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2354 case NETLINK_PKTINFO
:
2355 case NETLINK_ADD_MEMBERSHIP
:
2356 case NETLINK_DROP_MEMBERSHIP
:
2357 case NETLINK_BROADCAST_ERROR
:
2358 case NETLINK_NO_ENOBUFS
:
2359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2360 case NETLINK_LISTEN_ALL_NSID
:
2361 case NETLINK_CAP_ACK
:
2362 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2363 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2364 case NETLINK_EXT_ACK
:
2365 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2366 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2367 case NETLINK_GET_STRICT_CHK
:
2368 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2374 if (optlen
< sizeof(uint32_t)) {
2375 return -TARGET_EINVAL
;
2377 if (get_user_u32(val
, optval_addr
)) {
2378 return -TARGET_EFAULT
;
2380 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2383 #endif /* SOL_NETLINK */
2386 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2388 ret
= -TARGET_ENOPROTOOPT
;
2393 /* do_getsockopt() Must return target values and target errnos. */
2394 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2395 abi_ulong optval_addr
, abi_ulong optlen
)
2402 case TARGET_SOL_SOCKET
:
2405 /* These don't just return a single integer */
2406 case TARGET_SO_PEERNAME
:
2408 case TARGET_SO_RCVTIMEO
: {
2412 optname
= SO_RCVTIMEO
;
2415 if (get_user_u32(len
, optlen
)) {
2416 return -TARGET_EFAULT
;
2419 return -TARGET_EINVAL
;
2423 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2428 if (len
> sizeof(struct target_timeval
)) {
2429 len
= sizeof(struct target_timeval
);
2431 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2432 return -TARGET_EFAULT
;
2434 if (put_user_u32(len
, optlen
)) {
2435 return -TARGET_EFAULT
;
2439 case TARGET_SO_SNDTIMEO
:
2440 optname
= SO_SNDTIMEO
;
2442 case TARGET_SO_PEERCRED
: {
2445 struct target_ucred
*tcr
;
2447 if (get_user_u32(len
, optlen
)) {
2448 return -TARGET_EFAULT
;
2451 return -TARGET_EINVAL
;
2455 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2463 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2464 return -TARGET_EFAULT
;
2466 __put_user(cr
.pid
, &tcr
->pid
);
2467 __put_user(cr
.uid
, &tcr
->uid
);
2468 __put_user(cr
.gid
, &tcr
->gid
);
2469 unlock_user_struct(tcr
, optval_addr
, 1);
2470 if (put_user_u32(len
, optlen
)) {
2471 return -TARGET_EFAULT
;
2475 case TARGET_SO_PEERSEC
: {
2478 if (get_user_u32(len
, optlen
)) {
2479 return -TARGET_EFAULT
;
2482 return -TARGET_EINVAL
;
2484 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2486 return -TARGET_EFAULT
;
2489 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2491 if (put_user_u32(lv
, optlen
)) {
2492 ret
= -TARGET_EFAULT
;
2494 unlock_user(name
, optval_addr
, lv
);
2497 case TARGET_SO_LINGER
:
2501 struct target_linger
*tlg
;
2503 if (get_user_u32(len
, optlen
)) {
2504 return -TARGET_EFAULT
;
2507 return -TARGET_EINVAL
;
2511 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2519 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2520 return -TARGET_EFAULT
;
2522 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2523 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2524 unlock_user_struct(tlg
, optval_addr
, 1);
2525 if (put_user_u32(len
, optlen
)) {
2526 return -TARGET_EFAULT
;
2530 /* Options with 'int' argument. */
2531 case TARGET_SO_DEBUG
:
2534 case TARGET_SO_REUSEADDR
:
2535 optname
= SO_REUSEADDR
;
2538 case TARGET_SO_REUSEPORT
:
2539 optname
= SO_REUSEPORT
;
2542 case TARGET_SO_TYPE
:
2545 case TARGET_SO_ERROR
:
2548 case TARGET_SO_DONTROUTE
:
2549 optname
= SO_DONTROUTE
;
2551 case TARGET_SO_BROADCAST
:
2552 optname
= SO_BROADCAST
;
2554 case TARGET_SO_SNDBUF
:
2555 optname
= SO_SNDBUF
;
2557 case TARGET_SO_RCVBUF
:
2558 optname
= SO_RCVBUF
;
2560 case TARGET_SO_KEEPALIVE
:
2561 optname
= SO_KEEPALIVE
;
2563 case TARGET_SO_OOBINLINE
:
2564 optname
= SO_OOBINLINE
;
2566 case TARGET_SO_NO_CHECK
:
2567 optname
= SO_NO_CHECK
;
2569 case TARGET_SO_PRIORITY
:
2570 optname
= SO_PRIORITY
;
2573 case TARGET_SO_BSDCOMPAT
:
2574 optname
= SO_BSDCOMPAT
;
2577 case TARGET_SO_PASSCRED
:
2578 optname
= SO_PASSCRED
;
2580 case TARGET_SO_TIMESTAMP
:
2581 optname
= SO_TIMESTAMP
;
2583 case TARGET_SO_RCVLOWAT
:
2584 optname
= SO_RCVLOWAT
;
2586 case TARGET_SO_ACCEPTCONN
:
2587 optname
= SO_ACCEPTCONN
;
2594 /* TCP options all take an 'int' value. */
2596 if (get_user_u32(len
, optlen
))
2597 return -TARGET_EFAULT
;
2599 return -TARGET_EINVAL
;
2601 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2604 if (optname
== SO_TYPE
) {
2605 val
= host_to_target_sock_type(val
);
2610 if (put_user_u32(val
, optval_addr
))
2611 return -TARGET_EFAULT
;
2613 if (put_user_u8(val
, optval_addr
))
2614 return -TARGET_EFAULT
;
2616 if (put_user_u32(len
, optlen
))
2617 return -TARGET_EFAULT
;
2624 case IP_ROUTER_ALERT
:
2628 case IP_MTU_DISCOVER
:
2634 case IP_MULTICAST_TTL
:
2635 case IP_MULTICAST_LOOP
:
2636 if (get_user_u32(len
, optlen
))
2637 return -TARGET_EFAULT
;
2639 return -TARGET_EINVAL
;
2641 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2644 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2646 if (put_user_u32(len
, optlen
)
2647 || put_user_u8(val
, optval_addr
))
2648 return -TARGET_EFAULT
;
2650 if (len
> sizeof(int))
2652 if (put_user_u32(len
, optlen
)
2653 || put_user_u32(val
, optval_addr
))
2654 return -TARGET_EFAULT
;
2658 ret
= -TARGET_ENOPROTOOPT
;
2664 case IPV6_MTU_DISCOVER
:
2667 case IPV6_RECVPKTINFO
:
2668 case IPV6_UNICAST_HOPS
:
2669 case IPV6_MULTICAST_HOPS
:
2670 case IPV6_MULTICAST_LOOP
:
2672 case IPV6_RECVHOPLIMIT
:
2673 case IPV6_2292HOPLIMIT
:
2676 case IPV6_2292PKTINFO
:
2677 case IPV6_RECVTCLASS
:
2678 case IPV6_RECVRTHDR
:
2679 case IPV6_2292RTHDR
:
2680 case IPV6_RECVHOPOPTS
:
2681 case IPV6_2292HOPOPTS
:
2682 case IPV6_RECVDSTOPTS
:
2683 case IPV6_2292DSTOPTS
:
2685 #ifdef IPV6_RECVPATHMTU
2686 case IPV6_RECVPATHMTU
:
2688 #ifdef IPV6_TRANSPARENT
2689 case IPV6_TRANSPARENT
:
2691 #ifdef IPV6_FREEBIND
2694 #ifdef IPV6_RECVORIGDSTADDR
2695 case IPV6_RECVORIGDSTADDR
:
2697 if (get_user_u32(len
, optlen
))
2698 return -TARGET_EFAULT
;
2700 return -TARGET_EINVAL
;
2702 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2705 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2707 if (put_user_u32(len
, optlen
)
2708 || put_user_u8(val
, optval_addr
))
2709 return -TARGET_EFAULT
;
2711 if (len
> sizeof(int))
2713 if (put_user_u32(len
, optlen
)
2714 || put_user_u32(val
, optval_addr
))
2715 return -TARGET_EFAULT
;
2719 ret
= -TARGET_ENOPROTOOPT
;
2726 case NETLINK_PKTINFO
:
2727 case NETLINK_BROADCAST_ERROR
:
2728 case NETLINK_NO_ENOBUFS
:
2729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2730 case NETLINK_LISTEN_ALL_NSID
:
2731 case NETLINK_CAP_ACK
:
2732 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2733 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2734 case NETLINK_EXT_ACK
:
2735 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2736 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2737 case NETLINK_GET_STRICT_CHK
:
2738 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2739 if (get_user_u32(len
, optlen
)) {
2740 return -TARGET_EFAULT
;
2742 if (len
!= sizeof(val
)) {
2743 return -TARGET_EINVAL
;
2746 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2750 if (put_user_u32(lv
, optlen
)
2751 || put_user_u32(val
, optval_addr
)) {
2752 return -TARGET_EFAULT
;
2755 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2756 case NETLINK_LIST_MEMBERSHIPS
:
2760 if (get_user_u32(len
, optlen
)) {
2761 return -TARGET_EFAULT
;
2764 return -TARGET_EINVAL
;
2766 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2768 return -TARGET_EFAULT
;
2771 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2773 unlock_user(results
, optval_addr
, 0);
2776 /* swap host endianess to target endianess. */
2777 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2778 results
[i
] = tswap32(results
[i
]);
2780 if (put_user_u32(lv
, optlen
)) {
2781 return -TARGET_EFAULT
;
2783 unlock_user(results
, optval_addr
, 0);
2786 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2791 #endif /* SOL_NETLINK */
2794 qemu_log_mask(LOG_UNIMP
,
2795 "getsockopt level=%d optname=%d not yet supported\n",
2797 ret
= -TARGET_EOPNOTSUPP
;
2803 /* Convert target low/high pair representing file offset into the host
2804 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2805 * as the kernel doesn't handle them either.
2807 static void target_to_host_low_high(abi_ulong tlow
,
2809 unsigned long *hlow
,
2810 unsigned long *hhigh
)
2812 uint64_t off
= tlow
|
2813 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2814 TARGET_LONG_BITS
/ 2;
2817 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2820 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2821 abi_ulong count
, int copy
)
2823 struct target_iovec
*target_vec
;
2825 abi_ulong total_len
, max_len
;
2828 bool bad_address
= false;
2834 if (count
> IOV_MAX
) {
2839 vec
= g_try_new0(struct iovec
, count
);
2845 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2846 count
* sizeof(struct target_iovec
), 1);
2847 if (target_vec
== NULL
) {
2852 /* ??? If host page size > target page size, this will result in a
2853 value larger than what we can actually support. */
2854 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2857 for (i
= 0; i
< count
; i
++) {
2858 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2859 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2864 } else if (len
== 0) {
2865 /* Zero length pointer is ignored. */
2866 vec
[i
].iov_base
= 0;
2868 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2869 /* If the first buffer pointer is bad, this is a fault. But
2870 * subsequent bad buffers will result in a partial write; this
2871 * is realized by filling the vector with null pointers and
2873 if (!vec
[i
].iov_base
) {
2884 if (len
> max_len
- total_len
) {
2885 len
= max_len
- total_len
;
2888 vec
[i
].iov_len
= len
;
2892 unlock_user(target_vec
, target_addr
, 0);
2897 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2898 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2901 unlock_user(target_vec
, target_addr
, 0);
2908 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2909 abi_ulong count
, int copy
)
2911 struct target_iovec
*target_vec
;
2914 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2915 count
* sizeof(struct target_iovec
), 1);
2917 for (i
= 0; i
< count
; i
++) {
2918 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2919 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2923 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2925 unlock_user(target_vec
, target_addr
, 0);
2931 static inline int target_to_host_sock_type(int *type
)
2934 int target_type
= *type
;
2936 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2937 case TARGET_SOCK_DGRAM
:
2938 host_type
= SOCK_DGRAM
;
2940 case TARGET_SOCK_STREAM
:
2941 host_type
= SOCK_STREAM
;
2944 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2947 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2948 #if defined(SOCK_CLOEXEC)
2949 host_type
|= SOCK_CLOEXEC
;
2951 return -TARGET_EINVAL
;
2954 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2955 #if defined(SOCK_NONBLOCK)
2956 host_type
|= SOCK_NONBLOCK
;
2957 #elif !defined(O_NONBLOCK)
2958 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, emulate TARGET_SOCK_NONBLOCK with
 * fcntl(O_NONBLOCK).  Returns @fd on success; on failure the fd is
 * closed and -TARGET_EINVAL is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Check F_GETFL for failure: OR-ing O_NONBLOCK into -1 would
         * otherwise try to set every flag bit via F_SETFL.
         */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2980 /* do_socket() Must return target values and target errnos. */
2981 static abi_long
do_socket(int domain
, int type
, int protocol
)
2983 int target_type
= type
;
2986 ret
= target_to_host_sock_type(&type
);
2991 if (domain
== PF_NETLINK
&& !(
2992 #ifdef CONFIG_RTNETLINK
2993 protocol
== NETLINK_ROUTE
||
2995 protocol
== NETLINK_KOBJECT_UEVENT
||
2996 protocol
== NETLINK_AUDIT
)) {
2997 return -TARGET_EPROTONOSUPPORT
;
3000 if (domain
== AF_PACKET
||
3001 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3002 protocol
= tswap16(protocol
);
3005 ret
= get_errno(socket(domain
, type
, protocol
));
3007 ret
= sock_flags_fixup(ret
, target_type
);
3008 if (type
== SOCK_PACKET
) {
3009 /* Manage an obsolete case :
3010 * if socket type is SOCK_PACKET, bind by name
3012 fd_trans_register(ret
, &target_packet_trans
);
3013 } else if (domain
== PF_NETLINK
) {
3015 #ifdef CONFIG_RTNETLINK
3017 fd_trans_register(ret
, &target_netlink_route_trans
);
3020 case NETLINK_KOBJECT_UEVENT
:
3021 /* nothing to do: messages are strings */
3024 fd_trans_register(ret
, &target_netlink_audit_trans
);
3027 g_assert_not_reached();
3034 /* do_bind() Must return target values and target errnos. */
3035 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3041 if ((int)addrlen
< 0) {
3042 return -TARGET_EINVAL
;
3045 addr
= alloca(addrlen
+1);
3047 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3051 return get_errno(bind(sockfd
, addr
, addrlen
));
3054 /* do_connect() Must return target values and target errnos. */
3055 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3061 if ((int)addrlen
< 0) {
3062 return -TARGET_EINVAL
;
3065 addr
= alloca(addrlen
+1);
3067 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3071 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3074 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3075 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3076 int flags
, int send
)
3082 abi_ulong target_vec
;
3084 if (msgp
->msg_name
) {
3085 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3086 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3087 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3088 tswapal(msgp
->msg_name
),
3090 if (ret
== -TARGET_EFAULT
) {
3091 /* For connected sockets msg_name and msg_namelen must
3092 * be ignored, so returning EFAULT immediately is wrong.
3093 * Instead, pass a bad msg_name to the host kernel, and
3094 * let it decide whether to return EFAULT or not.
3096 msg
.msg_name
= (void *)-1;
3101 msg
.msg_name
= NULL
;
3102 msg
.msg_namelen
= 0;
3104 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3105 msg
.msg_control
= alloca(msg
.msg_controllen
);
3106 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3108 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3110 count
= tswapal(msgp
->msg_iovlen
);
3111 target_vec
= tswapal(msgp
->msg_iov
);
3113 if (count
> IOV_MAX
) {
3114 /* sendrcvmsg returns a different errno for this condition than
3115 * readv/writev, so we must catch it here before lock_iovec() does.
3117 ret
= -TARGET_EMSGSIZE
;
3121 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3122 target_vec
, count
, send
);
3124 ret
= -host_to_target_errno(errno
);
3127 msg
.msg_iovlen
= count
;
3131 if (fd_trans_target_to_host_data(fd
)) {
3134 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3135 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3136 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3137 msg
.msg_iov
->iov_len
);
3139 msg
.msg_iov
->iov_base
= host_msg
;
3140 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3144 ret
= target_to_host_cmsg(&msg
, msgp
);
3146 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3150 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3151 if (!is_error(ret
)) {
3153 if (fd_trans_host_to_target_data(fd
)) {
3154 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3155 MIN(msg
.msg_iov
->iov_len
, len
));
3157 ret
= host_to_target_cmsg(msgp
, &msg
);
3159 if (!is_error(ret
)) {
3160 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3161 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3162 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3163 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3164 msg
.msg_name
, msg
.msg_namelen
);
3176 unlock_iovec(vec
, target_vec
, count
, !send
);
3181 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3182 int flags
, int send
)
3185 struct target_msghdr
*msgp
;
3187 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3191 return -TARGET_EFAULT
;
3193 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3194 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3198 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3199 * so it might not have this *mmsg-specific flag either.
3201 #ifndef MSG_WAITFORONE
3202 #define MSG_WAITFORONE 0x10000
3205 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3206 unsigned int vlen
, unsigned int flags
,
3209 struct target_mmsghdr
*mmsgp
;
3213 if (vlen
> UIO_MAXIOV
) {
3217 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3219 return -TARGET_EFAULT
;
3222 for (i
= 0; i
< vlen
; i
++) {
3223 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3224 if (is_error(ret
)) {
3227 mmsgp
[i
].msg_len
= tswap32(ret
);
3228 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3229 if (flags
& MSG_WAITFORONE
) {
3230 flags
|= MSG_DONTWAIT
;
3234 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3236 /* Return number of datagrams sent if we sent any at all;
3237 * otherwise return the error.
3245 /* do_accept4() Must return target values and target errnos. */
3246 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3247 abi_ulong target_addrlen_addr
, int flags
)
3249 socklen_t addrlen
, ret_addrlen
;
3254 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3256 if (target_addr
== 0) {
3257 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3260 /* linux returns EINVAL if addrlen pointer is invalid */
3261 if (get_user_u32(addrlen
, target_addrlen_addr
))
3262 return -TARGET_EINVAL
;
3264 if ((int)addrlen
< 0) {
3265 return -TARGET_EINVAL
;
3268 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3269 return -TARGET_EINVAL
;
3271 addr
= alloca(addrlen
);
3273 ret_addrlen
= addrlen
;
3274 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3275 if (!is_error(ret
)) {
3276 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3277 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3278 ret
= -TARGET_EFAULT
;
3284 /* do_getpeername() Must return target values and target errnos. */
3285 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3286 abi_ulong target_addrlen_addr
)
3288 socklen_t addrlen
, ret_addrlen
;
3292 if (get_user_u32(addrlen
, target_addrlen_addr
))
3293 return -TARGET_EFAULT
;
3295 if ((int)addrlen
< 0) {
3296 return -TARGET_EINVAL
;
3299 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3300 return -TARGET_EFAULT
;
3302 addr
= alloca(addrlen
);
3304 ret_addrlen
= addrlen
;
3305 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3306 if (!is_error(ret
)) {
3307 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3308 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3309 ret
= -TARGET_EFAULT
;
3315 /* do_getsockname() Must return target values and target errnos. */
3316 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3317 abi_ulong target_addrlen_addr
)
3319 socklen_t addrlen
, ret_addrlen
;
3323 if (get_user_u32(addrlen
, target_addrlen_addr
))
3324 return -TARGET_EFAULT
;
3326 if ((int)addrlen
< 0) {
3327 return -TARGET_EINVAL
;
3330 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3331 return -TARGET_EFAULT
;
3333 addr
= alloca(addrlen
);
3335 ret_addrlen
= addrlen
;
3336 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3337 if (!is_error(ret
)) {
3338 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3339 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3340 ret
= -TARGET_EFAULT
;
3346 /* do_socketpair() Must return target values and target errnos. */
3347 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3348 abi_ulong target_tab_addr
)
3353 target_to_host_sock_type(&type
);
3355 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3356 if (!is_error(ret
)) {
3357 if (put_user_s32(tab
[0], target_tab_addr
)
3358 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3359 ret
= -TARGET_EFAULT
;
3364 /* do_sendto() Must return target values and target errnos. */
3365 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3366 abi_ulong target_addr
, socklen_t addrlen
)
3370 void *copy_msg
= NULL
;
3373 if ((int)addrlen
< 0) {
3374 return -TARGET_EINVAL
;
3377 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3379 return -TARGET_EFAULT
;
3380 if (fd_trans_target_to_host_data(fd
)) {
3381 copy_msg
= host_msg
;
3382 host_msg
= g_malloc(len
);
3383 memcpy(host_msg
, copy_msg
, len
);
3384 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3390 addr
= alloca(addrlen
+1);
3391 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3395 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3397 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3402 host_msg
= copy_msg
;
3404 unlock_user(host_msg
, msg
, 0);
3408 /* do_recvfrom() Must return target values and target errnos. */
3409 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3410 abi_ulong target_addr
,
3411 abi_ulong target_addrlen
)
3413 socklen_t addrlen
, ret_addrlen
;
3418 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3420 return -TARGET_EFAULT
;
3422 if (get_user_u32(addrlen
, target_addrlen
)) {
3423 ret
= -TARGET_EFAULT
;
3426 if ((int)addrlen
< 0) {
3427 ret
= -TARGET_EINVAL
;
3430 addr
= alloca(addrlen
);
3431 ret_addrlen
= addrlen
;
3432 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3433 addr
, &ret_addrlen
));
3435 addr
= NULL
; /* To keep compiler quiet. */
3436 addrlen
= 0; /* To keep compiler quiet. */
3437 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3439 if (!is_error(ret
)) {
3440 if (fd_trans_host_to_target_data(fd
)) {
3442 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3443 if (is_error(trans
)) {
3449 host_to_target_sockaddr(target_addr
, addr
,
3450 MIN(addrlen
, ret_addrlen
));
3451 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3452 ret
= -TARGET_EFAULT
;
3456 unlock_user(host_msg
, msg
, len
);
3459 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3557 #define N_SHM_REGIONS 32
3559 static struct shm_region
{
3563 } shm_regions
[N_SHM_REGIONS
];
3565 #ifndef TARGET_SEMID64_DS
3566 /* asm-generic version of this struct */
3567 struct target_semid64_ds
3569 struct target_ipc_perm sem_perm
;
3570 abi_ulong sem_otime
;
3571 #if TARGET_ABI_BITS == 32
3572 abi_ulong __unused1
;
3574 abi_ulong sem_ctime
;
3575 #if TARGET_ABI_BITS == 32
3576 abi_ulong __unused2
;
3578 abi_ulong sem_nsems
;
3579 abi_ulong __unused3
;
3580 abi_ulong __unused4
;
3584 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3585 abi_ulong target_addr
)
3587 struct target_ipc_perm
*target_ip
;
3588 struct target_semid64_ds
*target_sd
;
3590 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3591 return -TARGET_EFAULT
;
3592 target_ip
= &(target_sd
->sem_perm
);
3593 host_ip
->__key
= tswap32(target_ip
->__key
);
3594 host_ip
->uid
= tswap32(target_ip
->uid
);
3595 host_ip
->gid
= tswap32(target_ip
->gid
);
3596 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3597 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3598 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3599 host_ip
->mode
= tswap32(target_ip
->mode
);
3601 host_ip
->mode
= tswap16(target_ip
->mode
);
3603 #if defined(TARGET_PPC)
3604 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3606 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3608 unlock_user_struct(target_sd
, target_addr
, 0);
3612 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3613 struct ipc_perm
*host_ip
)
3615 struct target_ipc_perm
*target_ip
;
3616 struct target_semid64_ds
*target_sd
;
3618 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3619 return -TARGET_EFAULT
;
3620 target_ip
= &(target_sd
->sem_perm
);
3621 target_ip
->__key
= tswap32(host_ip
->__key
);
3622 target_ip
->uid
= tswap32(host_ip
->uid
);
3623 target_ip
->gid
= tswap32(host_ip
->gid
);
3624 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3625 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3626 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3627 target_ip
->mode
= tswap32(host_ip
->mode
);
3629 target_ip
->mode
= tswap16(host_ip
->mode
);
3631 #if defined(TARGET_PPC)
3632 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3634 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3636 unlock_user_struct(target_sd
, target_addr
, 1);
3640 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3641 abi_ulong target_addr
)
3643 struct target_semid64_ds
*target_sd
;
3645 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3646 return -TARGET_EFAULT
;
3647 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3648 return -TARGET_EFAULT
;
3649 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3650 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3651 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3652 unlock_user_struct(target_sd
, target_addr
, 0);
3656 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3657 struct semid_ds
*host_sd
)
3659 struct target_semid64_ds
*target_sd
;
3661 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3662 return -TARGET_EFAULT
;
3663 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3664 return -TARGET_EFAULT
;
3665 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3666 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3667 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3668 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (semctl IPC_INFO/SEM_INFO result).
 * Field order mirrors the host struct seminfo as copied out by
 * host_to_target_seminfo().
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3685 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3686 struct seminfo
*host_seminfo
)
3688 struct target_seminfo
*target_seminfo
;
3689 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3690 return -TARGET_EFAULT
;
3691 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3692 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3693 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3694 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3695 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3696 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3697 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3698 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3699 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3700 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3701 unlock_user_struct(target_seminfo
, target_addr
, 1);
3707 struct semid_ds
*buf
;
3708 unsigned short *array
;
3709 struct seminfo
*__buf
;
3712 union target_semun
{
3719 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3720 abi_ulong target_addr
)
3723 unsigned short *array
;
3725 struct semid_ds semid_ds
;
3728 semun
.buf
= &semid_ds
;
3730 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3732 return get_errno(ret
);
3734 nsems
= semid_ds
.sem_nsems
;
3736 *host_array
= g_try_new(unsigned short, nsems
);
3738 return -TARGET_ENOMEM
;
3740 array
= lock_user(VERIFY_READ
, target_addr
,
3741 nsems
*sizeof(unsigned short), 1);
3743 g_free(*host_array
);
3744 return -TARGET_EFAULT
;
3747 for(i
=0; i
<nsems
; i
++) {
3748 __get_user((*host_array
)[i
], &array
[i
]);
3750 unlock_user(array
, target_addr
, 0);
3755 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3756 unsigned short **host_array
)
3759 unsigned short *array
;
3761 struct semid_ds semid_ds
;
3764 semun
.buf
= &semid_ds
;
3766 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3768 return get_errno(ret
);
3770 nsems
= semid_ds
.sem_nsems
;
3772 array
= lock_user(VERIFY_WRITE
, target_addr
,
3773 nsems
*sizeof(unsigned short), 0);
3775 return -TARGET_EFAULT
;
3777 for(i
=0; i
<nsems
; i
++) {
3778 __put_user((*host_array
)[i
], &array
[i
]);
3780 g_free(*host_array
);
3781 unlock_user(array
, target_addr
, 1);
3786 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3787 abi_ulong target_arg
)
3789 union target_semun target_su
= { .buf
= target_arg
};
3791 struct semid_ds dsarg
;
3792 unsigned short *array
= NULL
;
3793 struct seminfo seminfo
;
3794 abi_long ret
= -TARGET_EINVAL
;
3801 /* In 64 bit cross-endian situations, we will erroneously pick up
3802 * the wrong half of the union for the "val" element. To rectify
3803 * this, the entire 8-byte structure is byteswapped, followed by
3804 * a swap of the 4 byte val field. In other cases, the data is
3805 * already in proper host byte order. */
3806 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3807 target_su
.buf
= tswapal(target_su
.buf
);
3808 arg
.val
= tswap32(target_su
.val
);
3810 arg
.val
= target_su
.val
;
3812 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3816 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3820 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3821 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3828 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3832 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3833 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3839 arg
.__buf
= &seminfo
;
3840 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3841 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3849 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout sembuf for semop(); fields match the three copied by
 * target_to_host_sembuf() below. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3862 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3863 abi_ulong target_addr
,
3866 struct target_sembuf
*target_sembuf
;
3869 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3870 nsops
*sizeof(struct target_sembuf
), 1);
3872 return -TARGET_EFAULT
;
3874 for(i
=0; i
<nsops
; i
++) {
3875 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3876 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3877 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3880 unlock_user(target_sembuf
, target_addr
, 0);
3885 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3886 defined(TARGET_NR_semtimedop)
3889 * This macro is required to handle the s390 variants, which passes the
3890 * arguments in a different order than default.
3893 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3894 (__nsops), (__timeout), (__sops)
3896 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3897 (__nsops), 0, (__sops), (__timeout)
3900 static inline abi_long
do_semtimedop(int semid
,
3905 struct sembuf
*sops
;
3906 struct timespec ts
, *pts
= NULL
;
3911 if (target_to_host_timespec(pts
, timeout
)) {
3912 return -TARGET_EFAULT
;
3916 if (nsops
> TARGET_SEMOPM
) {
3917 return -TARGET_E2BIG
;
3920 sops
= g_new(struct sembuf
, nsops
);
3922 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
3924 return -TARGET_EFAULT
;
3927 ret
= -TARGET_ENOSYS
;
3928 #ifdef __NR_semtimedop
3929 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
3932 if (ret
== -TARGET_ENOSYS
) {
3933 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
3934 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
3942 struct target_msqid_ds
3944 struct target_ipc_perm msg_perm
;
3945 abi_ulong msg_stime
;
3946 #if TARGET_ABI_BITS == 32
3947 abi_ulong __unused1
;
3949 abi_ulong msg_rtime
;
3950 #if TARGET_ABI_BITS == 32
3951 abi_ulong __unused2
;
3953 abi_ulong msg_ctime
;
3954 #if TARGET_ABI_BITS == 32
3955 abi_ulong __unused3
;
3957 abi_ulong __msg_cbytes
;
3959 abi_ulong msg_qbytes
;
3960 abi_ulong msg_lspid
;
3961 abi_ulong msg_lrpid
;
3962 abi_ulong __unused4
;
3963 abi_ulong __unused5
;
3966 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3967 abi_ulong target_addr
)
3969 struct target_msqid_ds
*target_md
;
3971 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3972 return -TARGET_EFAULT
;
3973 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3974 return -TARGET_EFAULT
;
3975 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3976 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3977 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3978 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3979 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3980 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3981 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3982 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3983 unlock_user_struct(target_md
, target_addr
, 0);
3987 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3988 struct msqid_ds
*host_md
)
3990 struct target_msqid_ds
*target_md
;
3992 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3993 return -TARGET_EFAULT
;
3994 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3995 return -TARGET_EFAULT
;
3996 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3997 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3998 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3999 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4000 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4001 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4002 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4003 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4004 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout mirror of the host's struct msginfo (IPC_INFO/MSG_INFO).
 * Field set matches host_to_target_msginfo() below; msgseg is an
 * unsigned short as in the host structure. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4019 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4020 struct msginfo
*host_msginfo
)
4022 struct target_msginfo
*target_msginfo
;
4023 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4024 return -TARGET_EFAULT
;
4025 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4026 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4027 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4028 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4029 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4030 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4031 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4032 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4033 unlock_user_struct(target_msginfo
, target_addr
, 1);
4037 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4039 struct msqid_ds dsarg
;
4040 struct msginfo msginfo
;
4041 abi_long ret
= -TARGET_EINVAL
;
4049 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4050 return -TARGET_EFAULT
;
4051 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4052 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4053 return -TARGET_EFAULT
;
4056 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4060 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4061 if (host_to_target_msginfo(ptr
, &msginfo
))
4062 return -TARGET_EFAULT
;
4069 struct target_msgbuf
{
4074 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4075 ssize_t msgsz
, int msgflg
)
4077 struct target_msgbuf
*target_mb
;
4078 struct msgbuf
*host_mb
;
4082 return -TARGET_EINVAL
;
4085 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4086 return -TARGET_EFAULT
;
4087 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4089 unlock_user_struct(target_mb
, msgp
, 0);
4090 return -TARGET_ENOMEM
;
4092 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4093 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4094 ret
= -TARGET_ENOSYS
;
4096 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4099 if (ret
== -TARGET_ENOSYS
) {
4101 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4104 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4110 unlock_user_struct(target_mb
, msgp
, 0);
4116 #if defined(__sparc__)
4117 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4118 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4119 #elif defined(__s390x__)
4120 /* The s390 sys_ipc variant has only five parameters. */
4121 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4122 ((long int[]){(long int)__msgp, __msgtyp})
4124 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4125 ((long int[]){(long int)__msgp, __msgtyp}), 0
4129 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4130 ssize_t msgsz
, abi_long msgtyp
,
4133 struct target_msgbuf
*target_mb
;
4135 struct msgbuf
*host_mb
;
4139 return -TARGET_EINVAL
;
4142 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4143 return -TARGET_EFAULT
;
4145 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4147 ret
= -TARGET_ENOMEM
;
4150 ret
= -TARGET_ENOSYS
;
4152 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4155 if (ret
== -TARGET_ENOSYS
) {
4156 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4157 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4162 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4163 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4164 if (!target_mtext
) {
4165 ret
= -TARGET_EFAULT
;
4168 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4169 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4172 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4176 unlock_user_struct(target_mb
, msgp
, 1);
4181 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4182 abi_ulong target_addr
)
4184 struct target_shmid_ds
*target_sd
;
4186 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4187 return -TARGET_EFAULT
;
4188 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4189 return -TARGET_EFAULT
;
4190 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4191 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4192 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4193 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4194 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4195 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4196 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4197 unlock_user_struct(target_sd
, target_addr
, 0);
4201 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4202 struct shmid_ds
*host_sd
)
4204 struct target_shmid_ds
*target_sd
;
4206 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4207 return -TARGET_EFAULT
;
4208 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4209 return -TARGET_EFAULT
;
4210 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4211 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4212 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4213 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4214 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4215 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4216 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4217 unlock_user_struct(target_sd
, target_addr
, 1);
4221 struct target_shminfo
{
4229 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4230 struct shminfo
*host_shminfo
)
4232 struct target_shminfo
*target_shminfo
;
4233 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4234 return -TARGET_EFAULT
;
4235 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4236 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4237 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4238 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4239 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4240 unlock_user_struct(target_shminfo
, target_addr
, 1);
4244 struct target_shm_info
{
4249 abi_ulong swap_attempts
;
4250 abi_ulong swap_successes
;
4253 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4254 struct shm_info
*host_shm_info
)
4256 struct target_shm_info
*target_shm_info
;
4257 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4258 return -TARGET_EFAULT
;
4259 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4260 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4261 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4262 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4263 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4264 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4265 unlock_user_struct(target_shm_info
, target_addr
, 1);
4269 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4271 struct shmid_ds dsarg
;
4272 struct shminfo shminfo
;
4273 struct shm_info shm_info
;
4274 abi_long ret
= -TARGET_EINVAL
;
4282 if (target_to_host_shmid_ds(&dsarg
, buf
))
4283 return -TARGET_EFAULT
;
4284 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4285 if (host_to_target_shmid_ds(buf
, &dsarg
))
4286 return -TARGET_EFAULT
;
4289 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4290 if (host_to_target_shminfo(buf
, &shminfo
))
4291 return -TARGET_EFAULT
;
4294 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4295 if (host_to_target_shm_info(buf
, &shm_info
))
4296 return -TARGET_EFAULT
;
4301 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4308 #ifndef TARGET_FORCE_SHMLBA
4309 /* For most architectures, SHMLBA is the same as the page size;
4310 * some architectures have larger values, in which case they should
4311 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4312 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4313 * and defining its own value for SHMLBA.
4315 * The kernel also permits SHMLBA to be set by the architecture to a
4316 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4317 * this means that addresses are rounded to the large size if
4318 * SHM_RND is set but addresses not aligned to that size are not rejected
4319 * as long as they are at least page-aligned. Since the only architecture
4320 * which uses this is ia64 this code doesn't provide for that oddity.
4322 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4324 return TARGET_PAGE_SIZE
;
4328 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4329 int shmid
, abi_ulong shmaddr
, int shmflg
)
4333 struct shmid_ds shm_info
;
4337 /* find out the length of the shared memory segment */
4338 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4339 if (is_error(ret
)) {
4340 /* can't get length, bail out */
4344 shmlba
= target_shmlba(cpu_env
);
4346 if (shmaddr
& (shmlba
- 1)) {
4347 if (shmflg
& SHM_RND
) {
4348 shmaddr
&= ~(shmlba
- 1);
4350 return -TARGET_EINVAL
;
4353 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4354 return -TARGET_EINVAL
;
4360 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4362 abi_ulong mmap_start
;
4364 /* In order to use the host shmat, we need to honor host SHMLBA. */
4365 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4367 if (mmap_start
== -1) {
4369 host_raddr
= (void *)-1;
4371 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4374 if (host_raddr
== (void *)-1) {
4376 return get_errno((long)host_raddr
);
4378 raddr
=h2g((unsigned long)host_raddr
);
4380 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4381 PAGE_VALID
| PAGE_READ
|
4382 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4384 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4385 if (!shm_regions
[i
].in_use
) {
4386 shm_regions
[i
].in_use
= true;
4387 shm_regions
[i
].start
= raddr
;
4388 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4398 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4405 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4406 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4407 shm_regions
[i
].in_use
= false;
4408 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4412 rv
= get_errno(shmdt(g2h(shmaddr
)));
4419 #ifdef TARGET_NR_ipc
4420 /* ??? This only works with linear mappings. */
4421 /* do_ipc() must return target values and target errnos. */
4422 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4423 unsigned int call
, abi_long first
,
4424 abi_long second
, abi_long third
,
4425 abi_long ptr
, abi_long fifth
)
4430 version
= call
>> 16;
4435 ret
= do_semtimedop(first
, ptr
, second
, 0);
4437 case IPCOP_semtimedop
:
4439 * The s390 sys_ipc variant has only five parameters instead of six
4440 * (as for default variant) and the only difference is the handling of
4441 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4442 * to a struct timespec where the generic variant uses fifth parameter.
4444 #if defined(TARGET_S390X)
4445 ret
= do_semtimedop(first
, ptr
, second
, third
);
4447 ret
= do_semtimedop(first
, ptr
, second
, fifth
);
4452 ret
= get_errno(semget(first
, second
, third
));
4455 case IPCOP_semctl
: {
4456 /* The semun argument to semctl is passed by value, so dereference the
4459 get_user_ual(atptr
, ptr
);
4460 ret
= do_semctl(first
, second
, third
, atptr
);
4465 ret
= get_errno(msgget(first
, second
));
4469 ret
= do_msgsnd(first
, ptr
, second
, third
);
4473 ret
= do_msgctl(first
, second
, ptr
);
4480 struct target_ipc_kludge
{
4485 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4486 ret
= -TARGET_EFAULT
;
4490 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4492 unlock_user_struct(tmp
, ptr
, 0);
4496 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4505 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4506 if (is_error(raddr
))
4507 return get_errno(raddr
);
4508 if (put_user_ual(raddr
, third
))
4509 return -TARGET_EFAULT
;
4513 ret
= -TARGET_EINVAL
;
4518 ret
= do_shmdt(ptr
);
4522 /* IPC_* flag values are the same on all linux platforms */
4523 ret
= get_errno(shmget(first
, second
, third
));
4526 /* IPC_* and SHM_* command values are the same on all linux platforms */
4528 ret
= do_shmctl(first
, second
, ptr
);
4531 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4533 ret
= -TARGET_ENOSYS
;
4540 /* kernel structure types definitions */
4542 #define STRUCT(name, ...) STRUCT_ ## name,
4543 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4545 #include "syscall_types.h"
4549 #undef STRUCT_SPECIAL
4551 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4552 #define STRUCT_SPECIAL(name)
4553 #include "syscall_types.h"
4555 #undef STRUCT_SPECIAL
4557 #define MAX_STRUCT_SIZE 4096
4559 #ifdef CONFIG_FIEMAP
4560 /* So fiemap access checks don't overflow on 32 bit systems.
4561 * This is very slightly smaller than the limit imposed by
4562 * the underlying kernel.
4564 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4565 / sizeof(struct fiemap_extent))
4567 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4568 int fd
, int cmd
, abi_long arg
)
4570 /* The parameter for this ioctl is a struct fiemap followed
4571 * by an array of struct fiemap_extent whose size is set
4572 * in fiemap->fm_extent_count. The array is filled in by the
4575 int target_size_in
, target_size_out
;
4577 const argtype
*arg_type
= ie
->arg_type
;
4578 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4581 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4585 assert(arg_type
[0] == TYPE_PTR
);
4586 assert(ie
->access
== IOC_RW
);
4588 target_size_in
= thunk_type_size(arg_type
, 0);
4589 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4591 return -TARGET_EFAULT
;
4593 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4594 unlock_user(argptr
, arg
, 0);
4595 fm
= (struct fiemap
*)buf_temp
;
4596 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4597 return -TARGET_EINVAL
;
4600 outbufsz
= sizeof (*fm
) +
4601 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4603 if (outbufsz
> MAX_STRUCT_SIZE
) {
4604 /* We can't fit all the extents into the fixed size buffer.
4605 * Allocate one that is large enough and use it instead.
4607 fm
= g_try_malloc(outbufsz
);
4609 return -TARGET_ENOMEM
;
4611 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4614 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4615 if (!is_error(ret
)) {
4616 target_size_out
= target_size_in
;
4617 /* An extent_count of 0 means we were only counting the extents
4618 * so there are no structs to copy
4620 if (fm
->fm_extent_count
!= 0) {
4621 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4623 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4625 ret
= -TARGET_EFAULT
;
4627 /* Convert the struct fiemap */
4628 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4629 if (fm
->fm_extent_count
!= 0) {
4630 p
= argptr
+ target_size_in
;
4631 /* ...and then all the struct fiemap_extents */
4632 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4633 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4638 unlock_user(argptr
, arg
, target_size_out
);
4648 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4649 int fd
, int cmd
, abi_long arg
)
4651 const argtype
*arg_type
= ie
->arg_type
;
4655 struct ifconf
*host_ifconf
;
4657 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4658 int target_ifreq_size
;
4663 abi_long target_ifc_buf
;
4667 assert(arg_type
[0] == TYPE_PTR
);
4668 assert(ie
->access
== IOC_RW
);
4671 target_size
= thunk_type_size(arg_type
, 0);
4673 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4675 return -TARGET_EFAULT
;
4676 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4677 unlock_user(argptr
, arg
, 0);
4679 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4680 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4681 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4683 if (target_ifc_buf
!= 0) {
4684 target_ifc_len
= host_ifconf
->ifc_len
;
4685 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4686 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4688 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4689 if (outbufsz
> MAX_STRUCT_SIZE
) {
4691 * We can't fit all the extents into the fixed size buffer.
4692 * Allocate one that is large enough and use it instead.
4694 host_ifconf
= malloc(outbufsz
);
4696 return -TARGET_ENOMEM
;
4698 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4701 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4703 host_ifconf
->ifc_len
= host_ifc_len
;
4705 host_ifc_buf
= NULL
;
4707 host_ifconf
->ifc_buf
= host_ifc_buf
;
4709 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4710 if (!is_error(ret
)) {
4711 /* convert host ifc_len to target ifc_len */
4713 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4714 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4715 host_ifconf
->ifc_len
= target_ifc_len
;
4717 /* restore target ifc_buf */
4719 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4721 /* copy struct ifconf to target user */
4723 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4725 return -TARGET_EFAULT
;
4726 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4727 unlock_user(argptr
, arg
, target_size
);
4729 if (target_ifc_buf
!= 0) {
4730 /* copy ifreq[] to target user */
4731 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4732 for (i
= 0; i
< nb_ifreq
; i
++) {
4733 thunk_convert(argptr
+ i
* target_ifreq_size
,
4734 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4735 ifreq_arg_type
, THUNK_TARGET
);
4737 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4748 #if defined(CONFIG_USBFS)
4749 #if HOST_LONG_BITS > 64
4750 #error USBDEVFS thunks do not support >64 bit hosts yet.
4753 uint64_t target_urb_adr
;
4754 uint64_t target_buf_adr
;
4755 char *target_buf_ptr
;
4756 struct usbdevfs_urb host_urb
;
4759 static GHashTable
*usbdevfs_urb_hashtable(void)
4761 static GHashTable
*urb_hashtable
;
4763 if (!urb_hashtable
) {
4764 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4766 return urb_hashtable
;
4769 static void urb_hashtable_insert(struct live_urb
*urb
)
4771 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4772 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4775 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4777 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4778 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4781 static void urb_hashtable_remove(struct live_urb
*urb
)
4783 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4784 g_hash_table_remove(urb_hashtable
, urb
);
4788 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4789 int fd
, int cmd
, abi_long arg
)
4791 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4792 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4793 struct live_urb
*lurb
;
4797 uintptr_t target_urb_adr
;
4800 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4802 memset(buf_temp
, 0, sizeof(uint64_t));
4803 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4804 if (is_error(ret
)) {
4808 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4809 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4810 if (!lurb
->target_urb_adr
) {
4811 return -TARGET_EFAULT
;
4813 urb_hashtable_remove(lurb
);
4814 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4815 lurb
->host_urb
.buffer_length
);
4816 lurb
->target_buf_ptr
= NULL
;
4818 /* restore the guest buffer pointer */
4819 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4821 /* update the guest urb struct */
4822 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4825 return -TARGET_EFAULT
;
4827 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4828 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4830 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4831 /* write back the urb handle */
4832 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4835 return -TARGET_EFAULT
;
4838 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4839 target_urb_adr
= lurb
->target_urb_adr
;
4840 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4841 unlock_user(argptr
, arg
, target_size
);
4848 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4849 uint8_t *buf_temp
__attribute__((unused
)),
4850 int fd
, int cmd
, abi_long arg
)
4852 struct live_urb
*lurb
;
4854 /* map target address back to host URB with metadata. */
4855 lurb
= urb_hashtable_lookup(arg
);
4857 return -TARGET_EFAULT
;
4859 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4863 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4864 int fd
, int cmd
, abi_long arg
)
4866 const argtype
*arg_type
= ie
->arg_type
;
4871 struct live_urb
*lurb
;
4874 * each submitted URB needs to map to a unique ID for the
4875 * kernel, and that unique ID needs to be a pointer to
4876 * host memory. hence, we need to malloc for each URB.
4877 * isochronous transfers have a variable length struct.
4880 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4882 /* construct host copy of urb and metadata */
4883 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4885 return -TARGET_ENOMEM
;
4888 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4891 return -TARGET_EFAULT
;
4893 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4894 unlock_user(argptr
, arg
, 0);
4896 lurb
->target_urb_adr
= arg
;
4897 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4899 /* buffer space used depends on endpoint type so lock the entire buffer */
4900 /* control type urbs should check the buffer contents for true direction */
4901 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4902 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4903 lurb
->host_urb
.buffer_length
, 1);
4904 if (lurb
->target_buf_ptr
== NULL
) {
4906 return -TARGET_EFAULT
;
4909 /* update buffer pointer in host copy */
4910 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4912 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4913 if (is_error(ret
)) {
4914 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4917 urb_hashtable_insert(lurb
);
4922 #endif /* CONFIG_USBFS */
4924 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4925 int cmd
, abi_long arg
)
4928 struct dm_ioctl
*host_dm
;
4929 abi_long guest_data
;
4930 uint32_t guest_data_size
;
4932 const argtype
*arg_type
= ie
->arg_type
;
4934 void *big_buf
= NULL
;
4938 target_size
= thunk_type_size(arg_type
, 0);
4939 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4941 ret
= -TARGET_EFAULT
;
4944 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4945 unlock_user(argptr
, arg
, 0);
4947 /* buf_temp is too small, so fetch things into a bigger buffer */
4948 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4949 memcpy(big_buf
, buf_temp
, target_size
);
4953 guest_data
= arg
+ host_dm
->data_start
;
4954 if ((guest_data
- arg
) < 0) {
4955 ret
= -TARGET_EINVAL
;
4958 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4959 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4961 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4963 ret
= -TARGET_EFAULT
;
4967 switch (ie
->host_cmd
) {
4969 case DM_LIST_DEVICES
:
4972 case DM_DEV_SUSPEND
:
4975 case DM_TABLE_STATUS
:
4976 case DM_TABLE_CLEAR
:
4978 case DM_LIST_VERSIONS
:
4982 case DM_DEV_SET_GEOMETRY
:
4983 /* data contains only strings */
4984 memcpy(host_data
, argptr
, guest_data_size
);
4987 memcpy(host_data
, argptr
, guest_data_size
);
4988 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4992 void *gspec
= argptr
;
4993 void *cur_data
= host_data
;
4994 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4995 int spec_size
= thunk_type_size(arg_type
, 0);
4998 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4999 struct dm_target_spec
*spec
= cur_data
;
5003 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5004 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5006 spec
->next
= sizeof(*spec
) + slen
;
5007 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5009 cur_data
+= spec
->next
;
5014 ret
= -TARGET_EINVAL
;
5015 unlock_user(argptr
, guest_data
, 0);
5018 unlock_user(argptr
, guest_data
, 0);
5020 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5021 if (!is_error(ret
)) {
5022 guest_data
= arg
+ host_dm
->data_start
;
5023 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5024 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5025 switch (ie
->host_cmd
) {
5030 case DM_DEV_SUSPEND
:
5033 case DM_TABLE_CLEAR
:
5035 case DM_DEV_SET_GEOMETRY
:
5036 /* no return data */
5038 case DM_LIST_DEVICES
:
5040 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5041 uint32_t remaining_data
= guest_data_size
;
5042 void *cur_data
= argptr
;
5043 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5044 int nl_size
= 12; /* can't use thunk_size due to alignment */
5047 uint32_t next
= nl
->next
;
5049 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5051 if (remaining_data
< nl
->next
) {
5052 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5055 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5056 strcpy(cur_data
+ nl_size
, nl
->name
);
5057 cur_data
+= nl
->next
;
5058 remaining_data
-= nl
->next
;
5062 nl
= (void*)nl
+ next
;
5067 case DM_TABLE_STATUS
:
5069 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5070 void *cur_data
= argptr
;
5071 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5072 int spec_size
= thunk_type_size(arg_type
, 0);
5075 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5076 uint32_t next
= spec
->next
;
5077 int slen
= strlen((char*)&spec
[1]) + 1;
5078 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5079 if (guest_data_size
< spec
->next
) {
5080 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5083 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5084 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5085 cur_data
= argptr
+ spec
->next
;
5086 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5092 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5093 int count
= *(uint32_t*)hdata
;
5094 uint64_t *hdev
= hdata
+ 8;
5095 uint64_t *gdev
= argptr
+ 8;
5098 *(uint32_t*)argptr
= tswap32(count
);
5099 for (i
= 0; i
< count
; i
++) {
5100 *gdev
= tswap64(*hdev
);
5106 case DM_LIST_VERSIONS
:
5108 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5109 uint32_t remaining_data
= guest_data_size
;
5110 void *cur_data
= argptr
;
5111 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5112 int vers_size
= thunk_type_size(arg_type
, 0);
5115 uint32_t next
= vers
->next
;
5117 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5119 if (remaining_data
< vers
->next
) {
5120 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5123 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5124 strcpy(cur_data
+ vers_size
, vers
->name
);
5125 cur_data
+= vers
->next
;
5126 remaining_data
-= vers
->next
;
5130 vers
= (void*)vers
+ next
;
5135 unlock_user(argptr
, guest_data
, 0);
5136 ret
= -TARGET_EINVAL
;
5139 unlock_user(argptr
, guest_data
, guest_data_size
);
5141 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5143 ret
= -TARGET_EFAULT
;
5146 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5147 unlock_user(argptr
, arg
, target_size
);
5154 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5155 int cmd
, abi_long arg
)
5159 const argtype
*arg_type
= ie
->arg_type
;
5160 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5163 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5164 struct blkpg_partition host_part
;
5166 /* Read and convert blkpg */
5168 target_size
= thunk_type_size(arg_type
, 0);
5169 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5171 ret
= -TARGET_EFAULT
;
5174 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5175 unlock_user(argptr
, arg
, 0);
5177 switch (host_blkpg
->op
) {
5178 case BLKPG_ADD_PARTITION
:
5179 case BLKPG_DEL_PARTITION
:
5180 /* payload is struct blkpg_partition */
5183 /* Unknown opcode */
5184 ret
= -TARGET_EINVAL
;
5188 /* Read and convert blkpg->data */
5189 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5190 target_size
= thunk_type_size(part_arg_type
, 0);
5191 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5193 ret
= -TARGET_EFAULT
;
5196 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5197 unlock_user(argptr
, arg
, 0);
5199 /* Swizzle the data pointer to our local copy and call! */
5200 host_blkpg
->data
= &host_part
;
5201 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5207 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5208 int fd
, int cmd
, abi_long arg
)
5210 const argtype
*arg_type
= ie
->arg_type
;
5211 const StructEntry
*se
;
5212 const argtype
*field_types
;
5213 const int *dst_offsets
, *src_offsets
;
5216 abi_ulong
*target_rt_dev_ptr
= NULL
;
5217 unsigned long *host_rt_dev_ptr
= NULL
;
5221 assert(ie
->access
== IOC_W
);
5222 assert(*arg_type
== TYPE_PTR
);
5224 assert(*arg_type
== TYPE_STRUCT
);
5225 target_size
= thunk_type_size(arg_type
, 0);
5226 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5228 return -TARGET_EFAULT
;
5231 assert(*arg_type
== (int)STRUCT_rtentry
);
5232 se
= struct_entries
+ *arg_type
++;
5233 assert(se
->convert
[0] == NULL
);
5234 /* convert struct here to be able to catch rt_dev string */
5235 field_types
= se
->field_types
;
5236 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5237 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5238 for (i
= 0; i
< se
->nb_fields
; i
++) {
5239 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5240 assert(*field_types
== TYPE_PTRVOID
);
5241 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5242 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5243 if (*target_rt_dev_ptr
!= 0) {
5244 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5245 tswapal(*target_rt_dev_ptr
));
5246 if (!*host_rt_dev_ptr
) {
5247 unlock_user(argptr
, arg
, 0);
5248 return -TARGET_EFAULT
;
5251 *host_rt_dev_ptr
= 0;
5256 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5257 argptr
+ src_offsets
[i
],
5258 field_types
, THUNK_HOST
);
5260 unlock_user(argptr
, arg
, 0);
5262 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5264 assert(host_rt_dev_ptr
!= NULL
);
5265 assert(target_rt_dev_ptr
!= NULL
);
5266 if (*host_rt_dev_ptr
!= 0) {
5267 unlock_user((void *)*host_rt_dev_ptr
,
5268 *target_rt_dev_ptr
, 0);
5273 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5274 int fd
, int cmd
, abi_long arg
)
5276 int sig
= target_to_host_signal(arg
);
5277 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5280 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5281 int fd
, int cmd
, abi_long arg
)
5286 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5287 if (is_error(ret
)) {
5291 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5292 if (copy_to_user_timeval(arg
, &tv
)) {
5293 return -TARGET_EFAULT
;
5296 if (copy_to_user_timeval64(arg
, &tv
)) {
5297 return -TARGET_EFAULT
;
5304 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5305 int fd
, int cmd
, abi_long arg
)
5310 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5311 if (is_error(ret
)) {
5315 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5316 if (host_to_target_timespec(arg
, &ts
)) {
5317 return -TARGET_EFAULT
;
5320 if (host_to_target_timespec64(arg
, &ts
)) {
5321 return -TARGET_EFAULT
;
5329 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5330 int fd
, int cmd
, abi_long arg
)
5332 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5333 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5339 static void unlock_drm_version(struct drm_version
*host_ver
,
5340 struct target_drm_version
*target_ver
,
5343 unlock_user(host_ver
->name
, target_ver
->name
,
5344 copy
? host_ver
->name_len
: 0);
5345 unlock_user(host_ver
->date
, target_ver
->date
,
5346 copy
? host_ver
->date_len
: 0);
5347 unlock_user(host_ver
->desc
, target_ver
->desc
,
5348 copy
? host_ver
->desc_len
: 0);
5351 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5352 struct target_drm_version
*target_ver
)
5354 memset(host_ver
, 0, sizeof(*host_ver
));
5356 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5357 if (host_ver
->name_len
) {
5358 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5359 target_ver
->name_len
, 0);
5360 if (!host_ver
->name
) {
5365 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5366 if (host_ver
->date_len
) {
5367 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5368 target_ver
->date_len
, 0);
5369 if (!host_ver
->date
) {
5374 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5375 if (host_ver
->desc_len
) {
5376 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5377 target_ver
->desc_len
, 0);
5378 if (!host_ver
->desc
) {
5385 unlock_drm_version(host_ver
, target_ver
, false);
5389 static inline void host_to_target_drmversion(
5390 struct target_drm_version
*target_ver
,
5391 struct drm_version
*host_ver
)
5393 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5394 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5395 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5396 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5397 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5398 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5399 unlock_drm_version(host_ver
, target_ver
, true);
5402 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5403 int fd
, int cmd
, abi_long arg
)
5405 struct drm_version
*ver
;
5406 struct target_drm_version
*target_ver
;
5409 switch (ie
->host_cmd
) {
5410 case DRM_IOCTL_VERSION
:
5411 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5412 return -TARGET_EFAULT
;
5414 ver
= (struct drm_version
*)buf_temp
;
5415 ret
= target_to_host_drmversion(ver
, target_ver
);
5416 if (!is_error(ret
)) {
5417 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5418 if (is_error(ret
)) {
5419 unlock_drm_version(ver
, target_ver
, false);
5421 host_to_target_drmversion(target_ver
, ver
);
5424 unlock_user_struct(target_ver
, arg
, 0);
5427 return -TARGET_ENOSYS
;
5430 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5431 struct drm_i915_getparam
*gparam
,
5432 int fd
, abi_long arg
)
5436 struct target_drm_i915_getparam
*target_gparam
;
5438 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5439 return -TARGET_EFAULT
;
5442 __get_user(gparam
->param
, &target_gparam
->param
);
5443 gparam
->value
= &value
;
5444 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5445 put_user_s32(value
, target_gparam
->value
);
5447 unlock_user_struct(target_gparam
, arg
, 0);
5451 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5452 int fd
, int cmd
, abi_long arg
)
5454 switch (ie
->host_cmd
) {
5455 case DRM_IOCTL_I915_GETPARAM
:
5456 return do_ioctl_drm_i915_getparam(ie
,
5457 (struct drm_i915_getparam
*)buf_temp
,
5460 return -TARGET_ENOSYS
;
5466 IOCTLEntry ioctl_entries
[] = {
5467 #define IOCTL(cmd, access, ...) \
5468 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5469 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5470 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5471 #define IOCTL_IGNORE(cmd) \
5472 { TARGET_ ## cmd, 0, #cmd },
5477 /* ??? Implement proper locking for ioctls. */
5478 /* do_ioctl() Must return target values and target errnos. */
5479 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5481 const IOCTLEntry
*ie
;
5482 const argtype
*arg_type
;
5484 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5490 if (ie
->target_cmd
== 0) {
5492 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5493 return -TARGET_ENOSYS
;
5495 if (ie
->target_cmd
== cmd
)
5499 arg_type
= ie
->arg_type
;
5501 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5502 } else if (!ie
->host_cmd
) {
5503 /* Some architectures define BSD ioctls in their headers
5504 that are not implemented in Linux. */
5505 return -TARGET_ENOSYS
;
5508 switch(arg_type
[0]) {
5511 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5517 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5521 target_size
= thunk_type_size(arg_type
, 0);
5522 switch(ie
->access
) {
5524 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5525 if (!is_error(ret
)) {
5526 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5528 return -TARGET_EFAULT
;
5529 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5530 unlock_user(argptr
, arg
, target_size
);
5534 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5536 return -TARGET_EFAULT
;
5537 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5538 unlock_user(argptr
, arg
, 0);
5539 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5543 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5545 return -TARGET_EFAULT
;
5546 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5547 unlock_user(argptr
, arg
, 0);
5548 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5549 if (!is_error(ret
)) {
5550 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5552 return -TARGET_EFAULT
;
5553 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5554 unlock_user(argptr
, arg
, target_size
);
5560 qemu_log_mask(LOG_UNIMP
,
5561 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5562 (long)cmd
, arg_type
[0]);
5563 ret
= -TARGET_ENOSYS
;
5569 static const bitmask_transtbl iflag_tbl
[] = {
5570 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5571 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5572 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5573 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5574 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5575 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5576 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5577 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5578 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5579 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5580 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5581 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5582 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5583 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5587 static const bitmask_transtbl oflag_tbl
[] = {
5588 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5589 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5590 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5591 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5592 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5593 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5594 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5595 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5596 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5597 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5598 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5599 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5600 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5601 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5602 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5603 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5604 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5605 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5606 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5607 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5608 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5609 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5610 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5611 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5615 static const bitmask_transtbl cflag_tbl
[] = {
5616 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5617 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5618 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5619 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5620 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5621 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5622 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5623 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5624 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5625 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5626 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5627 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5628 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5629 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5630 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5631 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5632 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5633 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5634 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5635 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5636 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5637 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5638 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5639 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5640 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5641 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5642 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5643 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5644 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5645 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5646 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5650 static const bitmask_transtbl lflag_tbl
[] = {
5651 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5652 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5653 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5654 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5655 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5656 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5657 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5658 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5659 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5660 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5661 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5662 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5663 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5664 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5665 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5669 static void target_to_host_termios (void *dst
, const void *src
)
5671 struct host_termios
*host
= dst
;
5672 const struct target_termios
*target
= src
;
5675 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5677 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5679 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5681 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5682 host
->c_line
= target
->c_line
;
5684 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5685 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5686 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5687 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5688 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5689 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5690 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5691 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5692 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5693 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5694 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5695 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5696 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5697 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5698 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5699 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5700 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5701 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5704 static void host_to_target_termios (void *dst
, const void *src
)
5706 struct target_termios
*target
= dst
;
5707 const struct host_termios
*host
= src
;
5710 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5712 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5714 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5716 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5717 target
->c_line
= host
->c_line
;
5719 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5720 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5721 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5722 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5723 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5724 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5725 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5726 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5727 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5728 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5729 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5730 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5731 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5732 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5733 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5734 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5735 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5736 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5739 static const StructEntry struct_termios_def
= {
5740 .convert
= { host_to_target_termios
, target_to_host_termios
},
5741 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5742 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5745 static bitmask_transtbl mmap_flags_tbl
[] = {
5746 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5747 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5748 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5749 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5750 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5751 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5752 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5753 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5754 MAP_DENYWRITE
, MAP_DENYWRITE
},
5755 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5756 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5757 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5758 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5759 MAP_NORESERVE
, MAP_NORESERVE
},
5760 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5761 /* MAP_STACK had been ignored by the kernel for quite some time.
5762 Recognize it for the target insofar as we do not want to pass
5763 it through to the host. */
5764 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5769 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5770 * TARGET_I386 is defined if TARGET_X86_64 is defined
5772 #if defined(TARGET_I386)
5774 /* NOTE: there is really one LDT for all the threads */
5775 static uint8_t *ldt_table
;
5777 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5784 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5785 if (size
> bytecount
)
5787 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5789 return -TARGET_EFAULT
;
5790 /* ??? Should this by byteswapped? */
5791 memcpy(p
, ldt_table
, size
);
5792 unlock_user(p
, ptr
, size
);
5796 /* XXX: add locking support */
5797 static abi_long
write_ldt(CPUX86State
*env
,
5798 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5800 struct target_modify_ldt_ldt_s ldt_info
;
5801 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5802 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5803 int seg_not_present
, useable
, lm
;
5804 uint32_t *lp
, entry_1
, entry_2
;
5806 if (bytecount
!= sizeof(ldt_info
))
5807 return -TARGET_EINVAL
;
5808 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5809 return -TARGET_EFAULT
;
5810 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5811 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5812 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5813 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5814 unlock_user_struct(target_ldt_info
, ptr
, 0);
5816 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5817 return -TARGET_EINVAL
;
5818 seg_32bit
= ldt_info
.flags
& 1;
5819 contents
= (ldt_info
.flags
>> 1) & 3;
5820 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5821 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5822 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5823 useable
= (ldt_info
.flags
>> 6) & 1;
5827 lm
= (ldt_info
.flags
>> 7) & 1;
5829 if (contents
== 3) {
5831 return -TARGET_EINVAL
;
5832 if (seg_not_present
== 0)
5833 return -TARGET_EINVAL
;
5835 /* allocate the LDT */
5837 env
->ldt
.base
= target_mmap(0,
5838 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5839 PROT_READ
|PROT_WRITE
,
5840 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5841 if (env
->ldt
.base
== -1)
5842 return -TARGET_ENOMEM
;
5843 memset(g2h(env
->ldt
.base
), 0,
5844 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5845 env
->ldt
.limit
= 0xffff;
5846 ldt_table
= g2h(env
->ldt
.base
);
5849 /* NOTE: same code as Linux kernel */
5850 /* Allow LDTs to be cleared by the user. */
5851 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5854 read_exec_only
== 1 &&
5856 limit_in_pages
== 0 &&
5857 seg_not_present
== 1 &&
5865 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5866 (ldt_info
.limit
& 0x0ffff);
5867 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5868 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5869 (ldt_info
.limit
& 0xf0000) |
5870 ((read_exec_only
^ 1) << 9) |
5872 ((seg_not_present
^ 1) << 15) |
5874 (limit_in_pages
<< 23) |
5878 entry_2
|= (useable
<< 20);
5880 /* Install the new entry ... */
5882 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5883 lp
[0] = tswap32(entry_1
);
5884 lp
[1] = tswap32(entry_2
);
5888 /* specific and weird i386 syscalls */
5889 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5890 unsigned long bytecount
)
5896 ret
= read_ldt(ptr
, bytecount
);
5899 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5902 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5905 ret
= -TARGET_ENOSYS
;
5911 #if defined(TARGET_ABI32)
5912 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5914 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5915 struct target_modify_ldt_ldt_s ldt_info
;
5916 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5917 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5918 int seg_not_present
, useable
, lm
;
5919 uint32_t *lp
, entry_1
, entry_2
;
5922 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5923 if (!target_ldt_info
)
5924 return -TARGET_EFAULT
;
5925 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5926 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5927 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5928 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5929 if (ldt_info
.entry_number
== -1) {
5930 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5931 if (gdt_table
[i
] == 0) {
5932 ldt_info
.entry_number
= i
;
5933 target_ldt_info
->entry_number
= tswap32(i
);
5938 unlock_user_struct(target_ldt_info
, ptr
, 1);
5940 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5941 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5942 return -TARGET_EINVAL
;
5943 seg_32bit
= ldt_info
.flags
& 1;
5944 contents
= (ldt_info
.flags
>> 1) & 3;
5945 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5946 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5947 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5948 useable
= (ldt_info
.flags
>> 6) & 1;
5952 lm
= (ldt_info
.flags
>> 7) & 1;
5955 if (contents
== 3) {
5956 if (seg_not_present
== 0)
5957 return -TARGET_EINVAL
;
5960 /* NOTE: same code as Linux kernel */
5961 /* Allow LDTs to be cleared by the user. */
5962 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5963 if ((contents
== 0 &&
5964 read_exec_only
== 1 &&
5966 limit_in_pages
== 0 &&
5967 seg_not_present
== 1 &&
5975 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5976 (ldt_info
.limit
& 0x0ffff);
5977 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5978 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5979 (ldt_info
.limit
& 0xf0000) |
5980 ((read_exec_only
^ 1) << 9) |
5982 ((seg_not_present
^ 1) << 15) |
5984 (limit_in_pages
<< 23) |
5989 /* Install the new entry ... */
5991 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5992 lp
[0] = tswap32(entry_1
);
5993 lp
[1] = tswap32(entry_2
);
5997 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5999 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6000 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6001 uint32_t base_addr
, limit
, flags
;
6002 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6003 int seg_not_present
, useable
, lm
;
6004 uint32_t *lp
, entry_1
, entry_2
;
6006 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6007 if (!target_ldt_info
)
6008 return -TARGET_EFAULT
;
6009 idx
= tswap32(target_ldt_info
->entry_number
);
6010 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6011 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6012 unlock_user_struct(target_ldt_info
, ptr
, 1);
6013 return -TARGET_EINVAL
;
6015 lp
= (uint32_t *)(gdt_table
+ idx
);
6016 entry_1
= tswap32(lp
[0]);
6017 entry_2
= tswap32(lp
[1]);
6019 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6020 contents
= (entry_2
>> 10) & 3;
6021 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6022 seg_32bit
= (entry_2
>> 22) & 1;
6023 limit_in_pages
= (entry_2
>> 23) & 1;
6024 useable
= (entry_2
>> 20) & 1;
6028 lm
= (entry_2
>> 21) & 1;
6030 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6031 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6032 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6033 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6034 base_addr
= (entry_1
>> 16) |
6035 (entry_2
& 0xff000000) |
6036 ((entry_2
& 0xff) << 16);
6037 target_ldt_info
->base_addr
= tswapal(base_addr
);
6038 target_ldt_info
->limit
= tswap32(limit
);
6039 target_ldt_info
->flags
= tswap32(flags
);
6040 unlock_user_struct(target_ldt_info
, ptr
, 1);
6044 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6046 return -TARGET_ENOSYS
;
6049 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6056 case TARGET_ARCH_SET_GS
:
6057 case TARGET_ARCH_SET_FS
:
6058 if (code
== TARGET_ARCH_SET_GS
)
6062 cpu_x86_load_seg(env
, idx
, 0);
6063 env
->segs
[idx
].base
= addr
;
6065 case TARGET_ARCH_GET_GS
:
6066 case TARGET_ARCH_GET_FS
:
6067 if (code
== TARGET_ARCH_GET_GS
)
6071 val
= env
->segs
[idx
].base
;
6072 if (put_user(val
, addr
, abi_ulong
))
6073 ret
= -TARGET_EFAULT
;
6076 ret
= -TARGET_EINVAL
;
#endif /* defined(TARGET_ABI32) */
#endif /* defined(TARGET_I386) */
/* Host stack size given to each emulated guest thread. */
#define NEW_STACK_SIZE 0x40000

/* Serializes guest thread creation so new-thread setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6091 pthread_mutex_t mutex
;
6092 pthread_cond_t cond
;
6095 abi_ulong child_tidptr
;
6096 abi_ulong parent_tidptr
;
6100 static void *clone_func(void *arg
)
6102 new_thread_info
*info
= arg
;
6107 rcu_register_thread();
6108 tcg_register_thread();
6112 ts
= (TaskState
*)cpu
->opaque
;
6113 info
->tid
= sys_gettid();
6115 if (info
->child_tidptr
)
6116 put_user_u32(info
->tid
, info
->child_tidptr
);
6117 if (info
->parent_tidptr
)
6118 put_user_u32(info
->tid
, info
->parent_tidptr
);
6119 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6120 /* Enable signals. */
6121 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6122 /* Signal to the parent that we're ready. */
6123 pthread_mutex_lock(&info
->mutex
);
6124 pthread_cond_broadcast(&info
->cond
);
6125 pthread_mutex_unlock(&info
->mutex
);
6126 /* Wait until the parent has finished initializing the tls state. */
6127 pthread_mutex_lock(&clone_lock
);
6128 pthread_mutex_unlock(&clone_lock
);
6134 /* do_fork() Must return host values and target errnos (unlike most
6135 do_*() functions). */
6136 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6137 abi_ulong parent_tidptr
, target_ulong newtls
,
6138 abi_ulong child_tidptr
)
6140 CPUState
*cpu
= env_cpu(env
);
6144 CPUArchState
*new_env
;
6147 flags
&= ~CLONE_IGNORED_FLAGS
;
6149 /* Emulate vfork() with fork() */
6150 if (flags
& CLONE_VFORK
)
6151 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6153 if (flags
& CLONE_VM
) {
6154 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6155 new_thread_info info
;
6156 pthread_attr_t attr
;
6158 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6159 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6160 return -TARGET_EINVAL
;
6163 ts
= g_new0(TaskState
, 1);
6164 init_task_state(ts
);
6166 /* Grab a mutex so that thread setup appears atomic. */
6167 pthread_mutex_lock(&clone_lock
);
6169 /* we create a new CPU instance. */
6170 new_env
= cpu_copy(env
);
6171 /* Init regs that differ from the parent. */
6172 cpu_clone_regs_child(new_env
, newsp
, flags
);
6173 cpu_clone_regs_parent(env
, flags
);
6174 new_cpu
= env_cpu(new_env
);
6175 new_cpu
->opaque
= ts
;
6176 ts
->bprm
= parent_ts
->bprm
;
6177 ts
->info
= parent_ts
->info
;
6178 ts
->signal_mask
= parent_ts
->signal_mask
;
6180 if (flags
& CLONE_CHILD_CLEARTID
) {
6181 ts
->child_tidptr
= child_tidptr
;
6184 if (flags
& CLONE_SETTLS
) {
6185 cpu_set_tls (new_env
, newtls
);
6188 memset(&info
, 0, sizeof(info
));
6189 pthread_mutex_init(&info
.mutex
, NULL
);
6190 pthread_mutex_lock(&info
.mutex
);
6191 pthread_cond_init(&info
.cond
, NULL
);
6193 if (flags
& CLONE_CHILD_SETTID
) {
6194 info
.child_tidptr
= child_tidptr
;
6196 if (flags
& CLONE_PARENT_SETTID
) {
6197 info
.parent_tidptr
= parent_tidptr
;
6200 ret
= pthread_attr_init(&attr
);
6201 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6202 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6203 /* It is not safe to deliver signals until the child has finished
6204 initializing, so temporarily block all signals. */
6205 sigfillset(&sigmask
);
6206 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6207 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6209 /* If this is our first additional thread, we need to ensure we
6210 * generate code for parallel execution and flush old translations.
6212 if (!parallel_cpus
) {
6213 parallel_cpus
= true;
6217 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6218 /* TODO: Free new CPU state if thread creation failed. */
6220 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6221 pthread_attr_destroy(&attr
);
6223 /* Wait for the child to initialize. */
6224 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6229 pthread_mutex_unlock(&info
.mutex
);
6230 pthread_cond_destroy(&info
.cond
);
6231 pthread_mutex_destroy(&info
.mutex
);
6232 pthread_mutex_unlock(&clone_lock
);
6234 /* if no CLONE_VM, we consider it is a fork */
6235 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6236 return -TARGET_EINVAL
;
6239 /* We can't support custom termination signals */
6240 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6241 return -TARGET_EINVAL
;
6244 if (block_signals()) {
6245 return -TARGET_ERESTARTSYS
;
6251 /* Child Process. */
6252 cpu_clone_regs_child(env
, newsp
, flags
);
6254 /* There is a race condition here. The parent process could
6255 theoretically read the TID in the child process before the child
6256 tid is set. This would require using either ptrace
6257 (not implemented) or having *_tidptr to point at a shared memory
6258 mapping. We can't repeat the spinlock hack used above because
6259 the child process gets its own copy of the lock. */
6260 if (flags
& CLONE_CHILD_SETTID
)
6261 put_user_u32(sys_gettid(), child_tidptr
);
6262 if (flags
& CLONE_PARENT_SETTID
)
6263 put_user_u32(sys_gettid(), parent_tidptr
);
6264 ts
= (TaskState
*)cpu
->opaque
;
6265 if (flags
& CLONE_SETTLS
)
6266 cpu_set_tls (env
, newtls
);
6267 if (flags
& CLONE_CHILD_CLEARTID
)
6268 ts
->child_tidptr
= child_tidptr
;
6270 cpu_clone_regs_parent(env
, flags
);
6277 /* warning : doesn't handle linux specific flags... */
6278 static int target_to_host_fcntl_cmd(int cmd
)
6283 case TARGET_F_DUPFD
:
6284 case TARGET_F_GETFD
:
6285 case TARGET_F_SETFD
:
6286 case TARGET_F_GETFL
:
6287 case TARGET_F_SETFL
:
6288 case TARGET_F_OFD_GETLK
:
6289 case TARGET_F_OFD_SETLK
:
6290 case TARGET_F_OFD_SETLKW
:
6293 case TARGET_F_GETLK
:
6296 case TARGET_F_SETLK
:
6299 case TARGET_F_SETLKW
:
6302 case TARGET_F_GETOWN
:
6305 case TARGET_F_SETOWN
:
6308 case TARGET_F_GETSIG
:
6311 case TARGET_F_SETSIG
:
6314 #if TARGET_ABI_BITS == 32
6315 case TARGET_F_GETLK64
:
6318 case TARGET_F_SETLK64
:
6321 case TARGET_F_SETLKW64
:
6325 case TARGET_F_SETLEASE
:
6328 case TARGET_F_GETLEASE
:
6331 #ifdef F_DUPFD_CLOEXEC
6332 case TARGET_F_DUPFD_CLOEXEC
:
6333 ret
= F_DUPFD_CLOEXEC
;
6336 case TARGET_F_NOTIFY
:
6340 case TARGET_F_GETOWN_EX
:
6345 case TARGET_F_SETOWN_EX
:
6350 case TARGET_F_SETPIPE_SZ
:
6353 case TARGET_F_GETPIPE_SZ
:
6358 ret
= -TARGET_EINVAL
;
6362 #if defined(__powerpc64__)
6363 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6364 * is not supported by kernel. The glibc fcntl call actually adjusts
6365 * them to 5, 6 and 7 before making the syscall(). Since we make the
6366 * syscall directly, adjust to what is supported by the kernel.
6368 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6369 ret
-= F_GETLK64
- 5;
/*
 * Table of flock l_type values translated between guest and host.
 * Expanded twice with different TRANSTBL_CONVERT definitions to build
 * both directions of the conversion switch.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
6385 static int target_to_host_flock(int type
)
6387 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6389 #undef TRANSTBL_CONVERT
6390 return -TARGET_EINVAL
;
6393 static int host_to_target_flock(int type
)
6395 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6397 #undef TRANSTBL_CONVERT
6398 /* if we don't know how to convert the value coming
6399 * from the host we copy to the target field as-is
6404 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6405 abi_ulong target_flock_addr
)
6407 struct target_flock
*target_fl
;
6410 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6411 return -TARGET_EFAULT
;
6414 __get_user(l_type
, &target_fl
->l_type
);
6415 l_type
= target_to_host_flock(l_type
);
6419 fl
->l_type
= l_type
;
6420 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6421 __get_user(fl
->l_start
, &target_fl
->l_start
);
6422 __get_user(fl
->l_len
, &target_fl
->l_len
);
6423 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6424 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6428 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6429 const struct flock64
*fl
)
6431 struct target_flock
*target_fl
;
6434 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6435 return -TARGET_EFAULT
;
6438 l_type
= host_to_target_flock(fl
->l_type
);
6439 __put_user(l_type
, &target_fl
->l_type
);
6440 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6441 __put_user(fl
->l_start
, &target_fl
->l_start
);
6442 __put_user(fl
->l_len
, &target_fl
->l_len
);
6443 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6444 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6448 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6449 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6451 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6452 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6453 abi_ulong target_flock_addr
)
6455 struct target_oabi_flock64
*target_fl
;
6458 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6459 return -TARGET_EFAULT
;
6462 __get_user(l_type
, &target_fl
->l_type
);
6463 l_type
= target_to_host_flock(l_type
);
6467 fl
->l_type
= l_type
;
6468 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6469 __get_user(fl
->l_start
, &target_fl
->l_start
);
6470 __get_user(fl
->l_len
, &target_fl
->l_len
);
6471 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6472 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6476 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6477 const struct flock64
*fl
)
6479 struct target_oabi_flock64
*target_fl
;
6482 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6483 return -TARGET_EFAULT
;
6486 l_type
= host_to_target_flock(fl
->l_type
);
6487 __put_user(l_type
, &target_fl
->l_type
);
6488 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6489 __put_user(fl
->l_start
, &target_fl
->l_start
);
6490 __put_user(fl
->l_len
, &target_fl
->l_len
);
6491 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6492 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6497 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6498 abi_ulong target_flock_addr
)
6500 struct target_flock64
*target_fl
;
6503 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6504 return -TARGET_EFAULT
;
6507 __get_user(l_type
, &target_fl
->l_type
);
6508 l_type
= target_to_host_flock(l_type
);
6512 fl
->l_type
= l_type
;
6513 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6514 __get_user(fl
->l_start
, &target_fl
->l_start
);
6515 __get_user(fl
->l_len
, &target_fl
->l_len
);
6516 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6517 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6521 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6522 const struct flock64
*fl
)
6524 struct target_flock64
*target_fl
;
6527 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6528 return -TARGET_EFAULT
;
6531 l_type
= host_to_target_flock(fl
->l_type
);
6532 __put_user(l_type
, &target_fl
->l_type
);
6533 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6534 __put_user(fl
->l_start
, &target_fl
->l_start
);
6535 __put_user(fl
->l_len
, &target_fl
->l_len
);
6536 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6537 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6541 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6543 struct flock64 fl64
;
6545 struct f_owner_ex fox
;
6546 struct target_f_owner_ex
*target_fox
;
6549 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6551 if (host_cmd
== -TARGET_EINVAL
)
6555 case TARGET_F_GETLK
:
6556 ret
= copy_from_user_flock(&fl64
, arg
);
6560 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6562 ret
= copy_to_user_flock(arg
, &fl64
);
6566 case TARGET_F_SETLK
:
6567 case TARGET_F_SETLKW
:
6568 ret
= copy_from_user_flock(&fl64
, arg
);
6572 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6575 case TARGET_F_GETLK64
:
6576 case TARGET_F_OFD_GETLK
:
6577 ret
= copy_from_user_flock64(&fl64
, arg
);
6581 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6583 ret
= copy_to_user_flock64(arg
, &fl64
);
6586 case TARGET_F_SETLK64
:
6587 case TARGET_F_SETLKW64
:
6588 case TARGET_F_OFD_SETLK
:
6589 case TARGET_F_OFD_SETLKW
:
6590 ret
= copy_from_user_flock64(&fl64
, arg
);
6594 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6597 case TARGET_F_GETFL
:
6598 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6600 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6604 case TARGET_F_SETFL
:
6605 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6606 target_to_host_bitmask(arg
,
6611 case TARGET_F_GETOWN_EX
:
6612 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6614 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6615 return -TARGET_EFAULT
;
6616 target_fox
->type
= tswap32(fox
.type
);
6617 target_fox
->pid
= tswap32(fox
.pid
);
6618 unlock_user_struct(target_fox
, arg
, 1);
6624 case TARGET_F_SETOWN_EX
:
6625 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6626 return -TARGET_EFAULT
;
6627 fox
.type
= tswap32(target_fox
->type
);
6628 fox
.pid
= tswap32(target_fox
->pid
);
6629 unlock_user_struct(target_fox
, arg
, 0);
6630 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6634 case TARGET_F_SETOWN
:
6635 case TARGET_F_GETOWN
:
6636 case TARGET_F_SETSIG
:
6637 case TARGET_F_GETSIG
:
6638 case TARGET_F_SETLEASE
:
6639 case TARGET_F_GETLEASE
:
6640 case TARGET_F_SETPIPE_SZ
:
6641 case TARGET_F_GETPIPE_SZ
:
6642 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6646 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* With 16-bit uids, clamp 32-bit ids to the overflow id 65534 and
 * sign-extend the 16-bit "-1" sentinel when widening. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uids: identity conversions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6718 /* We must do direct syscalls for setting UID/GID, because we want to
6719 * implement the Linux system call semantics of "change only for this thread",
6720 * not the libc/POSIX semantics of "change for all threads in process".
6721 * (See http://ewontfix.com/17/ for more details.)
6722 * We use the 32-bit version of the syscalls if present; if it is not
6723 * then either the host architecture supports 32-bit UIDs natively with
6724 * the standard syscall, or the 16-bit UID is the best we can do.
6726 #ifdef __NR_setuid32
6727 #define __NR_sys_setuid __NR_setuid32
6729 #define __NR_sys_setuid __NR_setuid
6731 #ifdef __NR_setgid32
6732 #define __NR_sys_setgid __NR_setgid32
6734 #define __NR_sys_setgid __NR_setgid
6736 #ifdef __NR_setresuid32
6737 #define __NR_sys_setresuid __NR_setresuid32
6739 #define __NR_sys_setresuid __NR_setresuid
6741 #ifdef __NR_setresgid32
6742 #define __NR_sys_setresgid __NR_setresgid32
6744 #define __NR_sys_setresgid __NR_setresgid
6747 _syscall1(int, sys_setuid
, uid_t
, uid
)
6748 _syscall1(int, sys_setgid
, gid_t
, gid
)
6749 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6750 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6752 void syscall_init(void)
6755 const argtype
*arg_type
;
6759 thunk_init(STRUCT_MAX
);
6761 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6762 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6763 #include "syscall_types.h"
6765 #undef STRUCT_SPECIAL
6767 /* Build target_to_host_errno_table[] table from
6768 * host_to_target_errno_table[]. */
6769 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6770 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6773 /* we patch the ioctl size if necessary. We rely on the fact that
6774 no ioctl has all the bits at '1' in the size field */
6776 while (ie
->target_cmd
!= 0) {
6777 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6778 TARGET_IOC_SIZEMASK
) {
6779 arg_type
= ie
->arg_type
;
6780 if (arg_type
[0] != TYPE_PTR
) {
6781 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6786 size
= thunk_type_size(arg_type
, 0);
6787 ie
->target_cmd
= (ie
->target_cmd
&
6788 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6789 (size
<< TARGET_IOC_SIZESHIFT
);
6792 /* automatic consistency check if same arch */
6793 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6794 (defined(__x86_64__) && defined(TARGET_X86_64))
6795 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6796 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6797 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#ifdef TARGET_NR_truncate64
/*
 * Handle truncate64: on ABIs that require 64-bit syscall arguments in
 * aligned register pairs, the offset halves arrive shifted by one slot.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * Handle ftruncate64: same register-pair alignment fixup as
 * target_truncate64 above.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Read a guest struct target_itimerspec into a host struct itimerspec.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* Read a guest struct target__kernel_itimerspec (64-bit time_t layout)
 * into a host struct itimerspec.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Write a host struct itimerspec out to a guest struct target_itimerspec.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* Write a host struct itimerspec out to a guest struct
 * target__kernel_itimerspec (64-bit time_t layout).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
6906 #if defined(TARGET_NR_adjtimex) || \
6907 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6908 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6909 abi_long target_addr
)
6911 struct target_timex
*target_tx
;
6913 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6914 return -TARGET_EFAULT
;
6917 __get_user(host_tx
->modes
, &target_tx
->modes
);
6918 __get_user(host_tx
->offset
, &target_tx
->offset
);
6919 __get_user(host_tx
->freq
, &target_tx
->freq
);
6920 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6921 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6922 __get_user(host_tx
->status
, &target_tx
->status
);
6923 __get_user(host_tx
->constant
, &target_tx
->constant
);
6924 __get_user(host_tx
->precision
, &target_tx
->precision
);
6925 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6926 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6927 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6928 __get_user(host_tx
->tick
, &target_tx
->tick
);
6929 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6930 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6931 __get_user(host_tx
->shift
, &target_tx
->shift
);
6932 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6933 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6934 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6935 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6936 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6937 __get_user(host_tx
->tai
, &target_tx
->tai
);
6939 unlock_user_struct(target_tx
, target_addr
, 0);
6943 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6944 struct timex
*host_tx
)
6946 struct target_timex
*target_tx
;
6948 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6949 return -TARGET_EFAULT
;
6952 __put_user(host_tx
->modes
, &target_tx
->modes
);
6953 __put_user(host_tx
->offset
, &target_tx
->offset
);
6954 __put_user(host_tx
->freq
, &target_tx
->freq
);
6955 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6956 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6957 __put_user(host_tx
->status
, &target_tx
->status
);
6958 __put_user(host_tx
->constant
, &target_tx
->constant
);
6959 __put_user(host_tx
->precision
, &target_tx
->precision
);
6960 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6961 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6962 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6963 __put_user(host_tx
->tick
, &target_tx
->tick
);
6964 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6965 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6966 __put_user(host_tx
->shift
, &target_tx
->shift
);
6967 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6968 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6969 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6970 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6971 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6972 __put_user(host_tx
->tai
, &target_tx
->tai
);
6974 unlock_user_struct(target_tx
, target_addr
, 1);
6979 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6980 abi_ulong target_addr
)
6982 struct target_sigevent
*target_sevp
;
6984 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6985 return -TARGET_EFAULT
;
6988 /* This union is awkward on 64 bit systems because it has a 32 bit
6989 * integer and a pointer in it; we follow the conversion approach
6990 * used for handling sigval types in signal.c so the guest should get
6991 * the correct value back even if we did a 64 bit byteswap and it's
6992 * using the 32 bit integer.
6994 host_sevp
->sigev_value
.sival_ptr
=
6995 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6996 host_sevp
->sigev_signo
=
6997 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6998 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6999 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7001 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall(2) flag bits to the host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to the guest's 64-bit stat layout
 * (target_stat64, or target_eabi_stat64 for ARM EABI, or target_stat
 * when the target has no separate stat64).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy an already-host-endian struct target_statx out to guest memory,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
/*
 * Issue the raw futex syscall, picking the 64-bit-time_t variant when
 * required on 32-bit hosts.  Returns the raw syscall result.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7163 static int do_safe_futex(int *uaddr
, int op
, int val
,
7164 const struct timespec
*timeout
, int *uaddr2
,
7167 #if HOST_LONG_BITS == 64
7168 #if defined(__NR_futex)
7169 /* always a 64-bit time_t, it doesn't define _time64 version */
7170 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7172 #else /* HOST_LONG_BITS == 64 */
7173 #if defined(__NR_futex_time64)
7174 if (sizeof(timeout
->tv_sec
) == 8) {
7175 /* _time64 function on 32bit arch */
7176 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7180 #if defined(__NR_futex)
7181 /* old function on 32bit arch */
7182 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7184 #endif /* HOST_LONG_BITS == 64 */
7185 return -TARGET_ENOSYS
;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
/*
 * Emulate futex(2) for the guest: translate guest addresses/timeouts
 * and dispatch to do_safe_futex.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3)
                              : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
7240 #if defined(TARGET_NR_futex_time64)
7241 static int do_futex_time64(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7242 target_ulong uaddr2
, int val3
)
7244 struct timespec ts
, *pts
;
7247 /* ??? We assume FUTEX_* constants are the same on both host
7249 #ifdef FUTEX_CMD_MASK
7250 base_op
= op
& FUTEX_CMD_MASK
;
7256 case FUTEX_WAIT_BITSET
:
7259 target_to_host_timespec64(pts
, timeout
);
7263 return do_safe_futex(g2h(uaddr
), op
, tswap32(val
), pts
, NULL
, val3
);
7265 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7267 return do_safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0);
7269 case FUTEX_CMP_REQUEUE
:
7271 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7272 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7273 But the prototype takes a `struct timespec *'; insert casts
7274 to satisfy the compiler. We do not need to tswap TIMEOUT
7275 since it's not compared to guest memory. */
7276 pts
= (struct timespec
*)(uintptr_t) timeout
;
7277 return do_safe_futex(g2h(uaddr
), op
, val
, pts
, g2h(uaddr2
),
7278 (base_op
== FUTEX_CMP_REQUEUE
7282 return -TARGET_ENOSYS
;
7287 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7288 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7289 abi_long handle
, abi_long mount_id
,
7292 struct file_handle
*target_fh
;
7293 struct file_handle
*fh
;
7297 unsigned int size
, total_size
;
7299 if (get_user_s32(size
, handle
)) {
7300 return -TARGET_EFAULT
;
7303 name
= lock_user_string(pathname
);
7305 return -TARGET_EFAULT
;
7308 total_size
= sizeof(struct file_handle
) + size
;
7309 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7311 unlock_user(name
, pathname
, 0);
7312 return -TARGET_EFAULT
;
7315 fh
= g_malloc0(total_size
);
7316 fh
->handle_bytes
= size
;
7318 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7319 unlock_user(name
, pathname
, 0);
7321 /* man name_to_handle_at(2):
7322 * Other than the use of the handle_bytes field, the caller should treat
7323 * the file_handle structure as an opaque data type
7326 memcpy(target_fh
, fh
, total_size
);
7327 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7328 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7330 unlock_user(target_fh
, handle
, total_size
);
7332 if (put_user_s32(mid
, mount_id
)) {
7333 return -TARGET_EFAULT
;
7341 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7342 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7345 struct file_handle
*target_fh
;
7346 struct file_handle
*fh
;
7347 unsigned int size
, total_size
;
7350 if (get_user_s32(size
, handle
)) {
7351 return -TARGET_EFAULT
;
7354 total_size
= sizeof(struct file_handle
) + size
;
7355 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7357 return -TARGET_EFAULT
;
7360 fh
= g_memdup(target_fh
, total_size
);
7361 fh
->handle_bytes
= size
;
7362 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7364 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7365 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7369 unlock_user(target_fh
, handle
, total_size
);
7375 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7377 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7380 target_sigset_t
*target_mask
;
7384 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7385 return -TARGET_EINVAL
;
7387 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7388 return -TARGET_EFAULT
;
7391 target_to_host_sigset(&host_mask
, target_mask
);
7393 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7395 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7397 fd_trans_register(ret
, &target_signalfd_trans
);
7400 unlock_user_struct(target_mask
, mask
, 0);
7406 /* Map host to target signal numbers for the wait family of syscalls.
7407 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; remap just those. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7420 static int open_self_cmdline(void *cpu_env
, int fd
)
7422 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7423 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7426 for (i
= 0; i
< bprm
->argc
; i
++) {
7427 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7429 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7437 static int open_self_maps(void *cpu_env
, int fd
)
7439 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7440 TaskState
*ts
= cpu
->opaque
;
7441 GSList
*map_info
= read_self_maps();
7445 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7446 MapInfo
*e
= (MapInfo
*) s
->data
;
7448 if (h2g_valid(e
->start
)) {
7449 unsigned long min
= e
->start
;
7450 unsigned long max
= e
->end
;
7451 int flags
= page_get_flags(h2g(min
));
7454 max
= h2g_valid(max
- 1) ?
7455 max
: (uintptr_t) g2h(GUEST_ADDR_MAX
) + 1;
7457 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7461 if (h2g(min
) == ts
->info
->stack_limit
) {
7467 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7468 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7469 h2g(min
), h2g(max
- 1) + 1,
7470 e
->is_read
? 'r' : '-',
7471 e
->is_write
? 'w' : '-',
7472 e
->is_exec
? 'x' : '-',
7473 e
->is_priv
? 'p' : '-',
7474 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7476 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7483 free_self_maps(map_info
);
7485 #ifdef TARGET_VSYSCALL_PAGE
7487 * We only support execution from the vsyscall page.
7488 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7490 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7491 " --xp 00000000 00:00 0",
7492 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7493 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7499 static int open_self_stat(void *cpu_env
, int fd
)
7501 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7502 TaskState
*ts
= cpu
->opaque
;
7503 g_autoptr(GString
) buf
= g_string_new(NULL
);
7506 for (i
= 0; i
< 44; i
++) {
7509 g_string_printf(buf
, FMT_pid
" ", getpid());
7510 } else if (i
== 1) {
7512 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7513 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7514 g_string_printf(buf
, "(%.15s) ", bin
);
7515 } else if (i
== 27) {
7517 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7519 /* for the rest, there is MasterCard */
7520 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7523 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7531 static int open_self_auxv(void *cpu_env
, int fd
)
7533 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7534 TaskState
*ts
= cpu
->opaque
;
7535 abi_ulong auxv
= ts
->info
->saved_auxv
;
7536 abi_ulong len
= ts
->info
->auxv_len
;
7540 * Auxiliary vector is stored in target process stack.
7541 * read in whole auxv vector and copy it to file
7543 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7547 r
= write(fd
, ptr
, len
);
7554 lseek(fd
, 0, SEEK_SET
);
7555 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 when `filename` names `entry` inside this process's own /proc
 * directory — i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>" —
 * and 0 otherwise.  Used to decide when a /proc open must be faked.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric form: only our own pid counts as "myself". */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7585 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7586 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used in the fake-/proc table (cf. is_proc_myself). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7593 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7594 static int open_net_route(void *cpu_env
, int fd
)
7601 fp
= fopen("/proc/net/route", "r");
7608 read
= getline(&line
, &len
, fp
);
7609 dprintf(fd
, "%s", line
);
7613 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7615 uint32_t dest
, gw
, mask
;
7616 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7619 fields
= sscanf(line
,
7620 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7621 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7622 &mask
, &mtu
, &window
, &irtt
);
7626 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7627 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7628 metric
, tswap32(mask
), mtu
, window
, irtt
);
7638 #if defined(TARGET_SPARC)
/* Back /proc/cpuinfo for SPARC guests with a minimal fixed entry. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
7646 #if defined(TARGET_HPPA)
/* Back /proc/cpuinfo for HPPA guests with a fixed PA7300LC description. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
7658 #if defined(TARGET_M68K)
/* Back /proc/hardware for m68k guests with a fixed model line. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
7666 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7669 const char *filename
;
7670 int (*fill
)(void *cpu_env
, int fd
);
7671 int (*cmp
)(const char *s1
, const char *s2
);
7673 const struct fake_open
*fake_open
;
7674 static const struct fake_open fakes
[] = {
7675 { "maps", open_self_maps
, is_proc_myself
},
7676 { "stat", open_self_stat
, is_proc_myself
},
7677 { "auxv", open_self_auxv
, is_proc_myself
},
7678 { "cmdline", open_self_cmdline
, is_proc_myself
},
7679 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7680 { "/proc/net/route", open_net_route
, is_proc
},
7682 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7683 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
7685 #if defined(TARGET_M68K)
7686 { "/proc/hardware", open_hardware
, is_proc
},
7688 { NULL
, NULL
, NULL
}
7691 if (is_proc_myself(pathname
, "exe")) {
7692 int execfd
= qemu_getauxval(AT_EXECFD
);
7693 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7696 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7697 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7702 if (fake_open
->filename
) {
7704 char filename
[PATH_MAX
];
7707 /* create temporary file to map stat to */
7708 tmpdir
= getenv("TMPDIR");
7711 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7712 fd
= mkstemp(filename
);
7718 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7724 lseek(fd
, 0, SEEK_SET
);
7729 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7732 #define TIMER_MAGIC 0x0caf0000
7733 #define TIMER_MAGIC_MASK 0xffff0000
7735 /* Convert QEMU provided timer ID back to internal 16bit index format */
7736 static target_timer_t
get_timer_id(abi_long arg
)
7738 target_timer_t timerid
= arg
;
7740 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7741 return -TARGET_EINVAL
;
7746 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7747 return -TARGET_EINVAL
;
7753 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7755 abi_ulong target_addr
,
7758 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7759 unsigned host_bits
= sizeof(*host_mask
) * 8;
7760 abi_ulong
*target_mask
;
7763 assert(host_size
>= target_size
);
7765 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7767 return -TARGET_EFAULT
;
7769 memset(host_mask
, 0, host_size
);
7771 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7772 unsigned bit
= i
* target_bits
;
7775 __get_user(val
, &target_mask
[i
]);
7776 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7777 if (val
& (1UL << j
)) {
7778 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7783 unlock_user(target_mask
, target_addr
, 0);
7787 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7789 abi_ulong target_addr
,
7792 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7793 unsigned host_bits
= sizeof(*host_mask
) * 8;
7794 abi_ulong
*target_mask
;
7797 assert(host_size
>= target_size
);
7799 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7801 return -TARGET_EFAULT
;
7804 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7805 unsigned bit
= i
* target_bits
;
7808 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7809 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7813 __put_user(val
, &target_mask
[i
]);
7816 unlock_user(target_mask
, target_addr
, target_size
);
7820 /* This is an internal helper for do_syscall so that it is easier
7821 * to have a single return point, so that actions, such as logging
7822 * of syscall results, can be performed.
7823 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7825 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7826 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7827 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7830 CPUState
*cpu
= env_cpu(cpu_env
);
7832 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7833 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7834 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7835 || defined(TARGET_NR_statx)
7838 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7839 || defined(TARGET_NR_fstatfs)
7845 case TARGET_NR_exit
:
7846 /* In old applications this may be used to implement _exit(2).
7847 However in threaded applictions it is used for thread termination,
7848 and _exit_group is used for application termination.
7849 Do thread termination if we have more then one thread. */
7851 if (block_signals()) {
7852 return -TARGET_ERESTARTSYS
;
7855 pthread_mutex_lock(&clone_lock
);
7857 if (CPU_NEXT(first_cpu
)) {
7858 TaskState
*ts
= cpu
->opaque
;
7860 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
7861 object_unref(OBJECT(cpu
));
7863 * At this point the CPU should be unrealized and removed
7864 * from cpu lists. We can clean-up the rest of the thread
7865 * data without the lock held.
7868 pthread_mutex_unlock(&clone_lock
);
7870 if (ts
->child_tidptr
) {
7871 put_user_u32(0, ts
->child_tidptr
);
7872 do_sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7877 rcu_unregister_thread();
7881 pthread_mutex_unlock(&clone_lock
);
7882 preexit_cleanup(cpu_env
, arg1
);
7884 return 0; /* avoid warning */
7885 case TARGET_NR_read
:
7886 if (arg2
== 0 && arg3
== 0) {
7887 return get_errno(safe_read(arg1
, 0, 0));
7889 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7890 return -TARGET_EFAULT
;
7891 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7893 fd_trans_host_to_target_data(arg1
)) {
7894 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7896 unlock_user(p
, arg2
, ret
);
7899 case TARGET_NR_write
:
7900 if (arg2
== 0 && arg3
== 0) {
7901 return get_errno(safe_write(arg1
, 0, 0));
7903 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7904 return -TARGET_EFAULT
;
7905 if (fd_trans_target_to_host_data(arg1
)) {
7906 void *copy
= g_malloc(arg3
);
7907 memcpy(copy
, p
, arg3
);
7908 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7910 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7914 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7916 unlock_user(p
, arg2
, 0);
7919 #ifdef TARGET_NR_open
7920 case TARGET_NR_open
:
7921 if (!(p
= lock_user_string(arg1
)))
7922 return -TARGET_EFAULT
;
7923 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7924 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7926 fd_trans_unregister(ret
);
7927 unlock_user(p
, arg1
, 0);
7930 case TARGET_NR_openat
:
7931 if (!(p
= lock_user_string(arg2
)))
7932 return -TARGET_EFAULT
;
7933 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7934 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7936 fd_trans_unregister(ret
);
7937 unlock_user(p
, arg2
, 0);
7939 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7940 case TARGET_NR_name_to_handle_at
:
7941 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7944 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7945 case TARGET_NR_open_by_handle_at
:
7946 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7947 fd_trans_unregister(ret
);
7950 case TARGET_NR_close
:
7951 fd_trans_unregister(arg1
);
7952 return get_errno(close(arg1
));
7955 return do_brk(arg1
);
7956 #ifdef TARGET_NR_fork
7957 case TARGET_NR_fork
:
7958 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7960 #ifdef TARGET_NR_waitpid
7961 case TARGET_NR_waitpid
:
7964 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7965 if (!is_error(ret
) && arg2
&& ret
7966 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7967 return -TARGET_EFAULT
;
7971 #ifdef TARGET_NR_waitid
7972 case TARGET_NR_waitid
:
7976 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7977 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7978 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7979 return -TARGET_EFAULT
;
7980 host_to_target_siginfo(p
, &info
);
7981 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7986 #ifdef TARGET_NR_creat /* not on alpha */
7987 case TARGET_NR_creat
:
7988 if (!(p
= lock_user_string(arg1
)))
7989 return -TARGET_EFAULT
;
7990 ret
= get_errno(creat(p
, arg2
));
7991 fd_trans_unregister(ret
);
7992 unlock_user(p
, arg1
, 0);
7995 #ifdef TARGET_NR_link
7996 case TARGET_NR_link
:
7999 p
= lock_user_string(arg1
);
8000 p2
= lock_user_string(arg2
);
8002 ret
= -TARGET_EFAULT
;
8004 ret
= get_errno(link(p
, p2
));
8005 unlock_user(p2
, arg2
, 0);
8006 unlock_user(p
, arg1
, 0);
8010 #if defined(TARGET_NR_linkat)
8011 case TARGET_NR_linkat
:
8015 return -TARGET_EFAULT
;
8016 p
= lock_user_string(arg2
);
8017 p2
= lock_user_string(arg4
);
8019 ret
= -TARGET_EFAULT
;
8021 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8022 unlock_user(p
, arg2
, 0);
8023 unlock_user(p2
, arg4
, 0);
8027 #ifdef TARGET_NR_unlink
8028 case TARGET_NR_unlink
:
8029 if (!(p
= lock_user_string(arg1
)))
8030 return -TARGET_EFAULT
;
8031 ret
= get_errno(unlink(p
));
8032 unlock_user(p
, arg1
, 0);
8035 #if defined(TARGET_NR_unlinkat)
8036 case TARGET_NR_unlinkat
:
8037 if (!(p
= lock_user_string(arg2
)))
8038 return -TARGET_EFAULT
;
8039 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8040 unlock_user(p
, arg2
, 0);
8043 case TARGET_NR_execve
:
8045 char **argp
, **envp
;
8048 abi_ulong guest_argp
;
8049 abi_ulong guest_envp
;
8056 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8057 if (get_user_ual(addr
, gp
))
8058 return -TARGET_EFAULT
;
8065 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8066 if (get_user_ual(addr
, gp
))
8067 return -TARGET_EFAULT
;
8073 argp
= g_new0(char *, argc
+ 1);
8074 envp
= g_new0(char *, envc
+ 1);
8076 for (gp
= guest_argp
, q
= argp
; gp
;
8077 gp
+= sizeof(abi_ulong
), q
++) {
8078 if (get_user_ual(addr
, gp
))
8082 if (!(*q
= lock_user_string(addr
)))
8084 total_size
+= strlen(*q
) + 1;
8088 for (gp
= guest_envp
, q
= envp
; gp
;
8089 gp
+= sizeof(abi_ulong
), q
++) {
8090 if (get_user_ual(addr
, gp
))
8094 if (!(*q
= lock_user_string(addr
)))
8096 total_size
+= strlen(*q
) + 1;
8100 if (!(p
= lock_user_string(arg1
)))
8102 /* Although execve() is not an interruptible syscall it is
8103 * a special case where we must use the safe_syscall wrapper:
8104 * if we allow a signal to happen before we make the host
8105 * syscall then we will 'lose' it, because at the point of
8106 * execve the process leaves QEMU's control. So we use the
8107 * safe syscall wrapper to ensure that we either take the
8108 * signal as a guest signal, or else it does not happen
8109 * before the execve completes and makes it the other
8110 * program's problem.
8112 ret
= get_errno(safe_execve(p
, argp
, envp
));
8113 unlock_user(p
, arg1
, 0);
8118 ret
= -TARGET_EFAULT
;
8121 for (gp
= guest_argp
, q
= argp
; *q
;
8122 gp
+= sizeof(abi_ulong
), q
++) {
8123 if (get_user_ual(addr
, gp
)
8126 unlock_user(*q
, addr
, 0);
8128 for (gp
= guest_envp
, q
= envp
; *q
;
8129 gp
+= sizeof(abi_ulong
), q
++) {
8130 if (get_user_ual(addr
, gp
)
8133 unlock_user(*q
, addr
, 0);
8140 case TARGET_NR_chdir
:
8141 if (!(p
= lock_user_string(arg1
)))
8142 return -TARGET_EFAULT
;
8143 ret
= get_errno(chdir(p
));
8144 unlock_user(p
, arg1
, 0);
8146 #ifdef TARGET_NR_time
8147 case TARGET_NR_time
:
8150 ret
= get_errno(time(&host_time
));
8153 && put_user_sal(host_time
, arg1
))
8154 return -TARGET_EFAULT
;
8158 #ifdef TARGET_NR_mknod
8159 case TARGET_NR_mknod
:
8160 if (!(p
= lock_user_string(arg1
)))
8161 return -TARGET_EFAULT
;
8162 ret
= get_errno(mknod(p
, arg2
, arg3
));
8163 unlock_user(p
, arg1
, 0);
8166 #if defined(TARGET_NR_mknodat)
8167 case TARGET_NR_mknodat
:
8168 if (!(p
= lock_user_string(arg2
)))
8169 return -TARGET_EFAULT
;
8170 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8171 unlock_user(p
, arg2
, 0);
8174 #ifdef TARGET_NR_chmod
8175 case TARGET_NR_chmod
:
8176 if (!(p
= lock_user_string(arg1
)))
8177 return -TARGET_EFAULT
;
8178 ret
= get_errno(chmod(p
, arg2
));
8179 unlock_user(p
, arg1
, 0);
8182 #ifdef TARGET_NR_lseek
8183 case TARGET_NR_lseek
:
8184 return get_errno(lseek(arg1
, arg2
, arg3
));
8186 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8187 /* Alpha specific */
8188 case TARGET_NR_getxpid
:
8189 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8190 return get_errno(getpid());
8192 #ifdef TARGET_NR_getpid
8193 case TARGET_NR_getpid
:
8194 return get_errno(getpid());
8196 case TARGET_NR_mount
:
8198 /* need to look at the data field */
8202 p
= lock_user_string(arg1
);
8204 return -TARGET_EFAULT
;
8210 p2
= lock_user_string(arg2
);
8213 unlock_user(p
, arg1
, 0);
8215 return -TARGET_EFAULT
;
8219 p3
= lock_user_string(arg3
);
8222 unlock_user(p
, arg1
, 0);
8224 unlock_user(p2
, arg2
, 0);
8225 return -TARGET_EFAULT
;
8231 /* FIXME - arg5 should be locked, but it isn't clear how to
8232 * do that since it's not guaranteed to be a NULL-terminated
8236 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8238 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8240 ret
= get_errno(ret
);
8243 unlock_user(p
, arg1
, 0);
8245 unlock_user(p2
, arg2
, 0);
8247 unlock_user(p3
, arg3
, 0);
8251 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8252 #if defined(TARGET_NR_umount)
8253 case TARGET_NR_umount
:
8255 #if defined(TARGET_NR_oldumount)
8256 case TARGET_NR_oldumount
:
8258 if (!(p
= lock_user_string(arg1
)))
8259 return -TARGET_EFAULT
;
8260 ret
= get_errno(umount(p
));
8261 unlock_user(p
, arg1
, 0);
8264 #ifdef TARGET_NR_stime /* not on alpha */
8265 case TARGET_NR_stime
:
8269 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8270 return -TARGET_EFAULT
;
8272 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8275 #ifdef TARGET_NR_alarm /* not on alpha */
8276 case TARGET_NR_alarm
:
8279 #ifdef TARGET_NR_pause /* not on alpha */
8280 case TARGET_NR_pause
:
8281 if (!block_signals()) {
8282 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8284 return -TARGET_EINTR
;
8286 #ifdef TARGET_NR_utime
8287 case TARGET_NR_utime
:
8289 struct utimbuf tbuf
, *host_tbuf
;
8290 struct target_utimbuf
*target_tbuf
;
8292 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8293 return -TARGET_EFAULT
;
8294 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8295 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8296 unlock_user_struct(target_tbuf
, arg2
, 0);
8301 if (!(p
= lock_user_string(arg1
)))
8302 return -TARGET_EFAULT
;
8303 ret
= get_errno(utime(p
, host_tbuf
));
8304 unlock_user(p
, arg1
, 0);
8308 #ifdef TARGET_NR_utimes
8309 case TARGET_NR_utimes
:
8311 struct timeval
*tvp
, tv
[2];
8313 if (copy_from_user_timeval(&tv
[0], arg2
)
8314 || copy_from_user_timeval(&tv
[1],
8315 arg2
+ sizeof(struct target_timeval
)))
8316 return -TARGET_EFAULT
;
8321 if (!(p
= lock_user_string(arg1
)))
8322 return -TARGET_EFAULT
;
8323 ret
= get_errno(utimes(p
, tvp
));
8324 unlock_user(p
, arg1
, 0);
8328 #if defined(TARGET_NR_futimesat)
8329 case TARGET_NR_futimesat
:
8331 struct timeval
*tvp
, tv
[2];
8333 if (copy_from_user_timeval(&tv
[0], arg3
)
8334 || copy_from_user_timeval(&tv
[1],
8335 arg3
+ sizeof(struct target_timeval
)))
8336 return -TARGET_EFAULT
;
8341 if (!(p
= lock_user_string(arg2
))) {
8342 return -TARGET_EFAULT
;
8344 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8345 unlock_user(p
, arg2
, 0);
8349 #ifdef TARGET_NR_access
8350 case TARGET_NR_access
:
8351 if (!(p
= lock_user_string(arg1
))) {
8352 return -TARGET_EFAULT
;
8354 ret
= get_errno(access(path(p
), arg2
));
8355 unlock_user(p
, arg1
, 0);
8358 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8359 case TARGET_NR_faccessat
:
8360 if (!(p
= lock_user_string(arg2
))) {
8361 return -TARGET_EFAULT
;
8363 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8364 unlock_user(p
, arg2
, 0);
8367 #ifdef TARGET_NR_nice /* not on alpha */
8368 case TARGET_NR_nice
:
8369 return get_errno(nice(arg1
));
8371 case TARGET_NR_sync
:
8374 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8375 case TARGET_NR_syncfs
:
8376 return get_errno(syncfs(arg1
));
8378 case TARGET_NR_kill
:
8379 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8380 #ifdef TARGET_NR_rename
8381 case TARGET_NR_rename
:
8384 p
= lock_user_string(arg1
);
8385 p2
= lock_user_string(arg2
);
8387 ret
= -TARGET_EFAULT
;
8389 ret
= get_errno(rename(p
, p2
));
8390 unlock_user(p2
, arg2
, 0);
8391 unlock_user(p
, arg1
, 0);
8395 #if defined(TARGET_NR_renameat)
8396 case TARGET_NR_renameat
:
8399 p
= lock_user_string(arg2
);
8400 p2
= lock_user_string(arg4
);
8402 ret
= -TARGET_EFAULT
;
8404 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8405 unlock_user(p2
, arg4
, 0);
8406 unlock_user(p
, arg2
, 0);
8410 #if defined(TARGET_NR_renameat2)
8411 case TARGET_NR_renameat2
:
8414 p
= lock_user_string(arg2
);
8415 p2
= lock_user_string(arg4
);
8417 ret
= -TARGET_EFAULT
;
8419 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8421 unlock_user(p2
, arg4
, 0);
8422 unlock_user(p
, arg2
, 0);
8426 #ifdef TARGET_NR_mkdir
8427 case TARGET_NR_mkdir
:
8428 if (!(p
= lock_user_string(arg1
)))
8429 return -TARGET_EFAULT
;
8430 ret
= get_errno(mkdir(p
, arg2
));
8431 unlock_user(p
, arg1
, 0);
8434 #if defined(TARGET_NR_mkdirat)
8435 case TARGET_NR_mkdirat
:
8436 if (!(p
= lock_user_string(arg2
)))
8437 return -TARGET_EFAULT
;
8438 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8439 unlock_user(p
, arg2
, 0);
8442 #ifdef TARGET_NR_rmdir
8443 case TARGET_NR_rmdir
:
8444 if (!(p
= lock_user_string(arg1
)))
8445 return -TARGET_EFAULT
;
8446 ret
= get_errno(rmdir(p
));
8447 unlock_user(p
, arg1
, 0);
8451 ret
= get_errno(dup(arg1
));
8453 fd_trans_dup(arg1
, ret
);
8456 #ifdef TARGET_NR_pipe
8457 case TARGET_NR_pipe
:
8458 return do_pipe(cpu_env
, arg1
, 0, 0);
8460 #ifdef TARGET_NR_pipe2
8461 case TARGET_NR_pipe2
:
8462 return do_pipe(cpu_env
, arg1
,
8463 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8465 case TARGET_NR_times
:
8467 struct target_tms
*tmsp
;
8469 ret
= get_errno(times(&tms
));
8471 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8473 return -TARGET_EFAULT
;
8474 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8475 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8476 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8477 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8480 ret
= host_to_target_clock_t(ret
);
8483 case TARGET_NR_acct
:
8485 ret
= get_errno(acct(NULL
));
8487 if (!(p
= lock_user_string(arg1
))) {
8488 return -TARGET_EFAULT
;
8490 ret
= get_errno(acct(path(p
)));
8491 unlock_user(p
, arg1
, 0);
8494 #ifdef TARGET_NR_umount2
8495 case TARGET_NR_umount2
:
8496 if (!(p
= lock_user_string(arg1
)))
8497 return -TARGET_EFAULT
;
8498 ret
= get_errno(umount2(p
, arg2
));
8499 unlock_user(p
, arg1
, 0);
8502 case TARGET_NR_ioctl
:
8503 return do_ioctl(arg1
, arg2
, arg3
);
8504 #ifdef TARGET_NR_fcntl
8505 case TARGET_NR_fcntl
:
8506 return do_fcntl(arg1
, arg2
, arg3
);
8508 case TARGET_NR_setpgid
:
8509 return get_errno(setpgid(arg1
, arg2
));
8510 case TARGET_NR_umask
:
8511 return get_errno(umask(arg1
));
8512 case TARGET_NR_chroot
:
8513 if (!(p
= lock_user_string(arg1
)))
8514 return -TARGET_EFAULT
;
8515 ret
= get_errno(chroot(p
));
8516 unlock_user(p
, arg1
, 0);
8518 #ifdef TARGET_NR_dup2
8519 case TARGET_NR_dup2
:
8520 ret
= get_errno(dup2(arg1
, arg2
));
8522 fd_trans_dup(arg1
, arg2
);
8526 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8527 case TARGET_NR_dup3
:
8531 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8534 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8535 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8537 fd_trans_dup(arg1
, arg2
);
8542 #ifdef TARGET_NR_getppid /* not on alpha */
8543 case TARGET_NR_getppid
:
8544 return get_errno(getppid());
8546 #ifdef TARGET_NR_getpgrp
8547 case TARGET_NR_getpgrp
:
8548 return get_errno(getpgrp());
8550 case TARGET_NR_setsid
:
8551 return get_errno(setsid());
8552 #ifdef TARGET_NR_sigaction
8553 case TARGET_NR_sigaction
:
8555 #if defined(TARGET_ALPHA)
8556 struct target_sigaction act
, oact
, *pact
= 0;
8557 struct target_old_sigaction
*old_act
;
8559 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8560 return -TARGET_EFAULT
;
8561 act
._sa_handler
= old_act
->_sa_handler
;
8562 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8563 act
.sa_flags
= old_act
->sa_flags
;
8564 act
.sa_restorer
= 0;
8565 unlock_user_struct(old_act
, arg2
, 0);
8568 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8569 if (!is_error(ret
) && arg3
) {
8570 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8571 return -TARGET_EFAULT
;
8572 old_act
->_sa_handler
= oact
._sa_handler
;
8573 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8574 old_act
->sa_flags
= oact
.sa_flags
;
8575 unlock_user_struct(old_act
, arg3
, 1);
8577 #elif defined(TARGET_MIPS)
8578 struct target_sigaction act
, oact
, *pact
, *old_act
;
8581 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8582 return -TARGET_EFAULT
;
8583 act
._sa_handler
= old_act
->_sa_handler
;
8584 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8585 act
.sa_flags
= old_act
->sa_flags
;
8586 unlock_user_struct(old_act
, arg2
, 0);
8592 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8594 if (!is_error(ret
) && arg3
) {
8595 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8596 return -TARGET_EFAULT
;
8597 old_act
->_sa_handler
= oact
._sa_handler
;
8598 old_act
->sa_flags
= oact
.sa_flags
;
8599 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8600 old_act
->sa_mask
.sig
[1] = 0;
8601 old_act
->sa_mask
.sig
[2] = 0;
8602 old_act
->sa_mask
.sig
[3] = 0;
8603 unlock_user_struct(old_act
, arg3
, 1);
8606 struct target_old_sigaction
*old_act
;
8607 struct target_sigaction act
, oact
, *pact
;
8609 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8610 return -TARGET_EFAULT
;
8611 act
._sa_handler
= old_act
->_sa_handler
;
8612 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8613 act
.sa_flags
= old_act
->sa_flags
;
8614 act
.sa_restorer
= old_act
->sa_restorer
;
8615 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8616 act
.ka_restorer
= 0;
8618 unlock_user_struct(old_act
, arg2
, 0);
8623 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8624 if (!is_error(ret
) && arg3
) {
8625 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8626 return -TARGET_EFAULT
;
8627 old_act
->_sa_handler
= oact
._sa_handler
;
8628 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8629 old_act
->sa_flags
= oact
.sa_flags
;
8630 old_act
->sa_restorer
= oact
.sa_restorer
;
8631 unlock_user_struct(old_act
, arg3
, 1);
8637 case TARGET_NR_rt_sigaction
:
8639 #if defined(TARGET_ALPHA)
8640 /* For Alpha and SPARC this is a 5 argument syscall, with
8641 * a 'restorer' parameter which must be copied into the
8642 * sa_restorer field of the sigaction struct.
8643 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8644 * and arg5 is the sigsetsize.
8645 * Alpha also has a separate rt_sigaction struct that it uses
8646 * here; SPARC uses the usual sigaction struct.
8648 struct target_rt_sigaction
*rt_act
;
8649 struct target_sigaction act
, oact
, *pact
= 0;
8651 if (arg4
!= sizeof(target_sigset_t
)) {
8652 return -TARGET_EINVAL
;
8655 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8656 return -TARGET_EFAULT
;
8657 act
._sa_handler
= rt_act
->_sa_handler
;
8658 act
.sa_mask
= rt_act
->sa_mask
;
8659 act
.sa_flags
= rt_act
->sa_flags
;
8660 act
.sa_restorer
= arg5
;
8661 unlock_user_struct(rt_act
, arg2
, 0);
8664 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8665 if (!is_error(ret
) && arg3
) {
8666 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8667 return -TARGET_EFAULT
;
8668 rt_act
->_sa_handler
= oact
._sa_handler
;
8669 rt_act
->sa_mask
= oact
.sa_mask
;
8670 rt_act
->sa_flags
= oact
.sa_flags
;
8671 unlock_user_struct(rt_act
, arg3
, 1);
8675 target_ulong restorer
= arg4
;
8676 target_ulong sigsetsize
= arg5
;
8678 target_ulong sigsetsize
= arg4
;
8680 struct target_sigaction
*act
;
8681 struct target_sigaction
*oact
;
8683 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8684 return -TARGET_EINVAL
;
8687 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8688 return -TARGET_EFAULT
;
8690 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8691 act
->ka_restorer
= restorer
;
8697 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8698 ret
= -TARGET_EFAULT
;
8699 goto rt_sigaction_fail
;
8703 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8706 unlock_user_struct(act
, arg2
, 0);
8708 unlock_user_struct(oact
, arg3
, 1);
8712 #ifdef TARGET_NR_sgetmask /* not on alpha */
8713 case TARGET_NR_sgetmask
:
8716 abi_ulong target_set
;
8717 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8719 host_to_target_old_sigset(&target_set
, &cur_set
);
8725 #ifdef TARGET_NR_ssetmask /* not on alpha */
8726 case TARGET_NR_ssetmask
:
8729 abi_ulong target_set
= arg1
;
8730 target_to_host_old_sigset(&set
, &target_set
);
8731 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8733 host_to_target_old_sigset(&target_set
, &oset
);
8739 #ifdef TARGET_NR_sigprocmask
8740 case TARGET_NR_sigprocmask
:
8742 #if defined(TARGET_ALPHA)
8743 sigset_t set
, oldset
;
8748 case TARGET_SIG_BLOCK
:
8751 case TARGET_SIG_UNBLOCK
:
8754 case TARGET_SIG_SETMASK
:
8758 return -TARGET_EINVAL
;
8761 target_to_host_old_sigset(&set
, &mask
);
8763 ret
= do_sigprocmask(how
, &set
, &oldset
);
8764 if (!is_error(ret
)) {
8765 host_to_target_old_sigset(&mask
, &oldset
);
8767 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8770 sigset_t set
, oldset
, *set_ptr
;
8775 case TARGET_SIG_BLOCK
:
8778 case TARGET_SIG_UNBLOCK
:
8781 case TARGET_SIG_SETMASK
:
8785 return -TARGET_EINVAL
;
8787 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8788 return -TARGET_EFAULT
;
8789 target_to_host_old_sigset(&set
, p
);
8790 unlock_user(p
, arg2
, 0);
8796 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8797 if (!is_error(ret
) && arg3
) {
8798 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8799 return -TARGET_EFAULT
;
8800 host_to_target_old_sigset(p
, &oldset
);
8801 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8807 case TARGET_NR_rt_sigprocmask
:
8810 sigset_t set
, oldset
, *set_ptr
;
8812 if (arg4
!= sizeof(target_sigset_t
)) {
8813 return -TARGET_EINVAL
;
8818 case TARGET_SIG_BLOCK
:
8821 case TARGET_SIG_UNBLOCK
:
8824 case TARGET_SIG_SETMASK
:
8828 return -TARGET_EINVAL
;
8830 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8831 return -TARGET_EFAULT
;
8832 target_to_host_sigset(&set
, p
);
8833 unlock_user(p
, arg2
, 0);
8839 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8840 if (!is_error(ret
) && arg3
) {
8841 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8842 return -TARGET_EFAULT
;
8843 host_to_target_sigset(p
, &oldset
);
8844 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8848 #ifdef TARGET_NR_sigpending
8849 case TARGET_NR_sigpending
:
8852 ret
= get_errno(sigpending(&set
));
8853 if (!is_error(ret
)) {
8854 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8855 return -TARGET_EFAULT
;
8856 host_to_target_old_sigset(p
, &set
);
8857 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8862 case TARGET_NR_rt_sigpending
:
8866 /* Yes, this check is >, not != like most. We follow the kernel's
8867 * logic and it does it like this because it implements
8868 * NR_sigpending through the same code path, and in that case
8869 * the old_sigset_t is smaller in size.
8871 if (arg2
> sizeof(target_sigset_t
)) {
8872 return -TARGET_EINVAL
;
8875 ret
= get_errno(sigpending(&set
));
8876 if (!is_error(ret
)) {
8877 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8878 return -TARGET_EFAULT
;
8879 host_to_target_sigset(p
, &set
);
8880 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8884 #ifdef TARGET_NR_sigsuspend
8885 case TARGET_NR_sigsuspend
:
8887 TaskState
*ts
= cpu
->opaque
;
8888 #if defined(TARGET_ALPHA)
8889 abi_ulong mask
= arg1
;
8890 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8892 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8893 return -TARGET_EFAULT
;
8894 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8895 unlock_user(p
, arg1
, 0);
8897 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8899 if (ret
!= -TARGET_ERESTARTSYS
) {
8900 ts
->in_sigsuspend
= 1;
8905 case TARGET_NR_rt_sigsuspend
:
8907 TaskState
*ts
= cpu
->opaque
;
8909 if (arg2
!= sizeof(target_sigset_t
)) {
8910 return -TARGET_EINVAL
;
8912 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8913 return -TARGET_EFAULT
;
8914 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8915 unlock_user(p
, arg1
, 0);
8916 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8918 if (ret
!= -TARGET_ERESTARTSYS
) {
8919 ts
->in_sigsuspend
= 1;
8923 #ifdef TARGET_NR_rt_sigtimedwait
8924 case TARGET_NR_rt_sigtimedwait
:
8927 struct timespec uts
, *puts
;
8930 if (arg4
!= sizeof(target_sigset_t
)) {
8931 return -TARGET_EINVAL
;
8934 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8935 return -TARGET_EFAULT
;
8936 target_to_host_sigset(&set
, p
);
8937 unlock_user(p
, arg1
, 0);
8940 if (target_to_host_timespec(puts
, arg3
)) {
8941 return -TARGET_EFAULT
;
8946 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8948 if (!is_error(ret
)) {
8950 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8953 return -TARGET_EFAULT
;
8955 host_to_target_siginfo(p
, &uinfo
);
8956 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8958 ret
= host_to_target_signal(ret
);
8963 case TARGET_NR_rt_sigqueueinfo
:
8967 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8969 return -TARGET_EFAULT
;
8971 target_to_host_siginfo(&uinfo
, p
);
8972 unlock_user(p
, arg3
, 0);
8973 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8976 case TARGET_NR_rt_tgsigqueueinfo
:
8980 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8982 return -TARGET_EFAULT
;
8984 target_to_host_siginfo(&uinfo
, p
);
8985 unlock_user(p
, arg4
, 0);
8986 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8989 #ifdef TARGET_NR_sigreturn
8990 case TARGET_NR_sigreturn
:
8991 if (block_signals()) {
8992 return -TARGET_ERESTARTSYS
;
8994 return do_sigreturn(cpu_env
);
8996 case TARGET_NR_rt_sigreturn
:
8997 if (block_signals()) {
8998 return -TARGET_ERESTARTSYS
;
9000 return do_rt_sigreturn(cpu_env
);
9001 case TARGET_NR_sethostname
:
9002 if (!(p
= lock_user_string(arg1
)))
9003 return -TARGET_EFAULT
;
9004 ret
= get_errno(sethostname(p
, arg2
));
9005 unlock_user(p
, arg1
, 0);
9007 #ifdef TARGET_NR_setrlimit
9008 case TARGET_NR_setrlimit
:
9010 int resource
= target_to_host_resource(arg1
);
9011 struct target_rlimit
*target_rlim
;
9013 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9014 return -TARGET_EFAULT
;
9015 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9016 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9017 unlock_user_struct(target_rlim
, arg2
, 0);
9019 * If we just passed through resource limit settings for memory then
9020 * they would also apply to QEMU's own allocations, and QEMU will
9021 * crash or hang or die if its allocations fail. Ideally we would
9022 * track the guest allocations in QEMU and apply the limits ourselves.
9023 * For now, just tell the guest the call succeeded but don't actually
9026 if (resource
!= RLIMIT_AS
&&
9027 resource
!= RLIMIT_DATA
&&
9028 resource
!= RLIMIT_STACK
) {
9029 return get_errno(setrlimit(resource
, &rlim
));
9035 #ifdef TARGET_NR_getrlimit
9036 case TARGET_NR_getrlimit
:
9038 int resource
= target_to_host_resource(arg1
);
9039 struct target_rlimit
*target_rlim
;
9042 ret
= get_errno(getrlimit(resource
, &rlim
));
9043 if (!is_error(ret
)) {
9044 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9045 return -TARGET_EFAULT
;
9046 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9047 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9048 unlock_user_struct(target_rlim
, arg2
, 1);
9053 case TARGET_NR_getrusage
:
9055 struct rusage rusage
;
9056 ret
= get_errno(getrusage(arg1
, &rusage
));
9057 if (!is_error(ret
)) {
9058 ret
= host_to_target_rusage(arg2
, &rusage
);
9062 #if defined(TARGET_NR_gettimeofday)
9063 case TARGET_NR_gettimeofday
:
9068 ret
= get_errno(gettimeofday(&tv
, &tz
));
9069 if (!is_error(ret
)) {
9070 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9071 return -TARGET_EFAULT
;
9073 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9074 return -TARGET_EFAULT
;
9080 #if defined(TARGET_NR_settimeofday)
9081 case TARGET_NR_settimeofday
:
9083 struct timeval tv
, *ptv
= NULL
;
9084 struct timezone tz
, *ptz
= NULL
;
9087 if (copy_from_user_timeval(&tv
, arg1
)) {
9088 return -TARGET_EFAULT
;
9094 if (copy_from_user_timezone(&tz
, arg2
)) {
9095 return -TARGET_EFAULT
;
9100 return get_errno(settimeofday(ptv
, ptz
));
9103 #if defined(TARGET_NR_select)
9104 case TARGET_NR_select
:
9105 #if defined(TARGET_WANT_NI_OLD_SELECT)
9106 /* some architectures used to have old_select here
9107 * but now ENOSYS it.
9109 ret
= -TARGET_ENOSYS
;
9110 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9111 ret
= do_old_select(arg1
);
9113 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9117 #ifdef TARGET_NR_pselect6
9118 case TARGET_NR_pselect6
:
9120 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9121 fd_set rfds
, wfds
, efds
;
9122 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9123 struct timespec ts
, *ts_ptr
;
9126 * The 6th arg is actually two args smashed together,
9127 * so we cannot use the C library.
9135 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9136 target_sigset_t
*target_sigset
;
9144 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9148 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9152 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9158 * This takes a timespec, and not a timeval, so we cannot
9159 * use the do_select() helper ...
9162 if (target_to_host_timespec(&ts
, ts_addr
)) {
9163 return -TARGET_EFAULT
;
9170 /* Extract the two packed args for the sigset */
9173 sig
.size
= SIGSET_T_SIZE
;
9175 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9177 return -TARGET_EFAULT
;
9179 arg_sigset
= tswapal(arg7
[0]);
9180 arg_sigsize
= tswapal(arg7
[1]);
9181 unlock_user(arg7
, arg6
, 0);
9185 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9186 /* Like the kernel, we enforce correct size sigsets */
9187 return -TARGET_EINVAL
;
9189 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9190 sizeof(*target_sigset
), 1);
9191 if (!target_sigset
) {
9192 return -TARGET_EFAULT
;
9194 target_to_host_sigset(&set
, target_sigset
);
9195 unlock_user(target_sigset
, arg_sigset
, 0);
9203 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9206 if (!is_error(ret
)) {
9207 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9208 return -TARGET_EFAULT
;
9209 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9210 return -TARGET_EFAULT
;
9211 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9212 return -TARGET_EFAULT
;
9214 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9215 return -TARGET_EFAULT
;
9220 #ifdef TARGET_NR_symlink
9221 case TARGET_NR_symlink
:
9224 p
= lock_user_string(arg1
);
9225 p2
= lock_user_string(arg2
);
9227 ret
= -TARGET_EFAULT
;
9229 ret
= get_errno(symlink(p
, p2
));
9230 unlock_user(p2
, arg2
, 0);
9231 unlock_user(p
, arg1
, 0);
9235 #if defined(TARGET_NR_symlinkat)
9236 case TARGET_NR_symlinkat
:
9239 p
= lock_user_string(arg1
);
9240 p2
= lock_user_string(arg3
);
9242 ret
= -TARGET_EFAULT
;
9244 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9245 unlock_user(p2
, arg3
, 0);
9246 unlock_user(p
, arg1
, 0);
9250 #ifdef TARGET_NR_readlink
9251 case TARGET_NR_readlink
:
9254 p
= lock_user_string(arg1
);
9255 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9257 ret
= -TARGET_EFAULT
;
9259 /* Short circuit this for the magic exe check. */
9260 ret
= -TARGET_EINVAL
;
9261 } else if (is_proc_myself((const char *)p
, "exe")) {
9262 char real
[PATH_MAX
], *temp
;
9263 temp
= realpath(exec_path
, real
);
9264 /* Return value is # of bytes that we wrote to the buffer. */
9266 ret
= get_errno(-1);
9268 /* Don't worry about sign mismatch as earlier mapping
9269 * logic would have thrown a bad address error. */
9270 ret
= MIN(strlen(real
), arg3
);
9271 /* We cannot NUL terminate the string. */
9272 memcpy(p2
, real
, ret
);
9275 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9277 unlock_user(p2
, arg2
, ret
);
9278 unlock_user(p
, arg1
, 0);
9282 #if defined(TARGET_NR_readlinkat)
9283 case TARGET_NR_readlinkat
:
9286 p
= lock_user_string(arg2
);
9287 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9289 ret
= -TARGET_EFAULT
;
9290 } else if (is_proc_myself((const char *)p
, "exe")) {
9291 char real
[PATH_MAX
], *temp
;
9292 temp
= realpath(exec_path
, real
);
9293 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9294 snprintf((char *)p2
, arg4
, "%s", real
);
9296 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9298 unlock_user(p2
, arg3
, ret
);
9299 unlock_user(p
, arg2
, 0);
9303 #ifdef TARGET_NR_swapon
9304 case TARGET_NR_swapon
:
9305 if (!(p
= lock_user_string(arg1
)))
9306 return -TARGET_EFAULT
;
9307 ret
= get_errno(swapon(p
, arg2
));
9308 unlock_user(p
, arg1
, 0);
9311 case TARGET_NR_reboot
:
9312 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9313 /* arg4 must be ignored in all other cases */
9314 p
= lock_user_string(arg4
);
9316 return -TARGET_EFAULT
;
9318 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9319 unlock_user(p
, arg4
, 0);
9321 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9324 #ifdef TARGET_NR_mmap
9325 case TARGET_NR_mmap
:
9326 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9327 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9328 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9329 || defined(TARGET_S390X)
9332 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9333 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9334 return -TARGET_EFAULT
;
9341 unlock_user(v
, arg1
, 0);
9342 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9343 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9347 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9348 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9354 #ifdef TARGET_NR_mmap2
9355 case TARGET_NR_mmap2
:
9357 #define MMAP_SHIFT 12
9359 ret
= target_mmap(arg1
, arg2
, arg3
,
9360 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9361 arg5
, arg6
<< MMAP_SHIFT
);
9362 return get_errno(ret
);
9364 case TARGET_NR_munmap
:
9365 return get_errno(target_munmap(arg1
, arg2
));
9366 case TARGET_NR_mprotect
:
9368 TaskState
*ts
= cpu
->opaque
;
9369 /* Special hack to detect libc making the stack executable. */
9370 if ((arg3
& PROT_GROWSDOWN
)
9371 && arg1
>= ts
->info
->stack_limit
9372 && arg1
<= ts
->info
->start_stack
) {
9373 arg3
&= ~PROT_GROWSDOWN
;
9374 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9375 arg1
= ts
->info
->stack_limit
;
9378 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9379 #ifdef TARGET_NR_mremap
9380 case TARGET_NR_mremap
:
9381 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9383 /* ??? msync/mlock/munlock are broken for softmmu. */
9384 #ifdef TARGET_NR_msync
9385 case TARGET_NR_msync
:
9386 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9388 #ifdef TARGET_NR_mlock
9389 case TARGET_NR_mlock
:
9390 return get_errno(mlock(g2h(arg1
), arg2
));
9392 #ifdef TARGET_NR_munlock
9393 case TARGET_NR_munlock
:
9394 return get_errno(munlock(g2h(arg1
), arg2
));
9396 #ifdef TARGET_NR_mlockall
9397 case TARGET_NR_mlockall
:
9398 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9400 #ifdef TARGET_NR_munlockall
9401 case TARGET_NR_munlockall
:
9402 return get_errno(munlockall());
9404 #ifdef TARGET_NR_truncate
9405 case TARGET_NR_truncate
:
9406 if (!(p
= lock_user_string(arg1
)))
9407 return -TARGET_EFAULT
;
9408 ret
= get_errno(truncate(p
, arg2
));
9409 unlock_user(p
, arg1
, 0);
9412 #ifdef TARGET_NR_ftruncate
9413 case TARGET_NR_ftruncate
:
9414 return get_errno(ftruncate(arg1
, arg2
));
9416 case TARGET_NR_fchmod
:
9417 return get_errno(fchmod(arg1
, arg2
));
9418 #if defined(TARGET_NR_fchmodat)
9419 case TARGET_NR_fchmodat
:
9420 if (!(p
= lock_user_string(arg2
)))
9421 return -TARGET_EFAULT
;
9422 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9423 unlock_user(p
, arg2
, 0);
9426 case TARGET_NR_getpriority
:
9427 /* Note that negative values are valid for getpriority, so we must
9428 differentiate based on errno settings. */
9430 ret
= getpriority(arg1
, arg2
);
9431 if (ret
== -1 && errno
!= 0) {
9432 return -host_to_target_errno(errno
);
9435 /* Return value is the unbiased priority. Signal no error. */
9436 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9438 /* Return value is a biased priority to avoid negative numbers. */
9442 case TARGET_NR_setpriority
:
9443 return get_errno(setpriority(arg1
, arg2
, arg3
));
9444 #ifdef TARGET_NR_statfs
9445 case TARGET_NR_statfs
:
9446 if (!(p
= lock_user_string(arg1
))) {
9447 return -TARGET_EFAULT
;
9449 ret
= get_errno(statfs(path(p
), &stfs
));
9450 unlock_user(p
, arg1
, 0);
9452 if (!is_error(ret
)) {
9453 struct target_statfs
*target_stfs
;
9455 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9456 return -TARGET_EFAULT
;
9457 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9458 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9459 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9460 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9461 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9462 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9463 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9464 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9465 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9466 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9467 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9468 #ifdef _STATFS_F_FLAGS
9469 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9471 __put_user(0, &target_stfs
->f_flags
);
9473 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9474 unlock_user_struct(target_stfs
, arg2
, 1);
9478 #ifdef TARGET_NR_fstatfs
9479 case TARGET_NR_fstatfs
:
9480 ret
= get_errno(fstatfs(arg1
, &stfs
));
9481 goto convert_statfs
;
9483 #ifdef TARGET_NR_statfs64
9484 case TARGET_NR_statfs64
:
9485 if (!(p
= lock_user_string(arg1
))) {
9486 return -TARGET_EFAULT
;
9488 ret
= get_errno(statfs(path(p
), &stfs
));
9489 unlock_user(p
, arg1
, 0);
9491 if (!is_error(ret
)) {
9492 struct target_statfs64
*target_stfs
;
9494 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9495 return -TARGET_EFAULT
;
9496 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9497 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9498 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9499 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9500 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9501 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9502 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9503 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9504 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9505 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9506 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9507 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9508 unlock_user_struct(target_stfs
, arg3
, 1);
9511 case TARGET_NR_fstatfs64
:
9512 ret
= get_errno(fstatfs(arg1
, &stfs
));
9513 goto convert_statfs64
;
9515 #ifdef TARGET_NR_socketcall
9516 case TARGET_NR_socketcall
:
9517 return do_socketcall(arg1
, arg2
);
9519 #ifdef TARGET_NR_accept
9520 case TARGET_NR_accept
:
9521 return do_accept4(arg1
, arg2
, arg3
, 0);
9523 #ifdef TARGET_NR_accept4
9524 case TARGET_NR_accept4
:
9525 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9527 #ifdef TARGET_NR_bind
9528 case TARGET_NR_bind
:
9529 return do_bind(arg1
, arg2
, arg3
);
9531 #ifdef TARGET_NR_connect
9532 case TARGET_NR_connect
:
9533 return do_connect(arg1
, arg2
, arg3
);
9535 #ifdef TARGET_NR_getpeername
9536 case TARGET_NR_getpeername
:
9537 return do_getpeername(arg1
, arg2
, arg3
);
9539 #ifdef TARGET_NR_getsockname
9540 case TARGET_NR_getsockname
:
9541 return do_getsockname(arg1
, arg2
, arg3
);
9543 #ifdef TARGET_NR_getsockopt
9544 case TARGET_NR_getsockopt
:
9545 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9547 #ifdef TARGET_NR_listen
9548 case TARGET_NR_listen
:
9549 return get_errno(listen(arg1
, arg2
));
9551 #ifdef TARGET_NR_recv
9552 case TARGET_NR_recv
:
9553 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9555 #ifdef TARGET_NR_recvfrom
9556 case TARGET_NR_recvfrom
:
9557 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9559 #ifdef TARGET_NR_recvmsg
9560 case TARGET_NR_recvmsg
:
9561 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9563 #ifdef TARGET_NR_send
9564 case TARGET_NR_send
:
9565 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9567 #ifdef TARGET_NR_sendmsg
9568 case TARGET_NR_sendmsg
:
9569 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9571 #ifdef TARGET_NR_sendmmsg
9572 case TARGET_NR_sendmmsg
:
9573 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9575 #ifdef TARGET_NR_recvmmsg
9576 case TARGET_NR_recvmmsg
:
9577 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9579 #ifdef TARGET_NR_sendto
9580 case TARGET_NR_sendto
:
9581 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9583 #ifdef TARGET_NR_shutdown
9584 case TARGET_NR_shutdown
:
9585 return get_errno(shutdown(arg1
, arg2
));
9587 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9588 case TARGET_NR_getrandom
:
9589 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9591 return -TARGET_EFAULT
;
9593 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9594 unlock_user(p
, arg1
, ret
);
9597 #ifdef TARGET_NR_socket
9598 case TARGET_NR_socket
:
9599 return do_socket(arg1
, arg2
, arg3
);
9601 #ifdef TARGET_NR_socketpair
9602 case TARGET_NR_socketpair
:
9603 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9605 #ifdef TARGET_NR_setsockopt
9606 case TARGET_NR_setsockopt
:
9607 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9609 #if defined(TARGET_NR_syslog)
9610 case TARGET_NR_syslog
:
9615 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9616 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9617 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9618 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9619 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9620 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9621 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9622 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9623 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9624 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9625 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9626 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9629 return -TARGET_EINVAL
;
9634 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9636 return -TARGET_EFAULT
;
9638 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9639 unlock_user(p
, arg2
, arg3
);
9643 return -TARGET_EINVAL
;
9648 case TARGET_NR_setitimer
:
9650 struct itimerval value
, ovalue
, *pvalue
;
9654 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9655 || copy_from_user_timeval(&pvalue
->it_value
,
9656 arg2
+ sizeof(struct target_timeval
)))
9657 return -TARGET_EFAULT
;
9661 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9662 if (!is_error(ret
) && arg3
) {
9663 if (copy_to_user_timeval(arg3
,
9664 &ovalue
.it_interval
)
9665 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9667 return -TARGET_EFAULT
;
9671 case TARGET_NR_getitimer
:
9673 struct itimerval value
;
9675 ret
= get_errno(getitimer(arg1
, &value
));
9676 if (!is_error(ret
) && arg2
) {
9677 if (copy_to_user_timeval(arg2
,
9679 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9681 return -TARGET_EFAULT
;
9685 #ifdef TARGET_NR_stat
9686 case TARGET_NR_stat
:
9687 if (!(p
= lock_user_string(arg1
))) {
9688 return -TARGET_EFAULT
;
9690 ret
= get_errno(stat(path(p
), &st
));
9691 unlock_user(p
, arg1
, 0);
9694 #ifdef TARGET_NR_lstat
9695 case TARGET_NR_lstat
:
9696 if (!(p
= lock_user_string(arg1
))) {
9697 return -TARGET_EFAULT
;
9699 ret
= get_errno(lstat(path(p
), &st
));
9700 unlock_user(p
, arg1
, 0);
9703 #ifdef TARGET_NR_fstat
9704 case TARGET_NR_fstat
:
9706 ret
= get_errno(fstat(arg1
, &st
));
9707 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9710 if (!is_error(ret
)) {
9711 struct target_stat
*target_st
;
9713 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9714 return -TARGET_EFAULT
;
9715 memset(target_st
, 0, sizeof(*target_st
));
9716 __put_user(st
.st_dev
, &target_st
->st_dev
);
9717 __put_user(st
.st_ino
, &target_st
->st_ino
);
9718 __put_user(st
.st_mode
, &target_st
->st_mode
);
9719 __put_user(st
.st_uid
, &target_st
->st_uid
);
9720 __put_user(st
.st_gid
, &target_st
->st_gid
);
9721 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9722 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9723 __put_user(st
.st_size
, &target_st
->st_size
);
9724 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9725 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9726 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9727 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9728 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9729 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9730 defined(TARGET_STAT_HAVE_NSEC)
9731 __put_user(st
.st_atim
.tv_nsec
,
9732 &target_st
->target_st_atime_nsec
);
9733 __put_user(st
.st_mtim
.tv_nsec
,
9734 &target_st
->target_st_mtime_nsec
);
9735 __put_user(st
.st_ctim
.tv_nsec
,
9736 &target_st
->target_st_ctime_nsec
);
9738 unlock_user_struct(target_st
, arg2
, 1);
9743 case TARGET_NR_vhangup
:
9744 return get_errno(vhangup());
9745 #ifdef TARGET_NR_syscall
9746 case TARGET_NR_syscall
:
9747 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9748 arg6
, arg7
, arg8
, 0);
9750 #if defined(TARGET_NR_wait4)
9751 case TARGET_NR_wait4
:
9754 abi_long status_ptr
= arg2
;
9755 struct rusage rusage
, *rusage_ptr
;
9756 abi_ulong target_rusage
= arg4
;
9757 abi_long rusage_err
;
9759 rusage_ptr
= &rusage
;
9762 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9763 if (!is_error(ret
)) {
9764 if (status_ptr
&& ret
) {
9765 status
= host_to_target_waitstatus(status
);
9766 if (put_user_s32(status
, status_ptr
))
9767 return -TARGET_EFAULT
;
9769 if (target_rusage
) {
9770 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9779 #ifdef TARGET_NR_swapoff
9780 case TARGET_NR_swapoff
:
9781 if (!(p
= lock_user_string(arg1
)))
9782 return -TARGET_EFAULT
;
9783 ret
= get_errno(swapoff(p
));
9784 unlock_user(p
, arg1
, 0);
9787 case TARGET_NR_sysinfo
:
9789 struct target_sysinfo
*target_value
;
9790 struct sysinfo value
;
9791 ret
= get_errno(sysinfo(&value
));
9792 if (!is_error(ret
) && arg1
)
9794 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9795 return -TARGET_EFAULT
;
9796 __put_user(value
.uptime
, &target_value
->uptime
);
9797 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9798 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9799 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9800 __put_user(value
.totalram
, &target_value
->totalram
);
9801 __put_user(value
.freeram
, &target_value
->freeram
);
9802 __put_user(value
.sharedram
, &target_value
->sharedram
);
9803 __put_user(value
.bufferram
, &target_value
->bufferram
);
9804 __put_user(value
.totalswap
, &target_value
->totalswap
);
9805 __put_user(value
.freeswap
, &target_value
->freeswap
);
9806 __put_user(value
.procs
, &target_value
->procs
);
9807 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9808 __put_user(value
.freehigh
, &target_value
->freehigh
);
9809 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9810 unlock_user_struct(target_value
, arg1
, 1);
9814 #ifdef TARGET_NR_ipc
9816 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9818 #ifdef TARGET_NR_semget
9819 case TARGET_NR_semget
:
9820 return get_errno(semget(arg1
, arg2
, arg3
));
9822 #ifdef TARGET_NR_semop
9823 case TARGET_NR_semop
:
9824 return do_semtimedop(arg1
, arg2
, arg3
, 0);
9826 #ifdef TARGET_NR_semtimedop
9827 case TARGET_NR_semtimedop
:
9828 return do_semtimedop(arg1
, arg2
, arg3
, arg4
);
9830 #ifdef TARGET_NR_semctl
9831 case TARGET_NR_semctl
:
9832 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9834 #ifdef TARGET_NR_msgctl
9835 case TARGET_NR_msgctl
:
9836 return do_msgctl(arg1
, arg2
, arg3
);
9838 #ifdef TARGET_NR_msgget
9839 case TARGET_NR_msgget
:
9840 return get_errno(msgget(arg1
, arg2
));
9842 #ifdef TARGET_NR_msgrcv
9843 case TARGET_NR_msgrcv
:
9844 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9846 #ifdef TARGET_NR_msgsnd
9847 case TARGET_NR_msgsnd
:
9848 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9850 #ifdef TARGET_NR_shmget
9851 case TARGET_NR_shmget
:
9852 return get_errno(shmget(arg1
, arg2
, arg3
));
9854 #ifdef TARGET_NR_shmctl
9855 case TARGET_NR_shmctl
:
9856 return do_shmctl(arg1
, arg2
, arg3
);
9858 #ifdef TARGET_NR_shmat
9859 case TARGET_NR_shmat
:
9860 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9862 #ifdef TARGET_NR_shmdt
9863 case TARGET_NR_shmdt
:
9864 return do_shmdt(arg1
);
case TARGET_NR_fsync:
    /* fsync(2): flush modified data of fd arg1 to storage.
     * The guest fd is passed straight through to the host;
     * only the errno needs translating. */
    return get_errno(fsync(arg1));
9868 case TARGET_NR_clone
:
9869 /* Linux manages to have three different orderings for its
9870 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9871 * match the kernel's CONFIG_CLONE_* settings.
9872 * Microblaze is further special in that it uses a sixth
9873 * implicit argument to clone for the TLS pointer.
9875 #if defined(TARGET_MICROBLAZE)
9876 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9877 #elif defined(TARGET_CLONE_BACKWARDS)
9878 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9879 #elif defined(TARGET_CLONE_BACKWARDS2)
9880 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9882 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9885 #ifdef __NR_exit_group
9886 /* new thread calls */
9887 case TARGET_NR_exit_group
:
9888 preexit_cleanup(cpu_env
, arg1
);
9889 return get_errno(exit_group(arg1
));
9891 case TARGET_NR_setdomainname
:
9892 if (!(p
= lock_user_string(arg1
)))
9893 return -TARGET_EFAULT
;
9894 ret
= get_errno(setdomainname(p
, arg2
));
9895 unlock_user(p
, arg1
, 0);
9897 case TARGET_NR_uname
:
9898 /* no need to transcode because we use the linux syscall */
9900 struct new_utsname
* buf
;
9902 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9903 return -TARGET_EFAULT
;
9904 ret
= get_errno(sys_uname(buf
));
9905 if (!is_error(ret
)) {
9906 /* Overwrite the native machine name with whatever is being
9908 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9909 sizeof(buf
->machine
));
9910 /* Allow the user to override the reported release. */
9911 if (qemu_uname_release
&& *qemu_uname_release
) {
9912 g_strlcpy(buf
->release
, qemu_uname_release
,
9913 sizeof(buf
->release
));
9916 unlock_user_struct(buf
, arg1
, 1);
case TARGET_NR_modify_ldt:
    /* x86 LDT manipulation cannot be passed to the host: the guest's
     * descriptor tables are emulated, so delegate to the helper that
     * operates on the emulated CPU state. */
    return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9922 #if !defined(TARGET_X86_64)
9923 case TARGET_NR_vm86
:
9924 return do_vm86(cpu_env
, arg1
, arg2
);
9927 #if defined(TARGET_NR_adjtimex)
9928 case TARGET_NR_adjtimex
:
9930 struct timex host_buf
;
9932 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9933 return -TARGET_EFAULT
;
9935 ret
= get_errno(adjtimex(&host_buf
));
9936 if (!is_error(ret
)) {
9937 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9938 return -TARGET_EFAULT
;
9944 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9945 case TARGET_NR_clock_adjtime
:
9947 struct timex htx
, *phtx
= &htx
;
9949 if (target_to_host_timex(phtx
, arg2
) != 0) {
9950 return -TARGET_EFAULT
;
9952 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9953 if (!is_error(ret
) && phtx
) {
9954 if (host_to_target_timex(arg2
, phtx
) != 0) {
9955 return -TARGET_EFAULT
;
case TARGET_NR_getpgid:
    /* pids are shared between guest and host; direct passthrough. */
    return get_errno(getpgid(arg1));
case TARGET_NR_fchdir:
    /* fd is already a host fd; direct passthrough. */
    return get_errno(fchdir(arg1));
case TARGET_NR_personality:
    /* NOTE(review): personality flags are passed through unchanged --
     * assumes target and host PER_* values agree; confirm for all
     * supported targets. */
    return get_errno(personality(arg1));
9967 #ifdef TARGET_NR__llseek /* Not on alpha */
9968 case TARGET_NR__llseek
:
9971 #if !defined(__NR_llseek)
9972 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9974 ret
= get_errno(res
);
9979 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9981 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9982 return -TARGET_EFAULT
;
9987 #ifdef TARGET_NR_getdents
9988 case TARGET_NR_getdents
:
9989 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9990 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9992 struct target_dirent
*target_dirp
;
9993 struct linux_dirent
*dirp
;
9994 abi_long count
= arg3
;
9996 dirp
= g_try_malloc(count
);
9998 return -TARGET_ENOMEM
;
10001 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10002 if (!is_error(ret
)) {
10003 struct linux_dirent
*de
;
10004 struct target_dirent
*tde
;
10006 int reclen
, treclen
;
10007 int count1
, tnamelen
;
10011 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10012 return -TARGET_EFAULT
;
10015 reclen
= de
->d_reclen
;
10016 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10017 assert(tnamelen
>= 0);
10018 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10019 assert(count1
+ treclen
<= count
);
10020 tde
->d_reclen
= tswap16(treclen
);
10021 tde
->d_ino
= tswapal(de
->d_ino
);
10022 tde
->d_off
= tswapal(de
->d_off
);
10023 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10024 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10026 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10030 unlock_user(target_dirp
, arg2
, ret
);
10036 struct linux_dirent
*dirp
;
10037 abi_long count
= arg3
;
10039 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10040 return -TARGET_EFAULT
;
10041 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10042 if (!is_error(ret
)) {
10043 struct linux_dirent
*de
;
10048 reclen
= de
->d_reclen
;
10051 de
->d_reclen
= tswap16(reclen
);
10052 tswapls(&de
->d_ino
);
10053 tswapls(&de
->d_off
);
10054 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10058 unlock_user(dirp
, arg2
, ret
);
10062 /* Implement getdents in terms of getdents64 */
10064 struct linux_dirent64
*dirp
;
10065 abi_long count
= arg3
;
10067 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10069 return -TARGET_EFAULT
;
10071 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10072 if (!is_error(ret
)) {
10073 /* Convert the dirent64 structs to target dirent. We do this
10074 * in-place, since we can guarantee that a target_dirent is no
10075 * larger than a dirent64; however this means we have to be
10076 * careful to read everything before writing in the new format.
10078 struct linux_dirent64
*de
;
10079 struct target_dirent
*tde
;
10084 tde
= (struct target_dirent
*)dirp
;
10086 int namelen
, treclen
;
10087 int reclen
= de
->d_reclen
;
10088 uint64_t ino
= de
->d_ino
;
10089 int64_t off
= de
->d_off
;
10090 uint8_t type
= de
->d_type
;
10092 namelen
= strlen(de
->d_name
);
10093 treclen
= offsetof(struct target_dirent
, d_name
)
10095 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10097 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10098 tde
->d_ino
= tswapal(ino
);
10099 tde
->d_off
= tswapal(off
);
10100 tde
->d_reclen
= tswap16(treclen
);
10101 /* The target_dirent type is in what was formerly a padding
10102 * byte at the end of the structure:
10104 *(((char *)tde
) + treclen
- 1) = type
;
10106 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10107 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10113 unlock_user(dirp
, arg2
, ret
);
10117 #endif /* TARGET_NR_getdents */
10118 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10119 case TARGET_NR_getdents64
:
10121 struct linux_dirent64
*dirp
;
10122 abi_long count
= arg3
;
10123 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10124 return -TARGET_EFAULT
;
10125 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10126 if (!is_error(ret
)) {
10127 struct linux_dirent64
*de
;
10132 reclen
= de
->d_reclen
;
10135 de
->d_reclen
= tswap16(reclen
);
10136 tswap64s((uint64_t *)&de
->d_ino
);
10137 tswap64s((uint64_t *)&de
->d_off
);
10138 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10142 unlock_user(dirp
, arg2
, ret
);
10145 #endif /* TARGET_NR_getdents64 */
10146 #if defined(TARGET_NR__newselect)
10147 case TARGET_NR__newselect
:
10148 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10150 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10151 # ifdef TARGET_NR_poll
10152 case TARGET_NR_poll
:
10154 # ifdef TARGET_NR_ppoll
10155 case TARGET_NR_ppoll
:
10158 struct target_pollfd
*target_pfd
;
10159 unsigned int nfds
= arg2
;
10160 struct pollfd
*pfd
;
10166 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10167 return -TARGET_EINVAL
;
10170 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10171 sizeof(struct target_pollfd
) * nfds
, 1);
10173 return -TARGET_EFAULT
;
10176 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10177 for (i
= 0; i
< nfds
; i
++) {
10178 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10179 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10184 # ifdef TARGET_NR_ppoll
10185 case TARGET_NR_ppoll
:
10187 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10188 target_sigset_t
*target_set
;
10189 sigset_t _set
, *set
= &_set
;
10192 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10193 unlock_user(target_pfd
, arg1
, 0);
10194 return -TARGET_EFAULT
;
10201 if (arg5
!= sizeof(target_sigset_t
)) {
10202 unlock_user(target_pfd
, arg1
, 0);
10203 return -TARGET_EINVAL
;
10206 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10208 unlock_user(target_pfd
, arg1
, 0);
10209 return -TARGET_EFAULT
;
10211 target_to_host_sigset(set
, target_set
);
10216 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10217 set
, SIGSET_T_SIZE
));
10219 if (!is_error(ret
) && arg3
) {
10220 host_to_target_timespec(arg3
, timeout_ts
);
10223 unlock_user(target_set
, arg4
, 0);
10228 # ifdef TARGET_NR_poll
10229 case TARGET_NR_poll
:
10231 struct timespec ts
, *pts
;
10234 /* Convert ms to secs, ns */
10235 ts
.tv_sec
= arg3
/ 1000;
10236 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10239 /* -ve poll() timeout means "infinite" */
10242 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10247 g_assert_not_reached();
10250 if (!is_error(ret
)) {
10251 for(i
= 0; i
< nfds
; i
++) {
10252 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10255 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10259 case TARGET_NR_flock
:
10260 /* NOTE: the flock constant seems to be the same for every
10262 return get_errno(safe_flock(arg1
, arg2
));
10263 case TARGET_NR_readv
:
10265 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10267 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10268 unlock_iovec(vec
, arg2
, arg3
, 1);
10270 ret
= -host_to_target_errno(errno
);
10274 case TARGET_NR_writev
:
10276 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10278 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10279 unlock_iovec(vec
, arg2
, arg3
, 0);
10281 ret
= -host_to_target_errno(errno
);
10285 #if defined(TARGET_NR_preadv)
10286 case TARGET_NR_preadv
:
10288 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10290 unsigned long low
, high
;
10292 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10293 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10294 unlock_iovec(vec
, arg2
, arg3
, 1);
10296 ret
= -host_to_target_errno(errno
);
10301 #if defined(TARGET_NR_pwritev)
10302 case TARGET_NR_pwritev
:
10304 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10306 unsigned long low
, high
;
10308 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10309 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10310 unlock_iovec(vec
, arg2
, arg3
, 0);
10312 ret
= -host_to_target_errno(errno
);
case TARGET_NR_getsid:
    /* Session ids are host-global integers; direct passthrough. */
    return get_errno(getsid(arg1));
10319 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10320 case TARGET_NR_fdatasync
:
10321 return get_errno(fdatasync(arg1
));
10323 #ifdef TARGET_NR__sysctl
10324 case TARGET_NR__sysctl
:
10325 /* We don't implement this, but ENOTDIR is always a safe
10327 return -TARGET_ENOTDIR
;
10329 case TARGET_NR_sched_getaffinity
:
10331 unsigned int mask_size
;
10332 unsigned long *mask
;
10335 * sched_getaffinity needs multiples of ulong, so need to take
10336 * care of mismatches between target ulong and host ulong sizes.
10338 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10339 return -TARGET_EINVAL
;
10341 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10343 mask
= alloca(mask_size
);
10344 memset(mask
, 0, mask_size
);
10345 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10347 if (!is_error(ret
)) {
10349 /* More data returned than the caller's buffer will fit.
10350 * This only happens if sizeof(abi_long) < sizeof(long)
10351 * and the caller passed us a buffer holding an odd number
10352 * of abi_longs. If the host kernel is actually using the
10353 * extra 4 bytes then fail EINVAL; otherwise we can just
10354 * ignore them and only copy the interesting part.
10356 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10357 if (numcpus
> arg2
* 8) {
10358 return -TARGET_EINVAL
;
10363 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10364 return -TARGET_EFAULT
;
10369 case TARGET_NR_sched_setaffinity
:
10371 unsigned int mask_size
;
10372 unsigned long *mask
;
10375 * sched_setaffinity needs multiples of ulong, so need to take
10376 * care of mismatches between target ulong and host ulong sizes.
10378 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10379 return -TARGET_EINVAL
;
10381 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10382 mask
= alloca(mask_size
);
10384 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10389 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10391 case TARGET_NR_getcpu
:
10393 unsigned cpu
, node
;
10394 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10395 arg2
? &node
: NULL
,
10397 if (is_error(ret
)) {
10400 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10401 return -TARGET_EFAULT
;
10403 if (arg2
&& put_user_u32(node
, arg2
)) {
10404 return -TARGET_EFAULT
;
10408 case TARGET_NR_sched_setparam
:
10410 struct sched_param
*target_schp
;
10411 struct sched_param schp
;
10414 return -TARGET_EINVAL
;
10416 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10417 return -TARGET_EFAULT
;
10418 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10419 unlock_user_struct(target_schp
, arg2
, 0);
10420 return get_errno(sched_setparam(arg1
, &schp
));
10422 case TARGET_NR_sched_getparam
:
10424 struct sched_param
*target_schp
;
10425 struct sched_param schp
;
10428 return -TARGET_EINVAL
;
10430 ret
= get_errno(sched_getparam(arg1
, &schp
));
10431 if (!is_error(ret
)) {
10432 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10433 return -TARGET_EFAULT
;
10434 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10435 unlock_user_struct(target_schp
, arg2
, 1);
10439 case TARGET_NR_sched_setscheduler
:
10441 struct sched_param
*target_schp
;
10442 struct sched_param schp
;
10444 return -TARGET_EINVAL
;
10446 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10447 return -TARGET_EFAULT
;
10448 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10449 unlock_user_struct(target_schp
, arg3
, 0);
10450 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
/* Scheduler queries below take and return plain integers, so no
 * argument translation is needed.
 * NOTE(review): this assumes target SCHED_* policy values match the
 * host's -- confirm for targets with divergent ABIs. */
case TARGET_NR_sched_getscheduler:
    return get_errno(sched_getscheduler(arg1));
case TARGET_NR_sched_yield:
    return get_errno(sched_yield());
case TARGET_NR_sched_get_priority_max:
    return get_errno(sched_get_priority_max(arg1));
case TARGET_NR_sched_get_priority_min:
    return get_errno(sched_get_priority_min(arg1));
10460 #ifdef TARGET_NR_sched_rr_get_interval
10461 case TARGET_NR_sched_rr_get_interval
:
10463 struct timespec ts
;
10464 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10465 if (!is_error(ret
)) {
10466 ret
= host_to_target_timespec(arg2
, &ts
);
10471 #if defined(TARGET_NR_nanosleep)
10472 case TARGET_NR_nanosleep
:
10474 struct timespec req
, rem
;
10475 target_to_host_timespec(&req
, arg1
);
10476 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10477 if (is_error(ret
) && arg2
) {
10478 host_to_target_timespec(arg2
, &rem
);
10483 case TARGET_NR_prctl
:
10485 case PR_GET_PDEATHSIG
:
10488 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10489 if (!is_error(ret
) && arg2
10490 && put_user_ual(deathsig
, arg2
)) {
10491 return -TARGET_EFAULT
;
10498 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10500 return -TARGET_EFAULT
;
10502 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10503 arg3
, arg4
, arg5
));
10504 unlock_user(name
, arg2
, 16);
10509 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10511 return -TARGET_EFAULT
;
10513 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10514 arg3
, arg4
, arg5
));
10515 unlock_user(name
, arg2
, 0);
10520 case TARGET_PR_GET_FP_MODE
:
10522 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10524 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10525 ret
|= TARGET_PR_FP_MODE_FR
;
10527 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10528 ret
|= TARGET_PR_FP_MODE_FRE
;
10532 case TARGET_PR_SET_FP_MODE
:
10534 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10535 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10536 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10537 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10538 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10540 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10541 TARGET_PR_FP_MODE_FRE
;
10543 /* If nothing to change, return right away, successfully. */
10544 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10547 /* Check the value is valid */
10548 if (arg2
& ~known_bits
) {
10549 return -TARGET_EOPNOTSUPP
;
10551 /* Setting FRE without FR is not supported. */
10552 if (new_fre
&& !new_fr
) {
10553 return -TARGET_EOPNOTSUPP
;
10555 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10556 /* FR1 is not supported */
10557 return -TARGET_EOPNOTSUPP
;
10559 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10560 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10561 /* cannot set FR=0 */
10562 return -TARGET_EOPNOTSUPP
;
10564 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10565 /* Cannot set FRE=1 */
10566 return -TARGET_EOPNOTSUPP
;
10570 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10571 for (i
= 0; i
< 32 ; i
+= 2) {
10572 if (!old_fr
&& new_fr
) {
10573 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10574 } else if (old_fr
&& !new_fr
) {
10575 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10580 env
->CP0_Status
|= (1 << CP0St_FR
);
10581 env
->hflags
|= MIPS_HFLAG_F64
;
10583 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10584 env
->hflags
&= ~MIPS_HFLAG_F64
;
10587 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10588 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10589 env
->hflags
|= MIPS_HFLAG_FRE
;
10592 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10593 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10599 #ifdef TARGET_AARCH64
10600 case TARGET_PR_SVE_SET_VL
:
10602 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10603 * PR_SVE_VL_INHERIT. Note the kernel definition
10604 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10605 * even though the current architectural maximum is VQ=16.
10607 ret
= -TARGET_EINVAL
;
10608 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10609 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10610 CPUARMState
*env
= cpu_env
;
10611 ARMCPU
*cpu
= env_archcpu(env
);
10612 uint32_t vq
, old_vq
;
10614 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10615 vq
= MAX(arg2
/ 16, 1);
10616 vq
= MIN(vq
, cpu
->sve_max_vq
);
10619 aarch64_sve_narrow_vq(env
, vq
);
10621 env
->vfp
.zcr_el
[1] = vq
- 1;
10622 arm_rebuild_hflags(env
);
10626 case TARGET_PR_SVE_GET_VL
:
10627 ret
= -TARGET_EINVAL
;
10629 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10630 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10631 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10635 case TARGET_PR_PAC_RESET_KEYS
:
10637 CPUARMState
*env
= cpu_env
;
10638 ARMCPU
*cpu
= env_archcpu(env
);
10640 if (arg3
|| arg4
|| arg5
) {
10641 return -TARGET_EINVAL
;
10643 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10644 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10645 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10646 TARGET_PR_PAC_APGAKEY
);
10652 } else if (arg2
& ~all
) {
10653 return -TARGET_EINVAL
;
10655 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10656 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10657 sizeof(ARMPACKey
), &err
);
10659 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10660 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10661 sizeof(ARMPACKey
), &err
);
10663 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10664 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10665 sizeof(ARMPACKey
), &err
);
10667 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10668 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10669 sizeof(ARMPACKey
), &err
);
10671 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10672 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10673 sizeof(ARMPACKey
), &err
);
10677 * Some unknown failure in the crypto. The best
10678 * we can do is log it and fail the syscall.
10679 * The real syscall cannot fail this way.
10681 qemu_log_mask(LOG_UNIMP
,
10682 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10683 error_get_pretty(err
));
10685 return -TARGET_EIO
;
10690 return -TARGET_EINVAL
;
10691 #endif /* AARCH64 */
10692 case PR_GET_SECCOMP
:
10693 case PR_SET_SECCOMP
:
10694 /* Disable seccomp to prevent the target disabling syscalls we
10696 return -TARGET_EINVAL
;
10698 /* Most prctl options have no pointer arguments */
10699 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10702 #ifdef TARGET_NR_arch_prctl
10703 case TARGET_NR_arch_prctl
:
10704 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10706 #ifdef TARGET_NR_pread64
10707 case TARGET_NR_pread64
:
10708 if (regpairs_aligned(cpu_env
, num
)) {
10712 if (arg2
== 0 && arg3
== 0) {
10713 /* Special-case NULL buffer and zero length, which should succeed */
10716 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10718 return -TARGET_EFAULT
;
10721 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10722 unlock_user(p
, arg2
, ret
);
10724 case TARGET_NR_pwrite64
:
10725 if (regpairs_aligned(cpu_env
, num
)) {
10729 if (arg2
== 0 && arg3
== 0) {
10730 /* Special-case NULL buffer and zero length, which should succeed */
10733 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10735 return -TARGET_EFAULT
;
10738 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10739 unlock_user(p
, arg2
, 0);
10742 case TARGET_NR_getcwd
:
10743 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10744 return -TARGET_EFAULT
;
10745 ret
= get_errno(sys_getcwd1(p
, arg2
));
10746 unlock_user(p
, arg1
, ret
);
10748 case TARGET_NR_capget
:
10749 case TARGET_NR_capset
:
10751 struct target_user_cap_header
*target_header
;
10752 struct target_user_cap_data
*target_data
= NULL
;
10753 struct __user_cap_header_struct header
;
10754 struct __user_cap_data_struct data
[2];
10755 struct __user_cap_data_struct
*dataptr
= NULL
;
10756 int i
, target_datalen
;
10757 int data_items
= 1;
10759 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10760 return -TARGET_EFAULT
;
10762 header
.version
= tswap32(target_header
->version
);
10763 header
.pid
= tswap32(target_header
->pid
);
10765 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10766 /* Version 2 and up takes pointer to two user_data structs */
10770 target_datalen
= sizeof(*target_data
) * data_items
;
10773 if (num
== TARGET_NR_capget
) {
10774 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10776 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10778 if (!target_data
) {
10779 unlock_user_struct(target_header
, arg1
, 0);
10780 return -TARGET_EFAULT
;
10783 if (num
== TARGET_NR_capset
) {
10784 for (i
= 0; i
< data_items
; i
++) {
10785 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10786 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10787 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10794 if (num
== TARGET_NR_capget
) {
10795 ret
= get_errno(capget(&header
, dataptr
));
10797 ret
= get_errno(capset(&header
, dataptr
));
10800 /* The kernel always updates version for both capget and capset */
10801 target_header
->version
= tswap32(header
.version
);
10802 unlock_user_struct(target_header
, arg1
, 1);
10805 if (num
== TARGET_NR_capget
) {
10806 for (i
= 0; i
< data_items
; i
++) {
10807 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10808 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10809 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10811 unlock_user(target_data
, arg2
, target_datalen
);
10813 unlock_user(target_data
, arg2
, 0);
case TARGET_NR_sigaltstack:
    /* Delegated: the helper needs the guest's current stack pointer
     * (extracted from the emulated CPU state) to validate and report
     * the alternate signal stack. */
    return do_sigaltstack(arg1, arg2,
                          get_sp_from_cpustate((CPUArchState *)cpu_env));
10822 #ifdef CONFIG_SENDFILE
10823 #ifdef TARGET_NR_sendfile
10824 case TARGET_NR_sendfile
:
10826 off_t
*offp
= NULL
;
10829 ret
= get_user_sal(off
, arg3
);
10830 if (is_error(ret
)) {
10835 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10836 if (!is_error(ret
) && arg3
) {
10837 abi_long ret2
= put_user_sal(off
, arg3
);
10838 if (is_error(ret2
)) {
10845 #ifdef TARGET_NR_sendfile64
10846 case TARGET_NR_sendfile64
:
10848 off_t
*offp
= NULL
;
10851 ret
= get_user_s64(off
, arg3
);
10852 if (is_error(ret
)) {
10857 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10858 if (!is_error(ret
) && arg3
) {
10859 abi_long ret2
= put_user_s64(off
, arg3
);
10860 if (is_error(ret2
)) {
10868 #ifdef TARGET_NR_vfork
10869 case TARGET_NR_vfork
:
10870 return get_errno(do_fork(cpu_env
,
10871 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10874 #ifdef TARGET_NR_ugetrlimit
10875 case TARGET_NR_ugetrlimit
:
10877 struct rlimit rlim
;
10878 int resource
= target_to_host_resource(arg1
);
10879 ret
= get_errno(getrlimit(resource
, &rlim
));
10880 if (!is_error(ret
)) {
10881 struct target_rlimit
*target_rlim
;
10882 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10883 return -TARGET_EFAULT
;
10884 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10885 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10886 unlock_user_struct(target_rlim
, arg2
, 1);
10891 #ifdef TARGET_NR_truncate64
10892 case TARGET_NR_truncate64
:
10893 if (!(p
= lock_user_string(arg1
)))
10894 return -TARGET_EFAULT
;
10895 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10896 unlock_user(p
, arg1
, 0);
10899 #ifdef TARGET_NR_ftruncate64
10900 case TARGET_NR_ftruncate64
:
10901 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10903 #ifdef TARGET_NR_stat64
10904 case TARGET_NR_stat64
:
10905 if (!(p
= lock_user_string(arg1
))) {
10906 return -TARGET_EFAULT
;
10908 ret
= get_errno(stat(path(p
), &st
));
10909 unlock_user(p
, arg1
, 0);
10910 if (!is_error(ret
))
10911 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10914 #ifdef TARGET_NR_lstat64
10915 case TARGET_NR_lstat64
:
10916 if (!(p
= lock_user_string(arg1
))) {
10917 return -TARGET_EFAULT
;
10919 ret
= get_errno(lstat(path(p
), &st
));
10920 unlock_user(p
, arg1
, 0);
10921 if (!is_error(ret
))
10922 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10925 #ifdef TARGET_NR_fstat64
10926 case TARGET_NR_fstat64
:
10927 ret
= get_errno(fstat(arg1
, &st
));
10928 if (!is_error(ret
))
10929 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10932 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10933 #ifdef TARGET_NR_fstatat64
10934 case TARGET_NR_fstatat64
:
10936 #ifdef TARGET_NR_newfstatat
10937 case TARGET_NR_newfstatat
:
10939 if (!(p
= lock_user_string(arg2
))) {
10940 return -TARGET_EFAULT
;
10942 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10943 unlock_user(p
, arg2
, 0);
10944 if (!is_error(ret
))
10945 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10948 #if defined(TARGET_NR_statx)
10949 case TARGET_NR_statx
:
10951 struct target_statx
*target_stx
;
10955 p
= lock_user_string(arg2
);
10957 return -TARGET_EFAULT
;
10959 #if defined(__NR_statx)
10962 * It is assumed that struct statx is architecture independent.
10964 struct target_statx host_stx
;
10967 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10968 if (!is_error(ret
)) {
10969 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10970 unlock_user(p
, arg2
, 0);
10971 return -TARGET_EFAULT
;
10975 if (ret
!= -TARGET_ENOSYS
) {
10976 unlock_user(p
, arg2
, 0);
10981 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10982 unlock_user(p
, arg2
, 0);
10984 if (!is_error(ret
)) {
10985 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10986 return -TARGET_EFAULT
;
10988 memset(target_stx
, 0, sizeof(*target_stx
));
10989 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10990 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10991 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10992 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10993 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10994 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10995 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10996 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10997 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10998 __put_user(st
.st_size
, &target_stx
->stx_size
);
10999 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11000 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11001 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11002 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11003 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11004 unlock_user_struct(target_stx
, arg5
, 1);
11009 #ifdef TARGET_NR_lchown
11010 case TARGET_NR_lchown
:
11011 if (!(p
= lock_user_string(arg1
)))
11012 return -TARGET_EFAULT
;
11013 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11014 unlock_user(p
, arg1
, 0);
11017 #ifdef TARGET_NR_getuid
11018 case TARGET_NR_getuid
:
11019 return get_errno(high2lowuid(getuid()));
11021 #ifdef TARGET_NR_getgid
11022 case TARGET_NR_getgid
:
11023 return get_errno(high2lowgid(getgid()));
11025 #ifdef TARGET_NR_geteuid
11026 case TARGET_NR_geteuid
:
11027 return get_errno(high2lowuid(geteuid()));
11029 #ifdef TARGET_NR_getegid
11030 case TARGET_NR_getegid
:
11031 return get_errno(high2lowgid(getegid()));
case TARGET_NR_setreuid:
    /* low2highuid()/low2highgid() widen the target's legacy (16-bit)
     * id arguments to the host's full-width types before the host
     * call -- presumably preserving the -1 "unchanged" sentinel; see
     * the helper definitions to confirm. */
    return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
case TARGET_NR_setregid:
    return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11037 case TARGET_NR_getgroups
:
11039 int gidsetsize
= arg1
;
11040 target_id
*target_grouplist
;
11044 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11045 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11046 if (gidsetsize
== 0)
11048 if (!is_error(ret
)) {
11049 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11050 if (!target_grouplist
)
11051 return -TARGET_EFAULT
;
11052 for(i
= 0;i
< ret
; i
++)
11053 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11054 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11058 case TARGET_NR_setgroups
:
11060 int gidsetsize
= arg1
;
11061 target_id
*target_grouplist
;
11062 gid_t
*grouplist
= NULL
;
11065 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11066 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11067 if (!target_grouplist
) {
11068 return -TARGET_EFAULT
;
11070 for (i
= 0; i
< gidsetsize
; i
++) {
11071 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11073 unlock_user(target_grouplist
, arg2
, 0);
11075 return get_errno(setgroups(gidsetsize
, grouplist
));
case TARGET_NR_fchown:
    /* fd passes straight through; the legacy-width uid/gid arguments
     * are widened via low2highuid()/low2highgid() for the host call. */
    return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11079 #if defined(TARGET_NR_fchownat)
11080 case TARGET_NR_fchownat
:
11081 if (!(p
= lock_user_string(arg2
)))
11082 return -TARGET_EFAULT
;
11083 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11084 low2highgid(arg4
), arg5
));
11085 unlock_user(p
, arg2
, 0);
11088 #ifdef TARGET_NR_setresuid
11089 case TARGET_NR_setresuid
:
11090 return get_errno(sys_setresuid(low2highuid(arg1
),
11092 low2highuid(arg3
)));
11094 #ifdef TARGET_NR_getresuid
11095 case TARGET_NR_getresuid
:
11097 uid_t ruid
, euid
, suid
;
11098 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11099 if (!is_error(ret
)) {
11100 if (put_user_id(high2lowuid(ruid
), arg1
)
11101 || put_user_id(high2lowuid(euid
), arg2
)
11102 || put_user_id(high2lowuid(suid
), arg3
))
11103 return -TARGET_EFAULT
;
11108 #ifdef TARGET_NR_getresgid
11109 case TARGET_NR_setresgid
:
11110 return get_errno(sys_setresgid(low2highgid(arg1
),
11112 low2highgid(arg3
)));
11114 #ifdef TARGET_NR_getresgid
11115 case TARGET_NR_getresgid
:
11117 gid_t rgid
, egid
, sgid
;
11118 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11119 if (!is_error(ret
)) {
11120 if (put_user_id(high2lowgid(rgid
), arg1
)
11121 || put_user_id(high2lowgid(egid
), arg2
)
11122 || put_user_id(high2lowgid(sgid
), arg3
))
11123 return -TARGET_EFAULT
;
11128 #ifdef TARGET_NR_chown
11129 case TARGET_NR_chown
:
11130 if (!(p
= lock_user_string(arg1
)))
11131 return -TARGET_EFAULT
;
11132 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11133 unlock_user(p
, arg1
, 0);
11136 case TARGET_NR_setuid
:
11137 return get_errno(sys_setuid(low2highuid(arg1
)));
11138 case TARGET_NR_setgid
:
11139 return get_errno(sys_setgid(low2highgid(arg1
)));
11140 case TARGET_NR_setfsuid
:
11141 return get_errno(setfsuid(arg1
));
11142 case TARGET_NR_setfsgid
:
11143 return get_errno(setfsgid(arg1
));
11145 #ifdef TARGET_NR_lchown32
11146 case TARGET_NR_lchown32
:
11147 if (!(p
= lock_user_string(arg1
)))
11148 return -TARGET_EFAULT
;
11149 ret
= get_errno(lchown(p
, arg2
, arg3
));
11150 unlock_user(p
, arg1
, 0);
11153 #ifdef TARGET_NR_getuid32
11154 case TARGET_NR_getuid32
:
11155 return get_errno(getuid());
11158 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11159 /* Alpha specific */
11160 case TARGET_NR_getxuid
:
11164 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11166 return get_errno(getuid());
11168 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11169 /* Alpha specific */
11170 case TARGET_NR_getxgid
:
11174 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11176 return get_errno(getgid());
11178 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11179 /* Alpha specific */
11180 case TARGET_NR_osf_getsysinfo
:
11181 ret
= -TARGET_EOPNOTSUPP
;
11183 case TARGET_GSI_IEEE_FP_CONTROL
:
11185 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11186 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11188 swcr
&= ~SWCR_STATUS_MASK
;
11189 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11191 if (put_user_u64 (swcr
, arg2
))
11192 return -TARGET_EFAULT
;
11197 /* case GSI_IEEE_STATE_AT_SIGNAL:
11198 -- Not implemented in linux kernel.
11200 -- Retrieves current unaligned access state; not much used.
11201 case GSI_PROC_TYPE:
11202 -- Retrieves implver information; surely not used.
11203 case GSI_GET_HWRPB:
11204 -- Grabs a copy of the HWRPB; surely not used.
11209 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11210 /* Alpha specific */
11211 case TARGET_NR_osf_setsysinfo
:
11212 ret
= -TARGET_EOPNOTSUPP
;
11214 case TARGET_SSI_IEEE_FP_CONTROL
:
11216 uint64_t swcr
, fpcr
;
11218 if (get_user_u64 (swcr
, arg2
)) {
11219 return -TARGET_EFAULT
;
11223 * The kernel calls swcr_update_status to update the
11224 * status bits from the fpcr at every point that it
11225 * could be queried. Therefore, we store the status
11226 * bits only in FPCR.
11228 ((CPUAlphaState
*)cpu_env
)->swcr
11229 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11231 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11232 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11233 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11234 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11239 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11241 uint64_t exc
, fpcr
, fex
;
11243 if (get_user_u64(exc
, arg2
)) {
11244 return -TARGET_EFAULT
;
11246 exc
&= SWCR_STATUS_MASK
;
11247 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11249 /* Old exceptions are not signaled. */
11250 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11252 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11253 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11255 /* Update the hardware fpcr. */
11256 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11257 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11260 int si_code
= TARGET_FPE_FLTUNK
;
11261 target_siginfo_t info
;
11263 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11264 si_code
= TARGET_FPE_FLTUND
;
11266 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11267 si_code
= TARGET_FPE_FLTRES
;
11269 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11270 si_code
= TARGET_FPE_FLTUND
;
11272 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11273 si_code
= TARGET_FPE_FLTOVF
;
11275 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11276 si_code
= TARGET_FPE_FLTDIV
;
11278 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11279 si_code
= TARGET_FPE_FLTINV
;
11282 info
.si_signo
= SIGFPE
;
11284 info
.si_code
= si_code
;
11285 info
._sifields
._sigfault
._addr
11286 = ((CPUArchState
*)cpu_env
)->pc
;
11287 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11288 QEMU_SI_FAULT
, &info
);
11294 /* case SSI_NVPAIRS:
11295 -- Used with SSIN_UACPROC to enable unaligned accesses.
11296 case SSI_IEEE_STATE_AT_SIGNAL:
11297 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11298 -- Not implemented in linux kernel
11303 #ifdef TARGET_NR_osf_sigprocmask
11304 /* Alpha specific. */
11305 case TARGET_NR_osf_sigprocmask
:
11309 sigset_t set
, oldset
;
11312 case TARGET_SIG_BLOCK
:
11315 case TARGET_SIG_UNBLOCK
:
11318 case TARGET_SIG_SETMASK
:
11322 return -TARGET_EINVAL
;
11325 target_to_host_old_sigset(&set
, &mask
);
11326 ret
= do_sigprocmask(how
, &set
, &oldset
);
11328 host_to_target_old_sigset(&mask
, &oldset
);
11335 #ifdef TARGET_NR_getgid32
11336 case TARGET_NR_getgid32
:
11337 return get_errno(getgid());
11339 #ifdef TARGET_NR_geteuid32
11340 case TARGET_NR_geteuid32
:
11341 return get_errno(geteuid());
11343 #ifdef TARGET_NR_getegid32
11344 case TARGET_NR_getegid32
:
11345 return get_errno(getegid());
11347 #ifdef TARGET_NR_setreuid32
11348 case TARGET_NR_setreuid32
:
11349 return get_errno(setreuid(arg1
, arg2
));
11351 #ifdef TARGET_NR_setregid32
11352 case TARGET_NR_setregid32
:
11353 return get_errno(setregid(arg1
, arg2
));
11355 #ifdef TARGET_NR_getgroups32
11356 case TARGET_NR_getgroups32
:
11358 int gidsetsize
= arg1
;
11359 uint32_t *target_grouplist
;
11363 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11364 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11365 if (gidsetsize
== 0)
11367 if (!is_error(ret
)) {
11368 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11369 if (!target_grouplist
) {
11370 return -TARGET_EFAULT
;
11372 for(i
= 0;i
< ret
; i
++)
11373 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11374 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11379 #ifdef TARGET_NR_setgroups32
11380 case TARGET_NR_setgroups32
:
11382 int gidsetsize
= arg1
;
11383 uint32_t *target_grouplist
;
11387 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11388 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11389 if (!target_grouplist
) {
11390 return -TARGET_EFAULT
;
11392 for(i
= 0;i
< gidsetsize
; i
++)
11393 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11394 unlock_user(target_grouplist
, arg2
, 0);
11395 return get_errno(setgroups(gidsetsize
, grouplist
));
11398 #ifdef TARGET_NR_fchown32
11399 case TARGET_NR_fchown32
:
11400 return get_errno(fchown(arg1
, arg2
, arg3
));
11402 #ifdef TARGET_NR_setresuid32
11403 case TARGET_NR_setresuid32
:
11404 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11406 #ifdef TARGET_NR_getresuid32
11407 case TARGET_NR_getresuid32
:
11409 uid_t ruid
, euid
, suid
;
11410 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11411 if (!is_error(ret
)) {
11412 if (put_user_u32(ruid
, arg1
)
11413 || put_user_u32(euid
, arg2
)
11414 || put_user_u32(suid
, arg3
))
11415 return -TARGET_EFAULT
;
11420 #ifdef TARGET_NR_setresgid32
11421 case TARGET_NR_setresgid32
:
11422 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11424 #ifdef TARGET_NR_getresgid32
11425 case TARGET_NR_getresgid32
:
11427 gid_t rgid
, egid
, sgid
;
11428 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11429 if (!is_error(ret
)) {
11430 if (put_user_u32(rgid
, arg1
)
11431 || put_user_u32(egid
, arg2
)
11432 || put_user_u32(sgid
, arg3
))
11433 return -TARGET_EFAULT
;
11438 #ifdef TARGET_NR_chown32
11439 case TARGET_NR_chown32
:
11440 if (!(p
= lock_user_string(arg1
)))
11441 return -TARGET_EFAULT
;
11442 ret
= get_errno(chown(p
, arg2
, arg3
));
11443 unlock_user(p
, arg1
, 0);
11446 #ifdef TARGET_NR_setuid32
11447 case TARGET_NR_setuid32
:
11448 return get_errno(sys_setuid(arg1
));
11450 #ifdef TARGET_NR_setgid32
11451 case TARGET_NR_setgid32
:
11452 return get_errno(sys_setgid(arg1
));
11454 #ifdef TARGET_NR_setfsuid32
11455 case TARGET_NR_setfsuid32
:
11456 return get_errno(setfsuid(arg1
));
11458 #ifdef TARGET_NR_setfsgid32
11459 case TARGET_NR_setfsgid32
:
11460 return get_errno(setfsgid(arg1
));
11462 #ifdef TARGET_NR_mincore
11463 case TARGET_NR_mincore
:
11465 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11467 return -TARGET_ENOMEM
;
11469 p
= lock_user_string(arg3
);
11471 ret
= -TARGET_EFAULT
;
11473 ret
= get_errno(mincore(a
, arg2
, p
));
11474 unlock_user(p
, arg3
, ret
);
11476 unlock_user(a
, arg1
, 0);
11480 #ifdef TARGET_NR_arm_fadvise64_64
11481 case TARGET_NR_arm_fadvise64_64
:
11482 /* arm_fadvise64_64 looks like fadvise64_64 but
11483 * with different argument order: fd, advice, offset, len
11484 * rather than the usual fd, offset, len, advice.
11485 * Note that offset and len are both 64-bit so appear as
11486 * pairs of 32-bit registers.
11488 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11489 target_offset64(arg5
, arg6
), arg2
);
11490 return -host_to_target_errno(ret
);
11493 #if TARGET_ABI_BITS == 32
11495 #ifdef TARGET_NR_fadvise64_64
11496 case TARGET_NR_fadvise64_64
:
11497 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11498 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11506 /* 6 args: fd, offset (high, low), len (high, low), advice */
11507 if (regpairs_aligned(cpu_env
, num
)) {
11508 /* offset is in (3,4), len in (5,6) and advice in 7 */
11516 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11517 target_offset64(arg4
, arg5
), arg6
);
11518 return -host_to_target_errno(ret
);
11521 #ifdef TARGET_NR_fadvise64
11522 case TARGET_NR_fadvise64
:
11523 /* 5 args: fd, offset (high, low), len, advice */
11524 if (regpairs_aligned(cpu_env
, num
)) {
11525 /* offset is in (3,4), len in 5 and advice in 6 */
11531 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11532 return -host_to_target_errno(ret
);
11535 #else /* not a 32-bit ABI */
11536 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11537 #ifdef TARGET_NR_fadvise64_64
11538 case TARGET_NR_fadvise64_64
:
11540 #ifdef TARGET_NR_fadvise64
11541 case TARGET_NR_fadvise64
:
11543 #ifdef TARGET_S390X
11545 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11546 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11547 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11548 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11552 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11554 #endif /* end of 64-bit ABI fadvise handling */
11556 #ifdef TARGET_NR_madvise
11557 case TARGET_NR_madvise
:
11558 /* A straight passthrough may not be safe because qemu sometimes
11559 turns private file-backed mappings into anonymous mappings.
11560 This will break MADV_DONTNEED.
11561 This is a hint, so ignoring and returning success is ok. */
11564 #ifdef TARGET_NR_fcntl64
11565 case TARGET_NR_fcntl64
:
11569 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11570 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11573 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11574 copyfrom
= copy_from_user_oabi_flock64
;
11575 copyto
= copy_to_user_oabi_flock64
;
11579 cmd
= target_to_host_fcntl_cmd(arg2
);
11580 if (cmd
== -TARGET_EINVAL
) {
11585 case TARGET_F_GETLK64
:
11586 ret
= copyfrom(&fl
, arg3
);
11590 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11592 ret
= copyto(arg3
, &fl
);
11596 case TARGET_F_SETLK64
:
11597 case TARGET_F_SETLKW64
:
11598 ret
= copyfrom(&fl
, arg3
);
11602 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11605 ret
= do_fcntl(arg1
, arg2
, arg3
);
11611 #ifdef TARGET_NR_cacheflush
11612 case TARGET_NR_cacheflush
:
11613 /* self-modifying code is handled automatically, so nothing needed */
11616 #ifdef TARGET_NR_getpagesize
11617 case TARGET_NR_getpagesize
:
11618 return TARGET_PAGE_SIZE
;
11620 case TARGET_NR_gettid
:
11621 return get_errno(sys_gettid());
11622 #ifdef TARGET_NR_readahead
11623 case TARGET_NR_readahead
:
11624 #if TARGET_ABI_BITS == 32
11625 if (regpairs_aligned(cpu_env
, num
)) {
11630 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11632 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11637 #ifdef TARGET_NR_setxattr
11638 case TARGET_NR_listxattr
:
11639 case TARGET_NR_llistxattr
:
11643 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11645 return -TARGET_EFAULT
;
11648 p
= lock_user_string(arg1
);
11650 if (num
== TARGET_NR_listxattr
) {
11651 ret
= get_errno(listxattr(p
, b
, arg3
));
11653 ret
= get_errno(llistxattr(p
, b
, arg3
));
11656 ret
= -TARGET_EFAULT
;
11658 unlock_user(p
, arg1
, 0);
11659 unlock_user(b
, arg2
, arg3
);
11662 case TARGET_NR_flistxattr
:
11666 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11668 return -TARGET_EFAULT
;
11671 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11672 unlock_user(b
, arg2
, arg3
);
11675 case TARGET_NR_setxattr
:
11676 case TARGET_NR_lsetxattr
:
11678 void *p
, *n
, *v
= 0;
11680 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11682 return -TARGET_EFAULT
;
11685 p
= lock_user_string(arg1
);
11686 n
= lock_user_string(arg2
);
11688 if (num
== TARGET_NR_setxattr
) {
11689 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11691 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11694 ret
= -TARGET_EFAULT
;
11696 unlock_user(p
, arg1
, 0);
11697 unlock_user(n
, arg2
, 0);
11698 unlock_user(v
, arg3
, 0);
11701 case TARGET_NR_fsetxattr
:
11705 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11707 return -TARGET_EFAULT
;
11710 n
= lock_user_string(arg2
);
11712 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11714 ret
= -TARGET_EFAULT
;
11716 unlock_user(n
, arg2
, 0);
11717 unlock_user(v
, arg3
, 0);
11720 case TARGET_NR_getxattr
:
11721 case TARGET_NR_lgetxattr
:
11723 void *p
, *n
, *v
= 0;
11725 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11727 return -TARGET_EFAULT
;
11730 p
= lock_user_string(arg1
);
11731 n
= lock_user_string(arg2
);
11733 if (num
== TARGET_NR_getxattr
) {
11734 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11736 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11739 ret
= -TARGET_EFAULT
;
11741 unlock_user(p
, arg1
, 0);
11742 unlock_user(n
, arg2
, 0);
11743 unlock_user(v
, arg3
, arg4
);
11746 case TARGET_NR_fgetxattr
:
11750 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11752 return -TARGET_EFAULT
;
11755 n
= lock_user_string(arg2
);
11757 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11759 ret
= -TARGET_EFAULT
;
11761 unlock_user(n
, arg2
, 0);
11762 unlock_user(v
, arg3
, arg4
);
11765 case TARGET_NR_removexattr
:
11766 case TARGET_NR_lremovexattr
:
11769 p
= lock_user_string(arg1
);
11770 n
= lock_user_string(arg2
);
11772 if (num
== TARGET_NR_removexattr
) {
11773 ret
= get_errno(removexattr(p
, n
));
11775 ret
= get_errno(lremovexattr(p
, n
));
11778 ret
= -TARGET_EFAULT
;
11780 unlock_user(p
, arg1
, 0);
11781 unlock_user(n
, arg2
, 0);
11784 case TARGET_NR_fremovexattr
:
11787 n
= lock_user_string(arg2
);
11789 ret
= get_errno(fremovexattr(arg1
, n
));
11791 ret
= -TARGET_EFAULT
;
11793 unlock_user(n
, arg2
, 0);
11797 #endif /* CONFIG_ATTR */
11798 #ifdef TARGET_NR_set_thread_area
11799 case TARGET_NR_set_thread_area
:
11800 #if defined(TARGET_MIPS)
11801 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11803 #elif defined(TARGET_CRIS)
11805 ret
= -TARGET_EINVAL
;
11807 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11811 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11812 return do_set_thread_area(cpu_env
, arg1
);
11813 #elif defined(TARGET_M68K)
11815 TaskState
*ts
= cpu
->opaque
;
11816 ts
->tp_value
= arg1
;
11820 return -TARGET_ENOSYS
;
11823 #ifdef TARGET_NR_get_thread_area
11824 case TARGET_NR_get_thread_area
:
11825 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11826 return do_get_thread_area(cpu_env
, arg1
);
11827 #elif defined(TARGET_M68K)
11829 TaskState
*ts
= cpu
->opaque
;
11830 return ts
->tp_value
;
11833 return -TARGET_ENOSYS
;
11836 #ifdef TARGET_NR_getdomainname
11837 case TARGET_NR_getdomainname
:
11838 return -TARGET_ENOSYS
;
11841 #ifdef TARGET_NR_clock_settime
11842 case TARGET_NR_clock_settime
:
11844 struct timespec ts
;
11846 ret
= target_to_host_timespec(&ts
, arg2
);
11847 if (!is_error(ret
)) {
11848 ret
= get_errno(clock_settime(arg1
, &ts
));
11853 #ifdef TARGET_NR_clock_settime64
11854 case TARGET_NR_clock_settime64
:
11856 struct timespec ts
;
11858 ret
= target_to_host_timespec64(&ts
, arg2
);
11859 if (!is_error(ret
)) {
11860 ret
= get_errno(clock_settime(arg1
, &ts
));
11865 #ifdef TARGET_NR_clock_gettime
11866 case TARGET_NR_clock_gettime
:
11868 struct timespec ts
;
11869 ret
= get_errno(clock_gettime(arg1
, &ts
));
11870 if (!is_error(ret
)) {
11871 ret
= host_to_target_timespec(arg2
, &ts
);
11876 #ifdef TARGET_NR_clock_gettime64
11877 case TARGET_NR_clock_gettime64
:
11879 struct timespec ts
;
11880 ret
= get_errno(clock_gettime(arg1
, &ts
));
11881 if (!is_error(ret
)) {
11882 ret
= host_to_target_timespec64(arg2
, &ts
);
11887 #ifdef TARGET_NR_clock_getres
11888 case TARGET_NR_clock_getres
:
11890 struct timespec ts
;
11891 ret
= get_errno(clock_getres(arg1
, &ts
));
11892 if (!is_error(ret
)) {
11893 host_to_target_timespec(arg2
, &ts
);
11898 #ifdef TARGET_NR_clock_getres_time64
11899 case TARGET_NR_clock_getres_time64
:
11901 struct timespec ts
;
11902 ret
= get_errno(clock_getres(arg1
, &ts
));
11903 if (!is_error(ret
)) {
11904 host_to_target_timespec64(arg2
, &ts
);
11909 #ifdef TARGET_NR_clock_nanosleep
11910 case TARGET_NR_clock_nanosleep
:
11912 struct timespec ts
;
11913 if (target_to_host_timespec(&ts
, arg3
)) {
11914 return -TARGET_EFAULT
;
11916 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11917 &ts
, arg4
? &ts
: NULL
));
11919 * if the call is interrupted by a signal handler, it fails
11920 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
11921 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
11923 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
11924 host_to_target_timespec(arg4
, &ts
)) {
11925 return -TARGET_EFAULT
;
11932 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11933 case TARGET_NR_set_tid_address
:
11934 return get_errno(set_tid_address((int *)g2h(arg1
)));
11937 case TARGET_NR_tkill
:
11938 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11940 case TARGET_NR_tgkill
:
11941 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11942 target_to_host_signal(arg3
)));
11944 #ifdef TARGET_NR_set_robust_list
11945 case TARGET_NR_set_robust_list
:
11946 case TARGET_NR_get_robust_list
:
11947 /* The ABI for supporting robust futexes has userspace pass
11948 * the kernel a pointer to a linked list which is updated by
11949 * userspace after the syscall; the list is walked by the kernel
11950 * when the thread exits. Since the linked list in QEMU guest
11951 * memory isn't a valid linked list for the host and we have
11952 * no way to reliably intercept the thread-death event, we can't
11953 * support these. Silently return ENOSYS so that guest userspace
11954 * falls back to a non-robust futex implementation (which should
11955 * be OK except in the corner case of the guest crashing while
11956 * holding a mutex that is shared with another process via
11959 return -TARGET_ENOSYS
;
11962 #if defined(TARGET_NR_utimensat)
11963 case TARGET_NR_utimensat
:
11965 struct timespec
*tsp
, ts
[2];
11969 if (target_to_host_timespec(ts
, arg3
)) {
11970 return -TARGET_EFAULT
;
11972 if (target_to_host_timespec(ts
+ 1, arg3
+
11973 sizeof(struct target_timespec
))) {
11974 return -TARGET_EFAULT
;
11979 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11981 if (!(p
= lock_user_string(arg2
))) {
11982 return -TARGET_EFAULT
;
11984 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11985 unlock_user(p
, arg2
, 0);
11990 #ifdef TARGET_NR_futex
11991 case TARGET_NR_futex
:
11992 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11994 #ifdef TARGET_NR_futex_time64
11995 case TARGET_NR_futex_time64
:
11996 return do_futex_time64(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11998 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11999 case TARGET_NR_inotify_init
:
12000 ret
= get_errno(sys_inotify_init());
12002 fd_trans_register(ret
, &target_inotify_trans
);
12006 #ifdef CONFIG_INOTIFY1
12007 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12008 case TARGET_NR_inotify_init1
:
12009 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12010 fcntl_flags_tbl
)));
12012 fd_trans_register(ret
, &target_inotify_trans
);
12017 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12018 case TARGET_NR_inotify_add_watch
:
12019 p
= lock_user_string(arg2
);
12020 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12021 unlock_user(p
, arg2
, 0);
12024 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12025 case TARGET_NR_inotify_rm_watch
:
12026 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12029 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12030 case TARGET_NR_mq_open
:
12032 struct mq_attr posix_mq_attr
;
12033 struct mq_attr
*pposix_mq_attr
;
12036 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12037 pposix_mq_attr
= NULL
;
12039 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12040 return -TARGET_EFAULT
;
12042 pposix_mq_attr
= &posix_mq_attr
;
12044 p
= lock_user_string(arg1
- 1);
12046 return -TARGET_EFAULT
;
12048 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12049 unlock_user (p
, arg1
, 0);
12053 case TARGET_NR_mq_unlink
:
12054 p
= lock_user_string(arg1
- 1);
12056 return -TARGET_EFAULT
;
12058 ret
= get_errno(mq_unlink(p
));
12059 unlock_user (p
, arg1
, 0);
12062 #ifdef TARGET_NR_mq_timedsend
12063 case TARGET_NR_mq_timedsend
:
12065 struct timespec ts
;
12067 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12069 target_to_host_timespec(&ts
, arg5
);
12070 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12071 host_to_target_timespec(arg5
, &ts
);
12073 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12075 unlock_user (p
, arg2
, arg3
);
12080 #ifdef TARGET_NR_mq_timedreceive
12081 case TARGET_NR_mq_timedreceive
:
12083 struct timespec ts
;
12086 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12088 target_to_host_timespec(&ts
, arg5
);
12089 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12091 host_to_target_timespec(arg5
, &ts
);
12093 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12096 unlock_user (p
, arg2
, arg3
);
12098 put_user_u32(prio
, arg4
);
12103 /* Not implemented for now... */
12104 /* case TARGET_NR_mq_notify: */
12107 case TARGET_NR_mq_getsetattr
:
12109 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12112 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12113 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12114 &posix_mq_attr_out
));
12115 } else if (arg3
!= 0) {
12116 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12118 if (ret
== 0 && arg3
!= 0) {
12119 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12125 #ifdef CONFIG_SPLICE
12126 #ifdef TARGET_NR_tee
12127 case TARGET_NR_tee
:
12129 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12133 #ifdef TARGET_NR_splice
12134 case TARGET_NR_splice
:
12136 loff_t loff_in
, loff_out
;
12137 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12139 if (get_user_u64(loff_in
, arg2
)) {
12140 return -TARGET_EFAULT
;
12142 ploff_in
= &loff_in
;
12145 if (get_user_u64(loff_out
, arg4
)) {
12146 return -TARGET_EFAULT
;
12148 ploff_out
= &loff_out
;
12150 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12152 if (put_user_u64(loff_in
, arg2
)) {
12153 return -TARGET_EFAULT
;
12157 if (put_user_u64(loff_out
, arg4
)) {
12158 return -TARGET_EFAULT
;
12164 #ifdef TARGET_NR_vmsplice
12165 case TARGET_NR_vmsplice
:
12167 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12169 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12170 unlock_iovec(vec
, arg2
, arg3
, 0);
12172 ret
= -host_to_target_errno(errno
);
12177 #endif /* CONFIG_SPLICE */
12178 #ifdef CONFIG_EVENTFD
12179 #if defined(TARGET_NR_eventfd)
12180 case TARGET_NR_eventfd
:
12181 ret
= get_errno(eventfd(arg1
, 0));
12183 fd_trans_register(ret
, &target_eventfd_trans
);
12187 #if defined(TARGET_NR_eventfd2)
12188 case TARGET_NR_eventfd2
:
12190 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12191 if (arg2
& TARGET_O_NONBLOCK
) {
12192 host_flags
|= O_NONBLOCK
;
12194 if (arg2
& TARGET_O_CLOEXEC
) {
12195 host_flags
|= O_CLOEXEC
;
12197 ret
= get_errno(eventfd(arg1
, host_flags
));
12199 fd_trans_register(ret
, &target_eventfd_trans
);
12204 #endif /* CONFIG_EVENTFD */
12205 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12206 case TARGET_NR_fallocate
:
12207 #if TARGET_ABI_BITS == 32
12208 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12209 target_offset64(arg5
, arg6
)));
12211 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12215 #if defined(CONFIG_SYNC_FILE_RANGE)
12216 #if defined(TARGET_NR_sync_file_range)
12217 case TARGET_NR_sync_file_range
:
12218 #if TARGET_ABI_BITS == 32
12219 #if defined(TARGET_MIPS)
12220 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12221 target_offset64(arg5
, arg6
), arg7
));
12223 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12224 target_offset64(arg4
, arg5
), arg6
));
12225 #endif /* !TARGET_MIPS */
12227 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12231 #if defined(TARGET_NR_sync_file_range2) || \
12232 defined(TARGET_NR_arm_sync_file_range)
12233 #if defined(TARGET_NR_sync_file_range2)
12234 case TARGET_NR_sync_file_range2
:
12236 #if defined(TARGET_NR_arm_sync_file_range)
12237 case TARGET_NR_arm_sync_file_range
:
12239 /* This is like sync_file_range but the arguments are reordered */
12240 #if TARGET_ABI_BITS == 32
12241 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12242 target_offset64(arg5
, arg6
), arg2
));
12244 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12249 #if defined(TARGET_NR_signalfd4)
12250 case TARGET_NR_signalfd4
:
12251 return do_signalfd4(arg1
, arg2
, arg4
);
12253 #if defined(TARGET_NR_signalfd)
12254 case TARGET_NR_signalfd
:
12255 return do_signalfd4(arg1
, arg2
, 0);
12257 #if defined(CONFIG_EPOLL)
12258 #if defined(TARGET_NR_epoll_create)
12259 case TARGET_NR_epoll_create
:
12260 return get_errno(epoll_create(arg1
));
12262 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12263 case TARGET_NR_epoll_create1
:
12264 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12266 #if defined(TARGET_NR_epoll_ctl)
12267 case TARGET_NR_epoll_ctl
:
12269 struct epoll_event ep
;
12270 struct epoll_event
*epp
= 0;
12272 struct target_epoll_event
*target_ep
;
12273 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12274 return -TARGET_EFAULT
;
12276 ep
.events
= tswap32(target_ep
->events
);
12277 /* The epoll_data_t union is just opaque data to the kernel,
12278 * so we transfer all 64 bits across and need not worry what
12279 * actual data type it is.
12281 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12282 unlock_user_struct(target_ep
, arg4
, 0);
12285 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12289 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12290 #if defined(TARGET_NR_epoll_wait)
12291 case TARGET_NR_epoll_wait
:
12293 #if defined(TARGET_NR_epoll_pwait)
12294 case TARGET_NR_epoll_pwait
:
12297 struct target_epoll_event
*target_ep
;
12298 struct epoll_event
*ep
;
12300 int maxevents
= arg3
;
12301 int timeout
= arg4
;
12303 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12304 return -TARGET_EINVAL
;
12307 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12308 maxevents
* sizeof(struct target_epoll_event
), 1);
12310 return -TARGET_EFAULT
;
12313 ep
= g_try_new(struct epoll_event
, maxevents
);
12315 unlock_user(target_ep
, arg2
, 0);
12316 return -TARGET_ENOMEM
;
12320 #if defined(TARGET_NR_epoll_pwait)
12321 case TARGET_NR_epoll_pwait
:
12323 target_sigset_t
*target_set
;
12324 sigset_t _set
, *set
= &_set
;
12327 if (arg6
!= sizeof(target_sigset_t
)) {
12328 ret
= -TARGET_EINVAL
;
12332 target_set
= lock_user(VERIFY_READ
, arg5
,
12333 sizeof(target_sigset_t
), 1);
12335 ret
= -TARGET_EFAULT
;
12338 target_to_host_sigset(set
, target_set
);
12339 unlock_user(target_set
, arg5
, 0);
12344 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12345 set
, SIGSET_T_SIZE
));
12349 #if defined(TARGET_NR_epoll_wait)
12350 case TARGET_NR_epoll_wait
:
12351 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12356 ret
= -TARGET_ENOSYS
;
12358 if (!is_error(ret
)) {
12360 for (i
= 0; i
< ret
; i
++) {
12361 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12362 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12364 unlock_user(target_ep
, arg2
,
12365 ret
* sizeof(struct target_epoll_event
));
12367 unlock_user(target_ep
, arg2
, 0);
12374 #ifdef TARGET_NR_prlimit64
12375 case TARGET_NR_prlimit64
:
12377 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12378 struct target_rlimit64
*target_rnew
, *target_rold
;
12379 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12380 int resource
= target_to_host_resource(arg2
);
12382 if (arg3
&& (resource
!= RLIMIT_AS
&&
12383 resource
!= RLIMIT_DATA
&&
12384 resource
!= RLIMIT_STACK
)) {
12385 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12386 return -TARGET_EFAULT
;
12388 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12389 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12390 unlock_user_struct(target_rnew
, arg3
, 0);
12394 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12395 if (!is_error(ret
) && arg4
) {
12396 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12397 return -TARGET_EFAULT
;
12399 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12400 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12401 unlock_user_struct(target_rold
, arg4
, 1);
12406 #ifdef TARGET_NR_gethostname
12407 case TARGET_NR_gethostname
:
12409 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12411 ret
= get_errno(gethostname(name
, arg2
));
12412 unlock_user(name
, arg1
, arg2
);
12414 ret
= -TARGET_EFAULT
;
12419 #ifdef TARGET_NR_atomic_cmpxchg_32
12420 case TARGET_NR_atomic_cmpxchg_32
:
12422 /* should use start_exclusive from main.c */
12423 abi_ulong mem_value
;
12424 if (get_user_u32(mem_value
, arg6
)) {
12425 target_siginfo_t info
;
12426 info
.si_signo
= SIGSEGV
;
12428 info
.si_code
= TARGET_SEGV_MAPERR
;
12429 info
._sifields
._sigfault
._addr
= arg6
;
12430 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12431 QEMU_SI_FAULT
, &info
);
12435 if (mem_value
== arg2
)
12436 put_user_u32(arg1
, arg6
);
12440 #ifdef TARGET_NR_atomic_barrier
12441 case TARGET_NR_atomic_barrier
:
12442 /* Like the kernel implementation and the
12443 qemu arm barrier, no-op this? */
12447 #ifdef TARGET_NR_timer_create
12448 case TARGET_NR_timer_create
:
12450 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12452 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12455 int timer_index
= next_free_host_timer();
12457 if (timer_index
< 0) {
12458 ret
= -TARGET_EAGAIN
;
12460 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12463 phost_sevp
= &host_sevp
;
12464 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12470 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12474 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12475 return -TARGET_EFAULT
;
12483 #ifdef TARGET_NR_timer_settime
12484 case TARGET_NR_timer_settime
:
12486 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12487 * struct itimerspec * old_value */
12488 target_timer_t timerid
= get_timer_id(arg1
);
12492 } else if (arg3
== 0) {
12493 ret
= -TARGET_EINVAL
;
12495 timer_t htimer
= g_posix_timers
[timerid
];
12496 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12498 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12499 return -TARGET_EFAULT
;
12502 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12503 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12504 return -TARGET_EFAULT
;
12511 #ifdef TARGET_NR_timer_settime64
12512 case TARGET_NR_timer_settime64
:
12514 target_timer_t timerid
= get_timer_id(arg1
);
12518 } else if (arg3
== 0) {
12519 ret
= -TARGET_EINVAL
;
12521 timer_t htimer
= g_posix_timers
[timerid
];
12522 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12524 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12525 return -TARGET_EFAULT
;
12528 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12529 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12530 return -TARGET_EFAULT
;
12537 #ifdef TARGET_NR_timer_gettime
12538 case TARGET_NR_timer_gettime
:
12540 /* args: timer_t timerid, struct itimerspec *curr_value */
12541 target_timer_t timerid
= get_timer_id(arg1
);
12545 } else if (!arg2
) {
12546 ret
= -TARGET_EFAULT
;
12548 timer_t htimer
= g_posix_timers
[timerid
];
12549 struct itimerspec hspec
;
12550 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12552 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12553 ret
= -TARGET_EFAULT
;
12560 #ifdef TARGET_NR_timer_gettime64
12561 case TARGET_NR_timer_gettime64
:
12563 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12564 target_timer_t timerid
= get_timer_id(arg1
);
12568 } else if (!arg2
) {
12569 ret
= -TARGET_EFAULT
;
12571 timer_t htimer
= g_posix_timers
[timerid
];
12572 struct itimerspec hspec
;
12573 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12575 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12576 ret
= -TARGET_EFAULT
;
12583 #ifdef TARGET_NR_timer_getoverrun
12584 case TARGET_NR_timer_getoverrun
:
12586 /* args: timer_t timerid */
12587 target_timer_t timerid
= get_timer_id(arg1
);
12592 timer_t htimer
= g_posix_timers
[timerid
];
12593 ret
= get_errno(timer_getoverrun(htimer
));
12599 #ifdef TARGET_NR_timer_delete
12600 case TARGET_NR_timer_delete
:
12602 /* args: timer_t timerid */
12603 target_timer_t timerid
= get_timer_id(arg1
);
12608 timer_t htimer
= g_posix_timers
[timerid
];
12609 ret
= get_errno(timer_delete(htimer
));
12610 g_posix_timers
[timerid
] = 0;
12616 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12617 case TARGET_NR_timerfd_create
:
12618 return get_errno(timerfd_create(arg1
,
12619 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12622 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12623 case TARGET_NR_timerfd_gettime
:
12625 struct itimerspec its_curr
;
12627 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12629 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12630 return -TARGET_EFAULT
;
12636 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12637 case TARGET_NR_timerfd_gettime64
:
12639 struct itimerspec its_curr
;
12641 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12643 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
12644 return -TARGET_EFAULT
;
12650 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12651 case TARGET_NR_timerfd_settime
:
12653 struct itimerspec its_new
, its_old
, *p_new
;
12656 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12657 return -TARGET_EFAULT
;
12664 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12666 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12667 return -TARGET_EFAULT
;
12673 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12674 case TARGET_NR_timerfd_settime64
:
12676 struct itimerspec its_new
, its_old
, *p_new
;
12679 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
12680 return -TARGET_EFAULT
;
12687 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12689 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
12690 return -TARGET_EFAULT
;
12696 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12697 case TARGET_NR_ioprio_get
:
12698 return get_errno(ioprio_get(arg1
, arg2
));
12701 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12702 case TARGET_NR_ioprio_set
:
12703 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12706 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12707 case TARGET_NR_setns
:
12708 return get_errno(setns(arg1
, arg2
));
12710 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12711 case TARGET_NR_unshare
:
12712 return get_errno(unshare(arg1
));
12714 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12715 case TARGET_NR_kcmp
:
12716 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12718 #ifdef TARGET_NR_swapcontext
12719 case TARGET_NR_swapcontext
:
12720 /* PowerPC specific. */
12721 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12723 #ifdef TARGET_NR_memfd_create
12724 case TARGET_NR_memfd_create
:
12725 p
= lock_user_string(arg1
);
12727 return -TARGET_EFAULT
;
12729 ret
= get_errno(memfd_create(p
, arg2
));
12730 fd_trans_unregister(ret
);
12731 unlock_user(p
, arg1
, 0);
12734 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12735 case TARGET_NR_membarrier
:
12736 return get_errno(membarrier(arg1
, arg2
));
12740 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12741 return -TARGET_ENOSYS
;
12746 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12747 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12748 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12751 CPUState
*cpu
= env_cpu(cpu_env
);
12754 #ifdef DEBUG_ERESTARTSYS
12755 /* Debug-only code for exercising the syscall-restart code paths
12756 * in the per-architecture cpu main loops: restart every syscall
12757 * the guest makes once before letting it through.
12763 return -TARGET_ERESTARTSYS
;
12768 record_syscall_start(cpu
, num
, arg1
,
12769 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
12771 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12772 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12775 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12776 arg5
, arg6
, arg7
, arg8
);
12778 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
12779 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
12780 arg3
, arg4
, arg5
, arg6
);
12783 record_syscall_return(cpu
, num
, ret
);