4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
59 #include <sys/timerfd.h>
62 #include <sys/eventfd.h>
65 #include <sys/epoll.h>
68 #include "qemu/xattr.h"
70 #ifdef CONFIG_SENDFILE
71 #include <sys/sendfile.h>
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/cdrom.h>
84 #include <linux/hdreg.h>
85 #include <linux/soundcard.h>
87 #include <linux/mtio.h>
89 #if defined(CONFIG_FIEMAP)
90 #include <linux/fiemap.h>
93 #if defined(CONFIG_USBFS)
94 #include <linux/usbdevice_fs.h>
95 #include <linux/usb/ch9.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include <linux/if_alg.h>
106 #include "linux_loop.h"
110 #include "qemu/guest-random.h"
111 #include "qapi/error.h"
112 #include "fd-trans.h"
115 #define CLONE_IO 0x80000000 /* Clone io context */
118 /* We can't directly call the host clone syscall, because this will
119 * badly confuse libc (breaking mutexes, for example). So we must
120 * divide clone flags into:
121 * * flag combinations that look like pthread_create()
122 * * flag combinations that look like fork()
123 * * flags we can implement within QEMU itself
124 * * flags we can't support and will return an error for
126 /* For thread creation, all these flags must be present; for
127 * fork, none must be present.
129 #define CLONE_THREAD_FLAGS \
130 (CLONE_VM | CLONE_FS | CLONE_FILES | \
131 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 /* These flags are ignored:
134 * CLONE_DETACHED is now ignored by the kernel;
135 * CLONE_IO is just an optimisation hint to the I/O scheduler
137 #define CLONE_IGNORED_FLAGS \
138 (CLONE_DETACHED | CLONE_IO)
140 /* Flags for fork which we can implement within QEMU itself */
141 #define CLONE_OPTIONAL_FORK_FLAGS \
142 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
143 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 /* Flags for thread creation which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_THREAD_FLAGS \
147 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
148 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 #define CLONE_INVALID_FORK_FLAGS \
151 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 #define CLONE_INVALID_THREAD_FLAGS \
154 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
155 CLONE_IGNORED_FLAGS))
157 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
158 * have almost all been allocated. We cannot support any of
159 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
160 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
161 * The checks against the invalid thread masks above will catch these.
162 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
165 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
166 * once. This exercises the codepaths for restart.
168 //#define DEBUG_ERESTARTSYS
170 //#include <linux/msdos_fs.h>
171 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
172 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
182 #define _syscall0(type,name) \
183 static type name (void) \
185 return syscall(__NR_##name); \
188 #define _syscall1(type,name,type1,arg1) \
189 static type name (type1 arg1) \
191 return syscall(__NR_##name, arg1); \
194 #define _syscall2(type,name,type1,arg1,type2,arg2) \
195 static type name (type1 arg1,type2 arg2) \
197 return syscall(__NR_##name, arg1, arg2); \
200 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
201 static type name (type1 arg1,type2 arg2,type3 arg3) \
203 return syscall(__NR_##name, arg1, arg2, arg3); \
206 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
207 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
209 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
212 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
216 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
220 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
221 type5,arg5,type6,arg6) \
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
225 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
229 #define __NR_sys_uname __NR_uname
230 #define __NR_sys_getcwd1 __NR_getcwd
231 #define __NR_sys_getdents __NR_getdents
232 #define __NR_sys_getdents64 __NR_getdents64
233 #define __NR_sys_getpriority __NR_getpriority
234 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
235 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
236 #define __NR_sys_syslog __NR_syslog
237 #define __NR_sys_futex __NR_futex
238 #define __NR_sys_inotify_init __NR_inotify_init
239 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
240 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
241 #define __NR_sys_statx __NR_statx
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
252 #define __NR_sys_gettid __NR_gettid
253 _syscall0(int, sys_gettid
)
255 /* For the 64-bit guest on 32-bit host case we must emulate
256 * getdents using getdents64, because otherwise the host
257 * might hand us back more dirent records than we can fit
258 * into the guest buffer after structure format conversion.
259 * Otherwise we emulate getdents with getdents if the host has it.
261 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
262 #define EMULATE_GETDENTS_WITH_GETDENTS
265 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
266 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
268 #if (defined(TARGET_NR_getdents) && \
269 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
270 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
271 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
273 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
274 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
275 loff_t
*, res
, uint
, wh
);
277 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
278 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
280 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
281 #ifdef __NR_exit_group
282 _syscall1(int,exit_group
,int,error_code
)
284 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
285 _syscall1(int,set_tid_address
,int *,tidptr
)
287 #if defined(TARGET_NR_futex) && defined(__NR_futex)
288 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
289 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
291 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
292 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
293 unsigned long *, user_mask_ptr
);
294 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
295 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
296 unsigned long *, user_mask_ptr
);
297 #define __NR_sys_getcpu __NR_getcpu
298 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
299 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
301 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
302 struct __user_cap_data_struct
*, data
);
303 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
304 struct __user_cap_data_struct
*, data
);
305 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
306 _syscall2(int, ioprio_get
, int, which
, int, who
)
308 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
309 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
311 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
312 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
315 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
316 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
317 unsigned long, idx1
, unsigned long, idx2
)
321 * It is assumed that struct statx is architecture independent.
323 #if defined(TARGET_NR_statx) && defined(__NR_statx)
324 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
325 unsigned int, mask
, struct target_statx
*, statxbuf
)
328 static bitmask_transtbl fcntl_flags_tbl
[] = {
329 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
330 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
331 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
332 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
333 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
334 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
335 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
336 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
337 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
338 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
339 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
340 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
341 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
352 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Wrapper used to emulate the guest getcwd() syscall via the libc call.
 * On success the kernel syscall returns the string length INCLUDING the
 * trailing NUL, so we return strlen(buf) + 1 to match; on failure we
 * return -1 and let getcwd() set errno.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
373 #ifdef TARGET_NR_utimensat
374 #if defined(__NR_utimensat)
375 #define __NR_sys_utimensat __NR_utimensat
376 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
377 const struct timespec
*,tsp
,int,flags
)
/* Fallback for hosts whose kernel headers lack __NR_utimensat: always
 * fail with ENOSYS so the guest sees the syscall as unimplemented.
 * Parameters are accepted but unused.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
386 #endif /* TARGET_NR_utimensat */
388 #ifdef TARGET_NR_renameat2
389 #if defined(__NR_renameat2)
390 #define __NR_sys_renameat2 __NR_renameat2
391 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
392 const char *, new, unsigned int, flags
)
394 static int sys_renameat2(int oldfd
, const char *old
,
395 int newfd
, const char *new, int flags
)
398 return renameat(oldfd
, old
, newfd
, new);
404 #endif /* TARGET_NR_renameat2 */
406 #ifdef CONFIG_INOTIFY
407 #include <sys/inotify.h>
409 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the libc inotify_init(); returns a new inotify fd
 * or -1 with errno set. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
415 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over the libc inotify_add_watch(); returns a watch
 * descriptor or -1 with errno set. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
421 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over the libc inotify_rm_watch(); returns 0 on success
 * or -1 with errno set. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
427 #ifdef CONFIG_INOTIFY1
428 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over the libc inotify_init1(); returns a new inotify fd
 * (with the requested IN_NONBLOCK/IN_CLOEXEC flags) or -1 with errno set. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
436 /* Userspace can usually survive runtime without inotify */
437 #undef TARGET_NR_inotify_init
438 #undef TARGET_NR_inotify_init1
439 #undef TARGET_NR_inotify_add_watch
440 #undef TARGET_NR_inotify_rm_watch
441 #endif /* CONFIG_INOTIFY */
443 #if defined(TARGET_NR_prlimit64)
444 #ifndef __NR_prlimit64
445 # define __NR_prlimit64 -1
447 #define __NR_sys_prlimit64 __NR_prlimit64
448 /* The glibc rlimit structure may not be that used by the underlying syscall */
449 struct host_rlimit64
{
453 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
454 const struct host_rlimit64
*, new_limit
,
455 struct host_rlimit64
*, old_limit
)
459 #if defined(TARGET_NR_timer_create)
460 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
461 static timer_t g_posix_timers
[32] = { 0, } ;
463 static inline int next_free_host_timer(void)
466 /* FIXME: Does finding the next free slot require a lock? */
467 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
468 if (g_posix_timers
[k
] == 0) {
469 g_posix_timers
[k
] = (timer_t
) 1;
477 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
479 static inline int regpairs_aligned(void *cpu_env
, int num
)
481 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
483 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
484 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
485 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
486 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
487 * of registers which translates to the same as ARM/MIPS, because we start with
489 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
490 #elif defined(TARGET_SH4)
491 /* SH4 doesn't align register pairs, except for p{read,write}64 */
492 static inline int regpairs_aligned(void *cpu_env
, int num
)
495 case TARGET_NR_pread64
:
496 case TARGET_NR_pwrite64
:
503 #elif defined(TARGET_XTENSA)
504 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
506 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
509 #define ERRNO_TABLE_SIZE 1200
511 /* target_to_host_errno_table[] is initialized from
512 * host_to_target_errno_table[] in syscall_init(). */
513 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
517 * This list is the union of errno values overridden in asm-<arch>/errno.h
518 * minus the errnos that are not actually generic to all archs.
520 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
521 [EAGAIN
] = TARGET_EAGAIN
,
522 [EIDRM
] = TARGET_EIDRM
,
523 [ECHRNG
] = TARGET_ECHRNG
,
524 [EL2NSYNC
] = TARGET_EL2NSYNC
,
525 [EL3HLT
] = TARGET_EL3HLT
,
526 [EL3RST
] = TARGET_EL3RST
,
527 [ELNRNG
] = TARGET_ELNRNG
,
528 [EUNATCH
] = TARGET_EUNATCH
,
529 [ENOCSI
] = TARGET_ENOCSI
,
530 [EL2HLT
] = TARGET_EL2HLT
,
531 [EDEADLK
] = TARGET_EDEADLK
,
532 [ENOLCK
] = TARGET_ENOLCK
,
533 [EBADE
] = TARGET_EBADE
,
534 [EBADR
] = TARGET_EBADR
,
535 [EXFULL
] = TARGET_EXFULL
,
536 [ENOANO
] = TARGET_ENOANO
,
537 [EBADRQC
] = TARGET_EBADRQC
,
538 [EBADSLT
] = TARGET_EBADSLT
,
539 [EBFONT
] = TARGET_EBFONT
,
540 [ENOSTR
] = TARGET_ENOSTR
,
541 [ENODATA
] = TARGET_ENODATA
,
542 [ETIME
] = TARGET_ETIME
,
543 [ENOSR
] = TARGET_ENOSR
,
544 [ENONET
] = TARGET_ENONET
,
545 [ENOPKG
] = TARGET_ENOPKG
,
546 [EREMOTE
] = TARGET_EREMOTE
,
547 [ENOLINK
] = TARGET_ENOLINK
,
548 [EADV
] = TARGET_EADV
,
549 [ESRMNT
] = TARGET_ESRMNT
,
550 [ECOMM
] = TARGET_ECOMM
,
551 [EPROTO
] = TARGET_EPROTO
,
552 [EDOTDOT
] = TARGET_EDOTDOT
,
553 [EMULTIHOP
] = TARGET_EMULTIHOP
,
554 [EBADMSG
] = TARGET_EBADMSG
,
555 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
556 [EOVERFLOW
] = TARGET_EOVERFLOW
,
557 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
558 [EBADFD
] = TARGET_EBADFD
,
559 [EREMCHG
] = TARGET_EREMCHG
,
560 [ELIBACC
] = TARGET_ELIBACC
,
561 [ELIBBAD
] = TARGET_ELIBBAD
,
562 [ELIBSCN
] = TARGET_ELIBSCN
,
563 [ELIBMAX
] = TARGET_ELIBMAX
,
564 [ELIBEXEC
] = TARGET_ELIBEXEC
,
565 [EILSEQ
] = TARGET_EILSEQ
,
566 [ENOSYS
] = TARGET_ENOSYS
,
567 [ELOOP
] = TARGET_ELOOP
,
568 [ERESTART
] = TARGET_ERESTART
,
569 [ESTRPIPE
] = TARGET_ESTRPIPE
,
570 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
571 [EUSERS
] = TARGET_EUSERS
,
572 [ENOTSOCK
] = TARGET_ENOTSOCK
,
573 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
574 [EMSGSIZE
] = TARGET_EMSGSIZE
,
575 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
576 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
577 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
578 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
579 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
580 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
581 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
582 [EADDRINUSE
] = TARGET_EADDRINUSE
,
583 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
584 [ENETDOWN
] = TARGET_ENETDOWN
,
585 [ENETUNREACH
] = TARGET_ENETUNREACH
,
586 [ENETRESET
] = TARGET_ENETRESET
,
587 [ECONNABORTED
] = TARGET_ECONNABORTED
,
588 [ECONNRESET
] = TARGET_ECONNRESET
,
589 [ENOBUFS
] = TARGET_ENOBUFS
,
590 [EISCONN
] = TARGET_EISCONN
,
591 [ENOTCONN
] = TARGET_ENOTCONN
,
592 [EUCLEAN
] = TARGET_EUCLEAN
,
593 [ENOTNAM
] = TARGET_ENOTNAM
,
594 [ENAVAIL
] = TARGET_ENAVAIL
,
595 [EISNAM
] = TARGET_EISNAM
,
596 [EREMOTEIO
] = TARGET_EREMOTEIO
,
597 [EDQUOT
] = TARGET_EDQUOT
,
598 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
599 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
600 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
601 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
602 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
603 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
604 [EALREADY
] = TARGET_EALREADY
,
605 [EINPROGRESS
] = TARGET_EINPROGRESS
,
606 [ESTALE
] = TARGET_ESTALE
,
607 [ECANCELED
] = TARGET_ECANCELED
,
608 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
609 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
611 [ENOKEY
] = TARGET_ENOKEY
,
614 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
617 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
620 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
623 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
625 #ifdef ENOTRECOVERABLE
626 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
629 [ENOMSG
] = TARGET_ENOMSG
,
632 [ERFKILL
] = TARGET_ERFKILL
,
635 [EHWPOISON
] = TARGET_EHWPOISON
,
639 static inline int host_to_target_errno(int err
)
641 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
642 host_to_target_errno_table
[err
]) {
643 return host_to_target_errno_table
[err
];
648 static inline int target_to_host_errno(int err
)
650 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
651 target_to_host_errno_table
[err
]) {
652 return target_to_host_errno_table
[err
];
657 static inline abi_long
get_errno(abi_long ret
)
660 return -host_to_target_errno(errno
);
665 const char *target_strerror(int err
)
667 if (err
== TARGET_ERESTARTSYS
) {
668 return "To be restarted";
670 if (err
== TARGET_QEMU_ESIGRETURN
) {
671 return "Successful exit from sigreturn";
674 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
677 return strerror(target_to_host_errno(err
));
680 #define safe_syscall0(type, name) \
681 static type safe_##name(void) \
683 return safe_syscall(__NR_##name); \
686 #define safe_syscall1(type, name, type1, arg1) \
687 static type safe_##name(type1 arg1) \
689 return safe_syscall(__NR_##name, arg1); \
692 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
693 static type safe_##name(type1 arg1, type2 arg2) \
695 return safe_syscall(__NR_##name, arg1, arg2); \
698 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
699 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
704 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
711 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
712 type4, arg4, type5, arg5) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
716 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
719 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
720 type4, arg4, type5, arg5, type6, arg6) \
721 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
722 type5 arg5, type6 arg6) \
724 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
727 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
728 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
729 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
730 int, flags
, mode_t
, mode
)
731 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
732 struct rusage
*, rusage
)
733 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
734 int, options
, struct rusage
*, rusage
)
735 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
736 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
737 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
738 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
739 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
741 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
742 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
744 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
745 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
746 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
747 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
748 safe_syscall2(int, tkill
, int, tid
, int, sig
)
749 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
750 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
751 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
752 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
753 unsigned long, pos_l
, unsigned long, pos_h
)
754 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
755 unsigned long, pos_l
, unsigned long, pos_h
)
756 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
758 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
759 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
760 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
761 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
762 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
763 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
764 safe_syscall2(int, flock
, int, fd
, int, operation
)
765 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
766 const struct timespec
*, uts
, size_t, sigsetsize
)
767 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
769 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
770 struct timespec
*, rem
)
771 #ifdef TARGET_NR_clock_nanosleep
772 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
773 const struct timespec
*, req
, struct timespec
*, rem
)
776 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
777 void *, ptr
, long, fifth
)
780 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
784 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
785 long, msgtype
, int, flags
)
787 #ifdef __NR_semtimedop
788 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
789 unsigned, nsops
, const struct timespec
*, timeout
)
791 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
792 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
793 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
794 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
795 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
797 /* We do ioctl like this rather than via safe_syscall3 to preserve the
798 * "third argument might be integer or pointer or not present" behaviour of
801 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
802 /* Similarly for fcntl. Note that callers must always:
803 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
804 * use the flock64 struct rather than unsuffixed flock
805 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
808 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
810 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
813 static inline int host_to_target_sock_type(int host_type
)
817 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
819 target_type
= TARGET_SOCK_DGRAM
;
822 target_type
= TARGET_SOCK_STREAM
;
825 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
829 #if defined(SOCK_CLOEXEC)
830 if (host_type
& SOCK_CLOEXEC
) {
831 target_type
|= TARGET_SOCK_CLOEXEC
;
835 #if defined(SOCK_NONBLOCK)
836 if (host_type
& SOCK_NONBLOCK
) {
837 target_type
|= TARGET_SOCK_NONBLOCK
;
844 static abi_ulong target_brk
;
845 static abi_ulong target_original_brk
;
846 static abi_ulong brk_page
;
848 void target_set_brk(abi_ulong new_brk
)
850 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
851 brk_page
= HOST_PAGE_ALIGN(target_brk
);
854 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
855 #define DEBUGF_BRK(message, args...)
857 /* do_brk() must return target values and target errnos. */
858 abi_long
do_brk(abi_ulong new_brk
)
860 abi_long mapped_addr
;
861 abi_ulong new_alloc_size
;
863 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
866 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
869 if (new_brk
< target_original_brk
) {
870 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
875 /* If the new brk is less than the highest page reserved to the
876 * target heap allocation, set it and we're almost done... */
877 if (new_brk
<= brk_page
) {
878 /* Heap contents are initialized to zero, as for anonymous
880 if (new_brk
> target_brk
) {
881 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
883 target_brk
= new_brk
;
884 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
888 /* We need to allocate more memory after the brk... Note that
889 * we don't use MAP_FIXED because that will map over the top of
890 * any existing mapping (like the one with the host libc or qemu
891 * itself); instead we treat "mapped but at wrong address" as
892 * a failure and unmap again.
894 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
895 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
896 PROT_READ
|PROT_WRITE
,
897 MAP_ANON
|MAP_PRIVATE
, 0, 0));
899 if (mapped_addr
== brk_page
) {
900 /* Heap contents are initialized to zero, as for anonymous
901 * mapped pages. Technically the new pages are already
902 * initialized to zero since they *are* anonymous mapped
903 * pages, however we have to take care with the contents that
904 * come from the remaining part of the previous page: it may
905 * contains garbage data due to a previous heap usage (grown
907 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
909 target_brk
= new_brk
;
910 brk_page
= HOST_PAGE_ALIGN(target_brk
);
911 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
914 } else if (mapped_addr
!= -1) {
915 /* Mapped but at wrong address, meaning there wasn't actually
916 * enough space for this brk.
918 target_munmap(mapped_addr
, new_alloc_size
);
920 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
923 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
926 #if defined(TARGET_ALPHA)
927 /* We (partially) emulate OSF/1 on Alpha, which requires we
928 return a proper errno, not an unchanged brk value. */
929 return -TARGET_ENOMEM
;
931 /* For everything else, return the previous break. */
935 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
936 abi_ulong target_fds_addr
,
940 abi_ulong b
, *target_fds
;
942 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
943 if (!(target_fds
= lock_user(VERIFY_READ
,
945 sizeof(abi_ulong
) * nw
,
947 return -TARGET_EFAULT
;
951 for (i
= 0; i
< nw
; i
++) {
952 /* grab the abi_ulong */
953 __get_user(b
, &target_fds
[i
]);
954 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
955 /* check the bit inside the abi_ulong */
962 unlock_user(target_fds
, target_fds_addr
, 0);
967 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
968 abi_ulong target_fds_addr
,
971 if (target_fds_addr
) {
972 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
973 return -TARGET_EFAULT
;
981 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
987 abi_ulong
*target_fds
;
989 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
990 if (!(target_fds
= lock_user(VERIFY_WRITE
,
992 sizeof(abi_ulong
) * nw
,
994 return -TARGET_EFAULT
;
997 for (i
= 0; i
< nw
; i
++) {
999 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1000 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1003 __put_user(v
, &target_fds
[i
]);
1006 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1011 #if defined(__alpha__)
1012 #define HOST_HZ 1024
1017 static inline abi_long
host_to_target_clock_t(long ticks
)
1019 #if HOST_HZ == TARGET_HZ
1022 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1026 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1027 const struct rusage
*rusage
)
1029 struct target_rusage
*target_rusage
;
1031 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1032 return -TARGET_EFAULT
;
1033 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1034 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1035 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1036 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1037 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1038 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1039 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1040 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1041 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1042 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1043 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1044 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1045 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1046 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1047 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1048 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1049 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1050 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1051 unlock_user_struct(target_rusage
, target_addr
, 1);
1056 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1058 abi_ulong target_rlim_swap
;
1061 target_rlim_swap
= tswapal(target_rlim
);
1062 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1063 return RLIM_INFINITY
;
1065 result
= target_rlim_swap
;
1066 if (target_rlim_swap
!= (rlim_t
)result
)
1067 return RLIM_INFINITY
;
1072 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1074 abi_ulong target_rlim_swap
;
1077 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1078 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1080 target_rlim_swap
= rlim
;
1081 result
= tswapal(target_rlim_swap
);
1086 static inline int target_to_host_resource(int code
)
1089 case TARGET_RLIMIT_AS
:
1091 case TARGET_RLIMIT_CORE
:
1093 case TARGET_RLIMIT_CPU
:
1095 case TARGET_RLIMIT_DATA
:
1097 case TARGET_RLIMIT_FSIZE
:
1098 return RLIMIT_FSIZE
;
1099 case TARGET_RLIMIT_LOCKS
:
1100 return RLIMIT_LOCKS
;
1101 case TARGET_RLIMIT_MEMLOCK
:
1102 return RLIMIT_MEMLOCK
;
1103 case TARGET_RLIMIT_MSGQUEUE
:
1104 return RLIMIT_MSGQUEUE
;
1105 case TARGET_RLIMIT_NICE
:
1107 case TARGET_RLIMIT_NOFILE
:
1108 return RLIMIT_NOFILE
;
1109 case TARGET_RLIMIT_NPROC
:
1110 return RLIMIT_NPROC
;
1111 case TARGET_RLIMIT_RSS
:
1113 case TARGET_RLIMIT_RTPRIO
:
1114 return RLIMIT_RTPRIO
;
1115 case TARGET_RLIMIT_SIGPENDING
:
1116 return RLIMIT_SIGPENDING
;
1117 case TARGET_RLIMIT_STACK
:
1118 return RLIMIT_STACK
;
1124 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1125 abi_ulong target_tv_addr
)
1127 struct target_timeval
*target_tv
;
1129 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1130 return -TARGET_EFAULT
;
1132 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1133 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1135 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1140 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1141 const struct timeval
*tv
)
1143 struct target_timeval
*target_tv
;
1145 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1146 return -TARGET_EFAULT
;
1148 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1149 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1151 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1156 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1157 abi_ulong target_tz_addr
)
1159 struct target_timezone
*target_tz
;
1161 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1162 return -TARGET_EFAULT
;
1165 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1166 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1168 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1173 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1176 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1177 abi_ulong target_mq_attr_addr
)
1179 struct target_mq_attr
*target_mq_attr
;
1181 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1182 target_mq_attr_addr
, 1))
1183 return -TARGET_EFAULT
;
1185 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1186 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1187 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1188 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1190 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1195 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1196 const struct mq_attr
*attr
)
1198 struct target_mq_attr
*target_mq_attr
;
1200 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1201 target_mq_attr_addr
, 0))
1202 return -TARGET_EFAULT
;
1204 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1205 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1206 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1207 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1209 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1215 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1216 /* do_select() must return target values and target errnos. */
1217 static abi_long
do_select(int n
,
1218 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1219 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1221 fd_set rfds
, wfds
, efds
;
1222 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1224 struct timespec ts
, *ts_ptr
;
1227 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1231 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1235 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1240 if (target_tv_addr
) {
1241 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1242 return -TARGET_EFAULT
;
1243 ts
.tv_sec
= tv
.tv_sec
;
1244 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1250 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1253 if (!is_error(ret
)) {
1254 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1255 return -TARGET_EFAULT
;
1256 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1257 return -TARGET_EFAULT
;
1258 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1259 return -TARGET_EFAULT
;
1261 if (target_tv_addr
) {
1262 tv
.tv_sec
= ts
.tv_sec
;
1263 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1264 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1265 return -TARGET_EFAULT
;
1273 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1274 static abi_long
do_old_select(abi_ulong arg1
)
1276 struct target_sel_arg_struct
*sel
;
1277 abi_ulong inp
, outp
, exp
, tvp
;
1280 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1281 return -TARGET_EFAULT
;
1284 nsel
= tswapal(sel
->n
);
1285 inp
= tswapal(sel
->inp
);
1286 outp
= tswapal(sel
->outp
);
1287 exp
= tswapal(sel
->exp
);
1288 tvp
= tswapal(sel
->tvp
);
1290 unlock_user_struct(sel
, arg1
, 0);
1292 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1297 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1300 return pipe2(host_pipe
, flags
);
1306 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1307 int flags
, int is_pipe2
)
1311 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1314 return get_errno(ret
);
1316 /* Several targets have special calling conventions for the original
1317 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1319 #if defined(TARGET_ALPHA)
1320 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1321 return host_pipe
[0];
1322 #elif defined(TARGET_MIPS)
1323 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1324 return host_pipe
[0];
1325 #elif defined(TARGET_SH4)
1326 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1327 return host_pipe
[0];
1328 #elif defined(TARGET_SPARC)
1329 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1330 return host_pipe
[0];
1334 if (put_user_s32(host_pipe
[0], pipedes
)
1335 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1336 return -TARGET_EFAULT
;
1337 return get_errno(ret
);
1340 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1341 abi_ulong target_addr
,
1344 struct target_ip_mreqn
*target_smreqn
;
1346 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1348 return -TARGET_EFAULT
;
1349 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1350 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1351 if (len
== sizeof(struct target_ip_mreqn
))
1352 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1353 unlock_user(target_smreqn
, target_addr
, 0);
1358 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1359 abi_ulong target_addr
,
1362 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1363 sa_family_t sa_family
;
1364 struct target_sockaddr
*target_saddr
;
1366 if (fd_trans_target_to_host_addr(fd
)) {
1367 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1370 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1372 return -TARGET_EFAULT
;
1374 sa_family
= tswap16(target_saddr
->sa_family
);
1376 /* Oops. The caller might send a incomplete sun_path; sun_path
1377 * must be terminated by \0 (see the manual page), but
1378 * unfortunately it is quite common to specify sockaddr_un
1379 * length as "strlen(x->sun_path)" while it should be
1380 * "strlen(...) + 1". We'll fix that here if needed.
1381 * Linux kernel has a similar feature.
1384 if (sa_family
== AF_UNIX
) {
1385 if (len
< unix_maxlen
&& len
> 0) {
1386 char *cp
= (char*)target_saddr
;
1388 if ( cp
[len
-1] && !cp
[len
] )
1391 if (len
> unix_maxlen
)
1395 memcpy(addr
, target_saddr
, len
);
1396 addr
->sa_family
= sa_family
;
1397 if (sa_family
== AF_NETLINK
) {
1398 struct sockaddr_nl
*nladdr
;
1400 nladdr
= (struct sockaddr_nl
*)addr
;
1401 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1402 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1403 } else if (sa_family
== AF_PACKET
) {
1404 struct target_sockaddr_ll
*lladdr
;
1406 lladdr
= (struct target_sockaddr_ll
*)addr
;
1407 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1408 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1410 unlock_user(target_saddr
, target_addr
, 0);
1415 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1416 struct sockaddr
*addr
,
1419 struct target_sockaddr
*target_saddr
;
1426 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1428 return -TARGET_EFAULT
;
1429 memcpy(target_saddr
, addr
, len
);
1430 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1431 sizeof(target_saddr
->sa_family
)) {
1432 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1434 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1435 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1436 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1437 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1438 } else if (addr
->sa_family
== AF_PACKET
) {
1439 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1440 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1441 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1442 } else if (addr
->sa_family
== AF_INET6
&&
1443 len
>= sizeof(struct target_sockaddr_in6
)) {
1444 struct target_sockaddr_in6
*target_in6
=
1445 (struct target_sockaddr_in6
*)target_saddr
;
1446 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1448 unlock_user(target_saddr
, target_addr
, len
);
1453 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1454 struct target_msghdr
*target_msgh
)
1456 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1457 abi_long msg_controllen
;
1458 abi_ulong target_cmsg_addr
;
1459 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1460 socklen_t space
= 0;
1462 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1463 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1465 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1466 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1467 target_cmsg_start
= target_cmsg
;
1469 return -TARGET_EFAULT
;
1471 while (cmsg
&& target_cmsg
) {
1472 void *data
= CMSG_DATA(cmsg
);
1473 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1475 int len
= tswapal(target_cmsg
->cmsg_len
)
1476 - sizeof(struct target_cmsghdr
);
1478 space
+= CMSG_SPACE(len
);
1479 if (space
> msgh
->msg_controllen
) {
1480 space
-= CMSG_SPACE(len
);
1481 /* This is a QEMU bug, since we allocated the payload
1482 * area ourselves (unlike overflow in host-to-target
1483 * conversion, which is just the guest giving us a buffer
1484 * that's too small). It can't happen for the payload types
1485 * we currently support; if it becomes an issue in future
1486 * we would need to improve our allocation strategy to
1487 * something more intelligent than "twice the size of the
1488 * target buffer we're reading from".
1490 gemu_log("Host cmsg overflow\n");
1494 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1495 cmsg
->cmsg_level
= SOL_SOCKET
;
1497 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1499 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1500 cmsg
->cmsg_len
= CMSG_LEN(len
);
1502 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1503 int *fd
= (int *)data
;
1504 int *target_fd
= (int *)target_data
;
1505 int i
, numfds
= len
/ sizeof(int);
1507 for (i
= 0; i
< numfds
; i
++) {
1508 __get_user(fd
[i
], target_fd
+ i
);
1510 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1511 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1512 struct ucred
*cred
= (struct ucred
*)data
;
1513 struct target_ucred
*target_cred
=
1514 (struct target_ucred
*)target_data
;
1516 __get_user(cred
->pid
, &target_cred
->pid
);
1517 __get_user(cred
->uid
, &target_cred
->uid
);
1518 __get_user(cred
->gid
, &target_cred
->gid
);
1520 gemu_log("Unsupported ancillary data: %d/%d\n",
1521 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1522 memcpy(data
, target_data
, len
);
1525 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1526 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1529 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1531 msgh
->msg_controllen
= space
;
1535 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1536 struct msghdr
*msgh
)
1538 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1539 abi_long msg_controllen
;
1540 abi_ulong target_cmsg_addr
;
1541 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1542 socklen_t space
= 0;
1544 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1545 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1547 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1548 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1549 target_cmsg_start
= target_cmsg
;
1551 return -TARGET_EFAULT
;
1553 while (cmsg
&& target_cmsg
) {
1554 void *data
= CMSG_DATA(cmsg
);
1555 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1557 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1558 int tgt_len
, tgt_space
;
1560 /* We never copy a half-header but may copy half-data;
1561 * this is Linux's behaviour in put_cmsg(). Note that
1562 * truncation here is a guest problem (which we report
1563 * to the guest via the CTRUNC bit), unlike truncation
1564 * in target_to_host_cmsg, which is a QEMU bug.
1566 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1567 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1571 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1572 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1574 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1576 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1578 /* Payload types which need a different size of payload on
1579 * the target must adjust tgt_len here.
1582 switch (cmsg
->cmsg_level
) {
1584 switch (cmsg
->cmsg_type
) {
1586 tgt_len
= sizeof(struct target_timeval
);
1596 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1597 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1598 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1601 /* We must now copy-and-convert len bytes of payload
1602 * into tgt_len bytes of destination space. Bear in mind
1603 * that in both source and destination we may be dealing
1604 * with a truncated value!
1606 switch (cmsg
->cmsg_level
) {
1608 switch (cmsg
->cmsg_type
) {
1611 int *fd
= (int *)data
;
1612 int *target_fd
= (int *)target_data
;
1613 int i
, numfds
= tgt_len
/ sizeof(int);
1615 for (i
= 0; i
< numfds
; i
++) {
1616 __put_user(fd
[i
], target_fd
+ i
);
1622 struct timeval
*tv
= (struct timeval
*)data
;
1623 struct target_timeval
*target_tv
=
1624 (struct target_timeval
*)target_data
;
1626 if (len
!= sizeof(struct timeval
) ||
1627 tgt_len
!= sizeof(struct target_timeval
)) {
1631 /* copy struct timeval to target */
1632 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1633 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1636 case SCM_CREDENTIALS
:
1638 struct ucred
*cred
= (struct ucred
*)data
;
1639 struct target_ucred
*target_cred
=
1640 (struct target_ucred
*)target_data
;
1642 __put_user(cred
->pid
, &target_cred
->pid
);
1643 __put_user(cred
->uid
, &target_cred
->uid
);
1644 __put_user(cred
->gid
, &target_cred
->gid
);
1653 switch (cmsg
->cmsg_type
) {
1656 uint32_t *v
= (uint32_t *)data
;
1657 uint32_t *t_int
= (uint32_t *)target_data
;
1659 if (len
!= sizeof(uint32_t) ||
1660 tgt_len
!= sizeof(uint32_t)) {
1663 __put_user(*v
, t_int
);
1669 struct sock_extended_err ee
;
1670 struct sockaddr_in offender
;
1672 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1673 struct errhdr_t
*target_errh
=
1674 (struct errhdr_t
*)target_data
;
1676 if (len
!= sizeof(struct errhdr_t
) ||
1677 tgt_len
!= sizeof(struct errhdr_t
)) {
1680 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1681 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1682 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1683 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1684 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1685 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1686 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1687 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1688 (void *) &errh
->offender
, sizeof(errh
->offender
));
1697 switch (cmsg
->cmsg_type
) {
1700 uint32_t *v
= (uint32_t *)data
;
1701 uint32_t *t_int
= (uint32_t *)target_data
;
1703 if (len
!= sizeof(uint32_t) ||
1704 tgt_len
!= sizeof(uint32_t)) {
1707 __put_user(*v
, t_int
);
1713 struct sock_extended_err ee
;
1714 struct sockaddr_in6 offender
;
1716 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1717 struct errhdr6_t
*target_errh
=
1718 (struct errhdr6_t
*)target_data
;
1720 if (len
!= sizeof(struct errhdr6_t
) ||
1721 tgt_len
!= sizeof(struct errhdr6_t
)) {
1724 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1725 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1726 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1727 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1728 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1729 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1730 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1731 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1732 (void *) &errh
->offender
, sizeof(errh
->offender
));
1742 gemu_log("Unsupported ancillary data: %d/%d\n",
1743 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1744 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1745 if (tgt_len
> len
) {
1746 memset(target_data
+ len
, 0, tgt_len
- len
);
1750 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1751 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1752 if (msg_controllen
< tgt_space
) {
1753 tgt_space
= msg_controllen
;
1755 msg_controllen
-= tgt_space
;
1757 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1758 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1761 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1763 target_msgh
->msg_controllen
= tswapal(space
);
1767 /* do_setsockopt() Must return target values and target errnos. */
1768 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1769 abi_ulong optval_addr
, socklen_t optlen
)
1773 struct ip_mreqn
*ip_mreq
;
1774 struct ip_mreq_source
*ip_mreq_source
;
1778 /* TCP options all take an 'int' value. */
1779 if (optlen
< sizeof(uint32_t))
1780 return -TARGET_EINVAL
;
1782 if (get_user_u32(val
, optval_addr
))
1783 return -TARGET_EFAULT
;
1784 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1791 case IP_ROUTER_ALERT
:
1795 case IP_MTU_DISCOVER
:
1802 case IP_MULTICAST_TTL
:
1803 case IP_MULTICAST_LOOP
:
1805 if (optlen
>= sizeof(uint32_t)) {
1806 if (get_user_u32(val
, optval_addr
))
1807 return -TARGET_EFAULT
;
1808 } else if (optlen
>= 1) {
1809 if (get_user_u8(val
, optval_addr
))
1810 return -TARGET_EFAULT
;
1812 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1814 case IP_ADD_MEMBERSHIP
:
1815 case IP_DROP_MEMBERSHIP
:
1816 if (optlen
< sizeof (struct target_ip_mreq
) ||
1817 optlen
> sizeof (struct target_ip_mreqn
))
1818 return -TARGET_EINVAL
;
1820 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1821 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1822 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1825 case IP_BLOCK_SOURCE
:
1826 case IP_UNBLOCK_SOURCE
:
1827 case IP_ADD_SOURCE_MEMBERSHIP
:
1828 case IP_DROP_SOURCE_MEMBERSHIP
:
1829 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1830 return -TARGET_EINVAL
;
1832 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1833 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1834 unlock_user (ip_mreq_source
, optval_addr
, 0);
1843 case IPV6_MTU_DISCOVER
:
1846 case IPV6_RECVPKTINFO
:
1847 case IPV6_UNICAST_HOPS
:
1848 case IPV6_MULTICAST_HOPS
:
1849 case IPV6_MULTICAST_LOOP
:
1851 case IPV6_RECVHOPLIMIT
:
1852 case IPV6_2292HOPLIMIT
:
1855 case IPV6_2292PKTINFO
:
1856 case IPV6_RECVTCLASS
:
1857 case IPV6_RECVRTHDR
:
1858 case IPV6_2292RTHDR
:
1859 case IPV6_RECVHOPOPTS
:
1860 case IPV6_2292HOPOPTS
:
1861 case IPV6_RECVDSTOPTS
:
1862 case IPV6_2292DSTOPTS
:
1864 #ifdef IPV6_RECVPATHMTU
1865 case IPV6_RECVPATHMTU
:
1867 #ifdef IPV6_TRANSPARENT
1868 case IPV6_TRANSPARENT
:
1870 #ifdef IPV6_FREEBIND
1873 #ifdef IPV6_RECVORIGDSTADDR
1874 case IPV6_RECVORIGDSTADDR
:
1877 if (optlen
< sizeof(uint32_t)) {
1878 return -TARGET_EINVAL
;
1880 if (get_user_u32(val
, optval_addr
)) {
1881 return -TARGET_EFAULT
;
1883 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1884 &val
, sizeof(val
)));
1888 struct in6_pktinfo pki
;
1890 if (optlen
< sizeof(pki
)) {
1891 return -TARGET_EINVAL
;
1894 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1895 return -TARGET_EFAULT
;
1898 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1900 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1901 &pki
, sizeof(pki
)));
1904 case IPV6_ADD_MEMBERSHIP
:
1905 case IPV6_DROP_MEMBERSHIP
:
1907 struct ipv6_mreq ipv6mreq
;
1909 if (optlen
< sizeof(ipv6mreq
)) {
1910 return -TARGET_EINVAL
;
1913 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
1914 return -TARGET_EFAULT
;
1917 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
1919 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1920 &ipv6mreq
, sizeof(ipv6mreq
)));
1931 struct icmp6_filter icmp6f
;
1933 if (optlen
> sizeof(icmp6f
)) {
1934 optlen
= sizeof(icmp6f
);
1937 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
1938 return -TARGET_EFAULT
;
1941 for (val
= 0; val
< 8; val
++) {
1942 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
1945 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1957 /* those take an u32 value */
1958 if (optlen
< sizeof(uint32_t)) {
1959 return -TARGET_EINVAL
;
1962 if (get_user_u32(val
, optval_addr
)) {
1963 return -TARGET_EFAULT
;
1965 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1966 &val
, sizeof(val
)));
1973 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
1978 char *alg_key
= g_malloc(optlen
);
1981 return -TARGET_ENOMEM
;
1983 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
1985 return -TARGET_EFAULT
;
1987 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1992 case ALG_SET_AEAD_AUTHSIZE
:
1994 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2003 case TARGET_SOL_SOCKET
:
2005 case TARGET_SO_RCVTIMEO
:
2009 optname
= SO_RCVTIMEO
;
2012 if (optlen
!= sizeof(struct target_timeval
)) {
2013 return -TARGET_EINVAL
;
2016 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2017 return -TARGET_EFAULT
;
2020 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2024 case TARGET_SO_SNDTIMEO
:
2025 optname
= SO_SNDTIMEO
;
2027 case TARGET_SO_ATTACH_FILTER
:
2029 struct target_sock_fprog
*tfprog
;
2030 struct target_sock_filter
*tfilter
;
2031 struct sock_fprog fprog
;
2032 struct sock_filter
*filter
;
2035 if (optlen
!= sizeof(*tfprog
)) {
2036 return -TARGET_EINVAL
;
2038 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2039 return -TARGET_EFAULT
;
2041 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2042 tswapal(tfprog
->filter
), 0)) {
2043 unlock_user_struct(tfprog
, optval_addr
, 1);
2044 return -TARGET_EFAULT
;
2047 fprog
.len
= tswap16(tfprog
->len
);
2048 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2049 if (filter
== NULL
) {
2050 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2051 unlock_user_struct(tfprog
, optval_addr
, 1);
2052 return -TARGET_ENOMEM
;
2054 for (i
= 0; i
< fprog
.len
; i
++) {
2055 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2056 filter
[i
].jt
= tfilter
[i
].jt
;
2057 filter
[i
].jf
= tfilter
[i
].jf
;
2058 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2060 fprog
.filter
= filter
;
2062 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2063 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2066 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2067 unlock_user_struct(tfprog
, optval_addr
, 1);
2070 case TARGET_SO_BINDTODEVICE
:
2072 char *dev_ifname
, *addr_ifname
;
2074 if (optlen
> IFNAMSIZ
- 1) {
2075 optlen
= IFNAMSIZ
- 1;
2077 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2079 return -TARGET_EFAULT
;
2081 optname
= SO_BINDTODEVICE
;
2082 addr_ifname
= alloca(IFNAMSIZ
);
2083 memcpy(addr_ifname
, dev_ifname
, optlen
);
2084 addr_ifname
[optlen
] = 0;
2085 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2086 addr_ifname
, optlen
));
2087 unlock_user (dev_ifname
, optval_addr
, 0);
2090 case TARGET_SO_LINGER
:
2093 struct target_linger
*tlg
;
2095 if (optlen
!= sizeof(struct target_linger
)) {
2096 return -TARGET_EINVAL
;
2098 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2099 return -TARGET_EFAULT
;
2101 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2102 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2103 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2105 unlock_user_struct(tlg
, optval_addr
, 0);
2108 /* Options with 'int' argument. */
2109 case TARGET_SO_DEBUG
:
2112 case TARGET_SO_REUSEADDR
:
2113 optname
= SO_REUSEADDR
;
2116 case TARGET_SO_REUSEPORT
:
2117 optname
= SO_REUSEPORT
;
2120 case TARGET_SO_TYPE
:
2123 case TARGET_SO_ERROR
:
2126 case TARGET_SO_DONTROUTE
:
2127 optname
= SO_DONTROUTE
;
2129 case TARGET_SO_BROADCAST
:
2130 optname
= SO_BROADCAST
;
2132 case TARGET_SO_SNDBUF
:
2133 optname
= SO_SNDBUF
;
2135 case TARGET_SO_SNDBUFFORCE
:
2136 optname
= SO_SNDBUFFORCE
;
2138 case TARGET_SO_RCVBUF
:
2139 optname
= SO_RCVBUF
;
2141 case TARGET_SO_RCVBUFFORCE
:
2142 optname
= SO_RCVBUFFORCE
;
2144 case TARGET_SO_KEEPALIVE
:
2145 optname
= SO_KEEPALIVE
;
2147 case TARGET_SO_OOBINLINE
:
2148 optname
= SO_OOBINLINE
;
2150 case TARGET_SO_NO_CHECK
:
2151 optname
= SO_NO_CHECK
;
2153 case TARGET_SO_PRIORITY
:
2154 optname
= SO_PRIORITY
;
2157 case TARGET_SO_BSDCOMPAT
:
2158 optname
= SO_BSDCOMPAT
;
2161 case TARGET_SO_PASSCRED
:
2162 optname
= SO_PASSCRED
;
2164 case TARGET_SO_PASSSEC
:
2165 optname
= SO_PASSSEC
;
2167 case TARGET_SO_TIMESTAMP
:
2168 optname
= SO_TIMESTAMP
;
2170 case TARGET_SO_RCVLOWAT
:
2171 optname
= SO_RCVLOWAT
;
2176 if (optlen
< sizeof(uint32_t))
2177 return -TARGET_EINVAL
;
2179 if (get_user_u32(val
, optval_addr
))
2180 return -TARGET_EFAULT
;
2181 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2185 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2186 ret
= -TARGET_ENOPROTOOPT
;
2191 /* do_getsockopt() Must return target values and target errnos. */
2192 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2193 abi_ulong optval_addr
, abi_ulong optlen
)
2200 case TARGET_SOL_SOCKET
:
2203 /* These don't just return a single integer */
2204 case TARGET_SO_RCVTIMEO
:
2205 case TARGET_SO_SNDTIMEO
:
2206 case TARGET_SO_PEERNAME
:
2208 case TARGET_SO_PEERCRED
: {
2211 struct target_ucred
*tcr
;
2213 if (get_user_u32(len
, optlen
)) {
2214 return -TARGET_EFAULT
;
2217 return -TARGET_EINVAL
;
2221 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2229 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2230 return -TARGET_EFAULT
;
2232 __put_user(cr
.pid
, &tcr
->pid
);
2233 __put_user(cr
.uid
, &tcr
->uid
);
2234 __put_user(cr
.gid
, &tcr
->gid
);
2235 unlock_user_struct(tcr
, optval_addr
, 1);
2236 if (put_user_u32(len
, optlen
)) {
2237 return -TARGET_EFAULT
;
2241 case TARGET_SO_LINGER
:
2245 struct target_linger
*tlg
;
2247 if (get_user_u32(len
, optlen
)) {
2248 return -TARGET_EFAULT
;
2251 return -TARGET_EINVAL
;
2255 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2263 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2264 return -TARGET_EFAULT
;
2266 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2267 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2268 unlock_user_struct(tlg
, optval_addr
, 1);
2269 if (put_user_u32(len
, optlen
)) {
2270 return -TARGET_EFAULT
;
2274 /* Options with 'int' argument. */
2275 case TARGET_SO_DEBUG
:
2278 case TARGET_SO_REUSEADDR
:
2279 optname
= SO_REUSEADDR
;
2282 case TARGET_SO_REUSEPORT
:
2283 optname
= SO_REUSEPORT
;
2286 case TARGET_SO_TYPE
:
2289 case TARGET_SO_ERROR
:
2292 case TARGET_SO_DONTROUTE
:
2293 optname
= SO_DONTROUTE
;
2295 case TARGET_SO_BROADCAST
:
2296 optname
= SO_BROADCAST
;
2298 case TARGET_SO_SNDBUF
:
2299 optname
= SO_SNDBUF
;
2301 case TARGET_SO_RCVBUF
:
2302 optname
= SO_RCVBUF
;
2304 case TARGET_SO_KEEPALIVE
:
2305 optname
= SO_KEEPALIVE
;
2307 case TARGET_SO_OOBINLINE
:
2308 optname
= SO_OOBINLINE
;
2310 case TARGET_SO_NO_CHECK
:
2311 optname
= SO_NO_CHECK
;
2313 case TARGET_SO_PRIORITY
:
2314 optname
= SO_PRIORITY
;
2317 case TARGET_SO_BSDCOMPAT
:
2318 optname
= SO_BSDCOMPAT
;
2321 case TARGET_SO_PASSCRED
:
2322 optname
= SO_PASSCRED
;
2324 case TARGET_SO_TIMESTAMP
:
2325 optname
= SO_TIMESTAMP
;
2327 case TARGET_SO_RCVLOWAT
:
2328 optname
= SO_RCVLOWAT
;
2330 case TARGET_SO_ACCEPTCONN
:
2331 optname
= SO_ACCEPTCONN
;
2338 /* TCP options all take an 'int' value. */
2340 if (get_user_u32(len
, optlen
))
2341 return -TARGET_EFAULT
;
2343 return -TARGET_EINVAL
;
2345 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2348 if (optname
== SO_TYPE
) {
2349 val
= host_to_target_sock_type(val
);
2354 if (put_user_u32(val
, optval_addr
))
2355 return -TARGET_EFAULT
;
2357 if (put_user_u8(val
, optval_addr
))
2358 return -TARGET_EFAULT
;
2360 if (put_user_u32(len
, optlen
))
2361 return -TARGET_EFAULT
;
2368 case IP_ROUTER_ALERT
:
2372 case IP_MTU_DISCOVER
:
2378 case IP_MULTICAST_TTL
:
2379 case IP_MULTICAST_LOOP
:
2380 if (get_user_u32(len
, optlen
))
2381 return -TARGET_EFAULT
;
2383 return -TARGET_EINVAL
;
2385 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2388 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2390 if (put_user_u32(len
, optlen
)
2391 || put_user_u8(val
, optval_addr
))
2392 return -TARGET_EFAULT
;
2394 if (len
> sizeof(int))
2396 if (put_user_u32(len
, optlen
)
2397 || put_user_u32(val
, optval_addr
))
2398 return -TARGET_EFAULT
;
2402 ret
= -TARGET_ENOPROTOOPT
;
2408 case IPV6_MTU_DISCOVER
:
2411 case IPV6_RECVPKTINFO
:
2412 case IPV6_UNICAST_HOPS
:
2413 case IPV6_MULTICAST_HOPS
:
2414 case IPV6_MULTICAST_LOOP
:
2416 case IPV6_RECVHOPLIMIT
:
2417 case IPV6_2292HOPLIMIT
:
2420 case IPV6_2292PKTINFO
:
2421 case IPV6_RECVTCLASS
:
2422 case IPV6_RECVRTHDR
:
2423 case IPV6_2292RTHDR
:
2424 case IPV6_RECVHOPOPTS
:
2425 case IPV6_2292HOPOPTS
:
2426 case IPV6_RECVDSTOPTS
:
2427 case IPV6_2292DSTOPTS
:
2429 #ifdef IPV6_RECVPATHMTU
2430 case IPV6_RECVPATHMTU
:
2432 #ifdef IPV6_TRANSPARENT
2433 case IPV6_TRANSPARENT
:
2435 #ifdef IPV6_FREEBIND
2438 #ifdef IPV6_RECVORIGDSTADDR
2439 case IPV6_RECVORIGDSTADDR
:
2441 if (get_user_u32(len
, optlen
))
2442 return -TARGET_EFAULT
;
2444 return -TARGET_EINVAL
;
2446 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2449 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2451 if (put_user_u32(len
, optlen
)
2452 || put_user_u8(val
, optval_addr
))
2453 return -TARGET_EFAULT
;
2455 if (len
> sizeof(int))
2457 if (put_user_u32(len
, optlen
)
2458 || put_user_u32(val
, optval_addr
))
2459 return -TARGET_EFAULT
;
2463 ret
= -TARGET_ENOPROTOOPT
;
2469 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2471 ret
= -TARGET_EOPNOTSUPP
;
2477 /* Convert target low/high pair representing file offset into the host
2478 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2479 * as the kernel doesn't handle them either.
2481 static void target_to_host_low_high(abi_ulong tlow
,
2483 unsigned long *hlow
,
2484 unsigned long *hhigh
)
2486 uint64_t off
= tlow
|
2487 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2488 TARGET_LONG_BITS
/ 2;
2491 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2494 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2495 abi_ulong count
, int copy
)
2497 struct target_iovec
*target_vec
;
2499 abi_ulong total_len
, max_len
;
2502 bool bad_address
= false;
2508 if (count
> IOV_MAX
) {
2513 vec
= g_try_new0(struct iovec
, count
);
2519 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2520 count
* sizeof(struct target_iovec
), 1);
2521 if (target_vec
== NULL
) {
2526 /* ??? If host page size > target page size, this will result in a
2527 value larger than what we can actually support. */
2528 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2531 for (i
= 0; i
< count
; i
++) {
2532 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2533 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2538 } else if (len
== 0) {
2539 /* Zero length pointer is ignored. */
2540 vec
[i
].iov_base
= 0;
2542 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2543 /* If the first buffer pointer is bad, this is a fault. But
2544 * subsequent bad buffers will result in a partial write; this
2545 * is realized by filling the vector with null pointers and
2547 if (!vec
[i
].iov_base
) {
2558 if (len
> max_len
- total_len
) {
2559 len
= max_len
- total_len
;
2562 vec
[i
].iov_len
= len
;
2566 unlock_user(target_vec
, target_addr
, 0);
2571 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2572 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2575 unlock_user(target_vec
, target_addr
, 0);
2582 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2583 abi_ulong count
, int copy
)
2585 struct target_iovec
*target_vec
;
2588 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2589 count
* sizeof(struct target_iovec
), 1);
2591 for (i
= 0; i
< count
; i
++) {
2592 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2593 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2597 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2599 unlock_user(target_vec
, target_addr
, 0);
2605 static inline int target_to_host_sock_type(int *type
)
2608 int target_type
= *type
;
2610 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2611 case TARGET_SOCK_DGRAM
:
2612 host_type
= SOCK_DGRAM
;
2614 case TARGET_SOCK_STREAM
:
2615 host_type
= SOCK_STREAM
;
2618 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2621 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2622 #if defined(SOCK_CLOEXEC)
2623 host_type
|= SOCK_CLOEXEC
;
2625 return -TARGET_EINVAL
;
2628 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2629 #if defined(SOCK_NONBLOCK)
2630 host_type
|= SOCK_NONBLOCK
;
2631 #elif !defined(O_NONBLOCK)
2632 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2654 /* do_socket() Must return target values and target errnos. */
2655 static abi_long
do_socket(int domain
, int type
, int protocol
)
2657 int target_type
= type
;
2660 ret
= target_to_host_sock_type(&type
);
2665 if (domain
== PF_NETLINK
&& !(
2666 #ifdef CONFIG_RTNETLINK
2667 protocol
== NETLINK_ROUTE
||
2669 protocol
== NETLINK_KOBJECT_UEVENT
||
2670 protocol
== NETLINK_AUDIT
)) {
2671 return -EPFNOSUPPORT
;
2674 if (domain
== AF_PACKET
||
2675 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2676 protocol
= tswap16(protocol
);
2679 ret
= get_errno(socket(domain
, type
, protocol
));
2681 ret
= sock_flags_fixup(ret
, target_type
);
2682 if (type
== SOCK_PACKET
) {
2683 /* Manage an obsolete case :
2684 * if socket type is SOCK_PACKET, bind by name
2686 fd_trans_register(ret
, &target_packet_trans
);
2687 } else if (domain
== PF_NETLINK
) {
2689 #ifdef CONFIG_RTNETLINK
2691 fd_trans_register(ret
, &target_netlink_route_trans
);
2694 case NETLINK_KOBJECT_UEVENT
:
2695 /* nothing to do: messages are strings */
2698 fd_trans_register(ret
, &target_netlink_audit_trans
);
2701 g_assert_not_reached();
2708 /* do_bind() Must return target values and target errnos. */
2709 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2715 if ((int)addrlen
< 0) {
2716 return -TARGET_EINVAL
;
2719 addr
= alloca(addrlen
+1);
2721 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2725 return get_errno(bind(sockfd
, addr
, addrlen
));
2728 /* do_connect() Must return target values and target errnos. */
2729 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2735 if ((int)addrlen
< 0) {
2736 return -TARGET_EINVAL
;
2739 addr
= alloca(addrlen
+1);
2741 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2745 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2748 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2749 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2750 int flags
, int send
)
2756 abi_ulong target_vec
;
2758 if (msgp
->msg_name
) {
2759 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2760 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2761 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2762 tswapal(msgp
->msg_name
),
2764 if (ret
== -TARGET_EFAULT
) {
2765 /* For connected sockets msg_name and msg_namelen must
2766 * be ignored, so returning EFAULT immediately is wrong.
2767 * Instead, pass a bad msg_name to the host kernel, and
2768 * let it decide whether to return EFAULT or not.
2770 msg
.msg_name
= (void *)-1;
2775 msg
.msg_name
= NULL
;
2776 msg
.msg_namelen
= 0;
2778 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2779 msg
.msg_control
= alloca(msg
.msg_controllen
);
2780 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
2782 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2784 count
= tswapal(msgp
->msg_iovlen
);
2785 target_vec
= tswapal(msgp
->msg_iov
);
2787 if (count
> IOV_MAX
) {
2788 /* sendrcvmsg returns a different errno for this condition than
2789 * readv/writev, so we must catch it here before lock_iovec() does.
2791 ret
= -TARGET_EMSGSIZE
;
2795 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2796 target_vec
, count
, send
);
2798 ret
= -host_to_target_errno(errno
);
2801 msg
.msg_iovlen
= count
;
2805 if (fd_trans_target_to_host_data(fd
)) {
2808 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
2809 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
2810 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
2811 msg
.msg_iov
->iov_len
);
2813 msg
.msg_iov
->iov_base
= host_msg
;
2814 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2818 ret
= target_to_host_cmsg(&msg
, msgp
);
2820 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2824 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2825 if (!is_error(ret
)) {
2827 if (fd_trans_host_to_target_data(fd
)) {
2828 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2829 MIN(msg
.msg_iov
->iov_len
, len
));
2831 ret
= host_to_target_cmsg(msgp
, &msg
);
2833 if (!is_error(ret
)) {
2834 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2835 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
2836 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
2837 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2838 msg
.msg_name
, msg
.msg_namelen
);
2850 unlock_iovec(vec
, target_vec
, count
, !send
);
2855 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2856 int flags
, int send
)
2859 struct target_msghdr
*msgp
;
2861 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2865 return -TARGET_EFAULT
;
2867 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2868 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2872 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2873 * so it might not have this *mmsg-specific flag either.
2875 #ifndef MSG_WAITFORONE
2876 #define MSG_WAITFORONE 0x10000
2879 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2880 unsigned int vlen
, unsigned int flags
,
2883 struct target_mmsghdr
*mmsgp
;
2887 if (vlen
> UIO_MAXIOV
) {
2891 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2893 return -TARGET_EFAULT
;
2896 for (i
= 0; i
< vlen
; i
++) {
2897 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2898 if (is_error(ret
)) {
2901 mmsgp
[i
].msg_len
= tswap32(ret
);
2902 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2903 if (flags
& MSG_WAITFORONE
) {
2904 flags
|= MSG_DONTWAIT
;
2908 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2910 /* Return number of datagrams sent if we sent any at all;
2911 * otherwise return the error.
2919 /* do_accept4() Must return target values and target errnos. */
2920 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2921 abi_ulong target_addrlen_addr
, int flags
)
2923 socklen_t addrlen
, ret_addrlen
;
2928 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2930 if (target_addr
== 0) {
2931 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
2934 /* linux returns EINVAL if addrlen pointer is invalid */
2935 if (get_user_u32(addrlen
, target_addrlen_addr
))
2936 return -TARGET_EINVAL
;
2938 if ((int)addrlen
< 0) {
2939 return -TARGET_EINVAL
;
2942 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2943 return -TARGET_EINVAL
;
2945 addr
= alloca(addrlen
);
2947 ret_addrlen
= addrlen
;
2948 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
2949 if (!is_error(ret
)) {
2950 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
2951 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
2952 ret
= -TARGET_EFAULT
;
2958 /* do_getpeername() Must return target values and target errnos. */
2959 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2960 abi_ulong target_addrlen_addr
)
2962 socklen_t addrlen
, ret_addrlen
;
2966 if (get_user_u32(addrlen
, target_addrlen_addr
))
2967 return -TARGET_EFAULT
;
2969 if ((int)addrlen
< 0) {
2970 return -TARGET_EINVAL
;
2973 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2974 return -TARGET_EFAULT
;
2976 addr
= alloca(addrlen
);
2978 ret_addrlen
= addrlen
;
2979 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
2980 if (!is_error(ret
)) {
2981 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
2982 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
2983 ret
= -TARGET_EFAULT
;
2989 /* do_getsockname() Must return target values and target errnos. */
2990 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2991 abi_ulong target_addrlen_addr
)
2993 socklen_t addrlen
, ret_addrlen
;
2997 if (get_user_u32(addrlen
, target_addrlen_addr
))
2998 return -TARGET_EFAULT
;
3000 if ((int)addrlen
< 0) {
3001 return -TARGET_EINVAL
;
3004 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3005 return -TARGET_EFAULT
;
3007 addr
= alloca(addrlen
);
3009 ret_addrlen
= addrlen
;
3010 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3011 if (!is_error(ret
)) {
3012 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3013 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3014 ret
= -TARGET_EFAULT
;
3020 /* do_socketpair() Must return target values and target errnos. */
3021 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3022 abi_ulong target_tab_addr
)
3027 target_to_host_sock_type(&type
);
3029 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3030 if (!is_error(ret
)) {
3031 if (put_user_s32(tab
[0], target_tab_addr
)
3032 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3033 ret
= -TARGET_EFAULT
;
3038 /* do_sendto() Must return target values and target errnos. */
3039 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3040 abi_ulong target_addr
, socklen_t addrlen
)
3044 void *copy_msg
= NULL
;
3047 if ((int)addrlen
< 0) {
3048 return -TARGET_EINVAL
;
3051 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3053 return -TARGET_EFAULT
;
3054 if (fd_trans_target_to_host_data(fd
)) {
3055 copy_msg
= host_msg
;
3056 host_msg
= g_malloc(len
);
3057 memcpy(host_msg
, copy_msg
, len
);
3058 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3064 addr
= alloca(addrlen
+1);
3065 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3069 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3071 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3076 host_msg
= copy_msg
;
3078 unlock_user(host_msg
, msg
, 0);
3082 /* do_recvfrom() Must return target values and target errnos. */
3083 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3084 abi_ulong target_addr
,
3085 abi_ulong target_addrlen
)
3087 socklen_t addrlen
, ret_addrlen
;
3092 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3094 return -TARGET_EFAULT
;
3096 if (get_user_u32(addrlen
, target_addrlen
)) {
3097 ret
= -TARGET_EFAULT
;
3100 if ((int)addrlen
< 0) {
3101 ret
= -TARGET_EINVAL
;
3104 addr
= alloca(addrlen
);
3105 ret_addrlen
= addrlen
;
3106 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3107 addr
, &ret_addrlen
));
3109 addr
= NULL
; /* To keep compiler quiet. */
3110 addrlen
= 0; /* To keep compiler quiet. */
3111 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3113 if (!is_error(ret
)) {
3114 if (fd_trans_host_to_target_data(fd
)) {
3116 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3117 if (is_error(trans
)) {
3123 host_to_target_sockaddr(target_addr
, addr
,
3124 MIN(addrlen
, ret_addrlen
));
3125 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3126 ret
= -TARGET_EFAULT
;
3130 unlock_user(host_msg
, msg
, len
);
3133 unlock_user(host_msg
, msg
, 0);
3138 #ifdef TARGET_NR_socketcall
3139 /* do_socketcall() must return target values and target errnos. */
3140 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3142 static const unsigned nargs
[] = { /* number of arguments per operation */
3143 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3144 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3145 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3146 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3147 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3148 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3149 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3150 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3151 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3152 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3153 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3154 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3155 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3156 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3157 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3158 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3159 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3160 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3161 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3162 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3164 abi_long a
[6]; /* max 6 args */
3167 /* check the range of the first argument num */
3168 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3169 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3170 return -TARGET_EINVAL
;
3172 /* ensure we have space for args */
3173 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3174 return -TARGET_EINVAL
;
3176 /* collect the arguments in a[] according to nargs[] */
3177 for (i
= 0; i
< nargs
[num
]; ++i
) {
3178 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3179 return -TARGET_EFAULT
;
3182 /* now when we have the args, invoke the appropriate underlying function */
3184 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3185 return do_socket(a
[0], a
[1], a
[2]);
3186 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3187 return do_bind(a
[0], a
[1], a
[2]);
3188 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3189 return do_connect(a
[0], a
[1], a
[2]);
3190 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3191 return get_errno(listen(a
[0], a
[1]));
3192 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3193 return do_accept4(a
[0], a
[1], a
[2], 0);
3194 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3195 return do_getsockname(a
[0], a
[1], a
[2]);
3196 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3197 return do_getpeername(a
[0], a
[1], a
[2]);
3198 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3199 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3200 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3201 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3202 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3203 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3204 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3205 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3206 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3207 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3208 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3209 return get_errno(shutdown(a
[0], a
[1]));
3210 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3211 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3212 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3213 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3214 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3215 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3216 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3217 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3218 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3219 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3220 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3221 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3222 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3223 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3225 gemu_log("Unsupported socketcall: %d\n", num
);
3226 return -TARGET_EINVAL
;
3231 #define N_SHM_REGIONS 32
3233 static struct shm_region
{
3237 } shm_regions
[N_SHM_REGIONS
];
3239 #ifndef TARGET_SEMID64_DS
3240 /* asm-generic version of this struct */
3241 struct target_semid64_ds
3243 struct target_ipc_perm sem_perm
;
3244 abi_ulong sem_otime
;
3245 #if TARGET_ABI_BITS == 32
3246 abi_ulong __unused1
;
3248 abi_ulong sem_ctime
;
3249 #if TARGET_ABI_BITS == 32
3250 abi_ulong __unused2
;
3252 abi_ulong sem_nsems
;
3253 abi_ulong __unused3
;
3254 abi_ulong __unused4
;
3258 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3259 abi_ulong target_addr
)
3261 struct target_ipc_perm
*target_ip
;
3262 struct target_semid64_ds
*target_sd
;
3264 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3265 return -TARGET_EFAULT
;
3266 target_ip
= &(target_sd
->sem_perm
);
3267 host_ip
->__key
= tswap32(target_ip
->__key
);
3268 host_ip
->uid
= tswap32(target_ip
->uid
);
3269 host_ip
->gid
= tswap32(target_ip
->gid
);
3270 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3271 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3272 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3273 host_ip
->mode
= tswap32(target_ip
->mode
);
3275 host_ip
->mode
= tswap16(target_ip
->mode
);
3277 #if defined(TARGET_PPC)
3278 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3280 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3282 unlock_user_struct(target_sd
, target_addr
, 0);
3286 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3287 struct ipc_perm
*host_ip
)
3289 struct target_ipc_perm
*target_ip
;
3290 struct target_semid64_ds
*target_sd
;
3292 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3293 return -TARGET_EFAULT
;
3294 target_ip
= &(target_sd
->sem_perm
);
3295 target_ip
->__key
= tswap32(host_ip
->__key
);
3296 target_ip
->uid
= tswap32(host_ip
->uid
);
3297 target_ip
->gid
= tswap32(host_ip
->gid
);
3298 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3299 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3300 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3301 target_ip
->mode
= tswap32(host_ip
->mode
);
3303 target_ip
->mode
= tswap16(host_ip
->mode
);
3305 #if defined(TARGET_PPC)
3306 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3308 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3310 unlock_user_struct(target_sd
, target_addr
, 1);
3314 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3315 abi_ulong target_addr
)
3317 struct target_semid64_ds
*target_sd
;
3319 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3320 return -TARGET_EFAULT
;
3321 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3322 return -TARGET_EFAULT
;
3323 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3324 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3325 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3326 unlock_user_struct(target_sd
, target_addr
, 0);
3330 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3331 struct semid_ds
*host_sd
)
3333 struct target_semid64_ds
*target_sd
;
3335 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3336 return -TARGET_EFAULT
;
3337 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3338 return -TARGET_EFAULT
;
3339 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3340 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3341 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3342 unlock_user_struct(target_sd
, target_addr
, 1);
3346 struct target_seminfo
{
3359 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3360 struct seminfo
*host_seminfo
)
3362 struct target_seminfo
*target_seminfo
;
3363 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3364 return -TARGET_EFAULT
;
3365 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3366 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3367 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3368 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3369 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3370 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3371 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3372 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3373 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3374 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3375 unlock_user_struct(target_seminfo
, target_addr
, 1);
3381 struct semid_ds
*buf
;
3382 unsigned short *array
;
3383 struct seminfo
*__buf
;
3386 union target_semun
{
3393 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3394 abi_ulong target_addr
)
3397 unsigned short *array
;
3399 struct semid_ds semid_ds
;
3402 semun
.buf
= &semid_ds
;
3404 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3406 return get_errno(ret
);
3408 nsems
= semid_ds
.sem_nsems
;
3410 *host_array
= g_try_new(unsigned short, nsems
);
3412 return -TARGET_ENOMEM
;
3414 array
= lock_user(VERIFY_READ
, target_addr
,
3415 nsems
*sizeof(unsigned short), 1);
3417 g_free(*host_array
);
3418 return -TARGET_EFAULT
;
3421 for(i
=0; i
<nsems
; i
++) {
3422 __get_user((*host_array
)[i
], &array
[i
]);
3424 unlock_user(array
, target_addr
, 0);
3429 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3430 unsigned short **host_array
)
3433 unsigned short *array
;
3435 struct semid_ds semid_ds
;
3438 semun
.buf
= &semid_ds
;
3440 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3442 return get_errno(ret
);
3444 nsems
= semid_ds
.sem_nsems
;
3446 array
= lock_user(VERIFY_WRITE
, target_addr
,
3447 nsems
*sizeof(unsigned short), 0);
3449 return -TARGET_EFAULT
;
3451 for(i
=0; i
<nsems
; i
++) {
3452 __put_user((*host_array
)[i
], &array
[i
]);
3454 g_free(*host_array
);
3455 unlock_user(array
, target_addr
, 1);
3460 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3461 abi_ulong target_arg
)
3463 union target_semun target_su
= { .buf
= target_arg
};
3465 struct semid_ds dsarg
;
3466 unsigned short *array
= NULL
;
3467 struct seminfo seminfo
;
3468 abi_long ret
= -TARGET_EINVAL
;
3475 /* In 64 bit cross-endian situations, we will erroneously pick up
3476 * the wrong half of the union for the "val" element. To rectify
3477 * this, the entire 8-byte structure is byteswapped, followed by
3478 * a swap of the 4 byte val field. In other cases, the data is
3479 * already in proper host byte order. */
3480 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3481 target_su
.buf
= tswapal(target_su
.buf
);
3482 arg
.val
= tswap32(target_su
.val
);
3484 arg
.val
= target_su
.val
;
3486 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3490 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3494 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3495 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3502 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3506 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3507 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3513 arg
.__buf
= &seminfo
;
3514 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3515 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3523 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3530 struct target_sembuf
{
3531 unsigned short sem_num
;
3536 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3537 abi_ulong target_addr
,
3540 struct target_sembuf
*target_sembuf
;
3543 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3544 nsops
*sizeof(struct target_sembuf
), 1);
3546 return -TARGET_EFAULT
;
3548 for(i
=0; i
<nsops
; i
++) {
3549 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3550 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3551 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3554 unlock_user(target_sembuf
, target_addr
, 0);
3559 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3561 struct sembuf sops
[nsops
];
3564 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3565 return -TARGET_EFAULT
;
3567 ret
= -TARGET_ENOSYS
;
3568 #ifdef __NR_semtimedop
3569 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3572 if (ret
== -TARGET_ENOSYS
) {
3573 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
, nsops
, 0, sops
, 0));
3579 struct target_msqid_ds
3581 struct target_ipc_perm msg_perm
;
3582 abi_ulong msg_stime
;
3583 #if TARGET_ABI_BITS == 32
3584 abi_ulong __unused1
;
3586 abi_ulong msg_rtime
;
3587 #if TARGET_ABI_BITS == 32
3588 abi_ulong __unused2
;
3590 abi_ulong msg_ctime
;
3591 #if TARGET_ABI_BITS == 32
3592 abi_ulong __unused3
;
3594 abi_ulong __msg_cbytes
;
3596 abi_ulong msg_qbytes
;
3597 abi_ulong msg_lspid
;
3598 abi_ulong msg_lrpid
;
3599 abi_ulong __unused4
;
3600 abi_ulong __unused5
;
3603 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3604 abi_ulong target_addr
)
3606 struct target_msqid_ds
*target_md
;
3608 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3609 return -TARGET_EFAULT
;
3610 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3611 return -TARGET_EFAULT
;
3612 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3613 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3614 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3615 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3616 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3617 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3618 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3619 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3620 unlock_user_struct(target_md
, target_addr
, 0);
3624 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3625 struct msqid_ds
*host_md
)
3627 struct target_msqid_ds
*target_md
;
3629 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3630 return -TARGET_EFAULT
;
3631 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3632 return -TARGET_EFAULT
;
3633 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3634 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3635 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3636 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3637 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3638 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3639 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3640 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3641 unlock_user_struct(target_md
, target_addr
, 1);
3645 struct target_msginfo
{
3653 unsigned short int msgseg
;
3656 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3657 struct msginfo
*host_msginfo
)
3659 struct target_msginfo
*target_msginfo
;
3660 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3661 return -TARGET_EFAULT
;
3662 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3663 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3664 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3665 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3666 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3667 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3668 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3669 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3670 unlock_user_struct(target_msginfo
, target_addr
, 1);
3674 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3676 struct msqid_ds dsarg
;
3677 struct msginfo msginfo
;
3678 abi_long ret
= -TARGET_EINVAL
;
3686 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3687 return -TARGET_EFAULT
;
3688 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3689 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3690 return -TARGET_EFAULT
;
3693 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3697 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3698 if (host_to_target_msginfo(ptr
, &msginfo
))
3699 return -TARGET_EFAULT
;
3706 struct target_msgbuf
{
3711 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3712 ssize_t msgsz
, int msgflg
)
3714 struct target_msgbuf
*target_mb
;
3715 struct msgbuf
*host_mb
;
3719 return -TARGET_EINVAL
;
3722 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3723 return -TARGET_EFAULT
;
3724 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3726 unlock_user_struct(target_mb
, msgp
, 0);
3727 return -TARGET_ENOMEM
;
3729 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3730 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3731 ret
= -TARGET_ENOSYS
;
3733 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3736 if (ret
== -TARGET_ENOSYS
) {
3737 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
3742 unlock_user_struct(target_mb
, msgp
, 0);
3747 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3748 ssize_t msgsz
, abi_long msgtyp
,
3751 struct target_msgbuf
*target_mb
;
3753 struct msgbuf
*host_mb
;
3757 return -TARGET_EINVAL
;
3760 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3761 return -TARGET_EFAULT
;
3763 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3765 ret
= -TARGET_ENOMEM
;
3768 ret
= -TARGET_ENOSYS
;
3770 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3773 if (ret
== -TARGET_ENOSYS
) {
3774 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
3775 msgflg
, host_mb
, msgtyp
));
3780 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3781 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3782 if (!target_mtext
) {
3783 ret
= -TARGET_EFAULT
;
3786 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3787 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3790 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3794 unlock_user_struct(target_mb
, msgp
, 1);
3799 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3800 abi_ulong target_addr
)
3802 struct target_shmid_ds
*target_sd
;
3804 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3805 return -TARGET_EFAULT
;
3806 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3807 return -TARGET_EFAULT
;
3808 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3809 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3810 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3811 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3812 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3813 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3814 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3815 unlock_user_struct(target_sd
, target_addr
, 0);
3819 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3820 struct shmid_ds
*host_sd
)
3822 struct target_shmid_ds
*target_sd
;
3824 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3825 return -TARGET_EFAULT
;
3826 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3827 return -TARGET_EFAULT
;
3828 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3829 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3830 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3831 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3832 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3833 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3834 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3835 unlock_user_struct(target_sd
, target_addr
, 1);
3839 struct target_shminfo
{
3847 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3848 struct shminfo
*host_shminfo
)
3850 struct target_shminfo
*target_shminfo
;
3851 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3852 return -TARGET_EFAULT
;
3853 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3854 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3855 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3856 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3857 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3858 unlock_user_struct(target_shminfo
, target_addr
, 1);
3862 struct target_shm_info
{
3867 abi_ulong swap_attempts
;
3868 abi_ulong swap_successes
;
3871 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3872 struct shm_info
*host_shm_info
)
3874 struct target_shm_info
*target_shm_info
;
3875 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3876 return -TARGET_EFAULT
;
3877 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3878 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3879 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3880 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3881 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3882 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3883 unlock_user_struct(target_shm_info
, target_addr
, 1);
3887 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3889 struct shmid_ds dsarg
;
3890 struct shminfo shminfo
;
3891 struct shm_info shm_info
;
3892 abi_long ret
= -TARGET_EINVAL
;
3900 if (target_to_host_shmid_ds(&dsarg
, buf
))
3901 return -TARGET_EFAULT
;
3902 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3903 if (host_to_target_shmid_ds(buf
, &dsarg
))
3904 return -TARGET_EFAULT
;
3907 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3908 if (host_to_target_shminfo(buf
, &shminfo
))
3909 return -TARGET_EFAULT
;
3912 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3913 if (host_to_target_shm_info(buf
, &shm_info
))
3914 return -TARGET_EFAULT
;
3919 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3926 #ifndef TARGET_FORCE_SHMLBA
3927 /* For most architectures, SHMLBA is the same as the page size;
3928 * some architectures have larger values, in which case they should
3929 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3930 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3931 * and defining its own value for SHMLBA.
3933 * The kernel also permits SHMLBA to be set by the architecture to a
3934 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3935 * this means that addresses are rounded to the large size if
3936 * SHM_RND is set but addresses not aligned to that size are not rejected
3937 * as long as they are at least page-aligned. Since the only architecture
3938 * which uses this is ia64 this code doesn't provide for that oddity.
3940 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
3942 return TARGET_PAGE_SIZE
;
3946 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
3947 int shmid
, abi_ulong shmaddr
, int shmflg
)
3951 struct shmid_ds shm_info
;
3955 /* find out the length of the shared memory segment */
3956 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3957 if (is_error(ret
)) {
3958 /* can't get length, bail out */
3962 shmlba
= target_shmlba(cpu_env
);
3964 if (shmaddr
& (shmlba
- 1)) {
3965 if (shmflg
& SHM_RND
) {
3966 shmaddr
&= ~(shmlba
- 1);
3968 return -TARGET_EINVAL
;
3971 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
3972 return -TARGET_EINVAL
;
3978 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3980 abi_ulong mmap_start
;
3982 /* In order to use the host shmat, we need to honor host SHMLBA. */
3983 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
3985 if (mmap_start
== -1) {
3987 host_raddr
= (void *)-1;
3989 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3992 if (host_raddr
== (void *)-1) {
3994 return get_errno((long)host_raddr
);
3996 raddr
=h2g((unsigned long)host_raddr
);
3998 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3999 PAGE_VALID
| PAGE_READ
|
4000 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4002 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4003 if (!shm_regions
[i
].in_use
) {
4004 shm_regions
[i
].in_use
= true;
4005 shm_regions
[i
].start
= raddr
;
4006 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4016 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4023 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4024 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4025 shm_regions
[i
].in_use
= false;
4026 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4030 rv
= get_errno(shmdt(g2h(shmaddr
)));
4037 #ifdef TARGET_NR_ipc
4038 /* ??? This only works with linear mappings. */
4039 /* do_ipc() must return target values and target errnos. */
4040 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4041 unsigned int call
, abi_long first
,
4042 abi_long second
, abi_long third
,
4043 abi_long ptr
, abi_long fifth
)
4048 version
= call
>> 16;
4053 ret
= do_semop(first
, ptr
, second
);
4057 ret
= get_errno(semget(first
, second
, third
));
4060 case IPCOP_semctl
: {
4061 /* The semun argument to semctl is passed by value, so dereference the
4064 get_user_ual(atptr
, ptr
);
4065 ret
= do_semctl(first
, second
, third
, atptr
);
4070 ret
= get_errno(msgget(first
, second
));
4074 ret
= do_msgsnd(first
, ptr
, second
, third
);
4078 ret
= do_msgctl(first
, second
, ptr
);
4085 struct target_ipc_kludge
{
4090 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4091 ret
= -TARGET_EFAULT
;
4095 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4097 unlock_user_struct(tmp
, ptr
, 0);
4101 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4110 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4111 if (is_error(raddr
))
4112 return get_errno(raddr
);
4113 if (put_user_ual(raddr
, third
))
4114 return -TARGET_EFAULT
;
4118 ret
= -TARGET_EINVAL
;
4123 ret
= do_shmdt(ptr
);
4127 /* IPC_* flag values are the same on all linux platforms */
4128 ret
= get_errno(shmget(first
, second
, third
));
4131 /* IPC_* and SHM_* command values are the same on all linux platforms */
4133 ret
= do_shmctl(first
, second
, ptr
);
4136 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4137 ret
= -TARGET_ENOSYS
;
4144 /* kernel structure types definitions */
4146 #define STRUCT(name, ...) STRUCT_ ## name,
4147 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4149 #include "syscall_types.h"
4153 #undef STRUCT_SPECIAL
4155 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4156 #define STRUCT_SPECIAL(name)
4157 #include "syscall_types.h"
4159 #undef STRUCT_SPECIAL
4161 typedef struct IOCTLEntry IOCTLEntry
;
4163 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4164 int fd
, int cmd
, abi_long arg
);
4168 unsigned int host_cmd
;
4171 do_ioctl_fn
*do_ioctl
;
4172 const argtype arg_type
[5];
4175 #define IOC_R 0x0001
4176 #define IOC_W 0x0002
4177 #define IOC_RW (IOC_R | IOC_W)
4179 #define MAX_STRUCT_SIZE 4096
4181 #ifdef CONFIG_FIEMAP
4182 /* So fiemap access checks don't overflow on 32 bit systems.
4183 * This is very slightly smaller than the limit imposed by
4184 * the underlying kernel.
4186 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4187 / sizeof(struct fiemap_extent))
4189 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4190 int fd
, int cmd
, abi_long arg
)
4192 /* The parameter for this ioctl is a struct fiemap followed
4193 * by an array of struct fiemap_extent whose size is set
4194 * in fiemap->fm_extent_count. The array is filled in by the
4197 int target_size_in
, target_size_out
;
4199 const argtype
*arg_type
= ie
->arg_type
;
4200 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4203 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4207 assert(arg_type
[0] == TYPE_PTR
);
4208 assert(ie
->access
== IOC_RW
);
4210 target_size_in
= thunk_type_size(arg_type
, 0);
4211 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4213 return -TARGET_EFAULT
;
4215 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4216 unlock_user(argptr
, arg
, 0);
4217 fm
= (struct fiemap
*)buf_temp
;
4218 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4219 return -TARGET_EINVAL
;
4222 outbufsz
= sizeof (*fm
) +
4223 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4225 if (outbufsz
> MAX_STRUCT_SIZE
) {
4226 /* We can't fit all the extents into the fixed size buffer.
4227 * Allocate one that is large enough and use it instead.
4229 fm
= g_try_malloc(outbufsz
);
4231 return -TARGET_ENOMEM
;
4233 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4236 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4237 if (!is_error(ret
)) {
4238 target_size_out
= target_size_in
;
4239 /* An extent_count of 0 means we were only counting the extents
4240 * so there are no structs to copy
4242 if (fm
->fm_extent_count
!= 0) {
4243 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4245 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4247 ret
= -TARGET_EFAULT
;
4249 /* Convert the struct fiemap */
4250 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4251 if (fm
->fm_extent_count
!= 0) {
4252 p
= argptr
+ target_size_in
;
4253 /* ...and then all the struct fiemap_extents */
4254 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4255 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4260 unlock_user(argptr
, arg
, target_size_out
);
4270 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4271 int fd
, int cmd
, abi_long arg
)
4273 const argtype
*arg_type
= ie
->arg_type
;
4277 struct ifconf
*host_ifconf
;
4279 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4280 int target_ifreq_size
;
4285 abi_long target_ifc_buf
;
4289 assert(arg_type
[0] == TYPE_PTR
);
4290 assert(ie
->access
== IOC_RW
);
4293 target_size
= thunk_type_size(arg_type
, 0);
4295 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4297 return -TARGET_EFAULT
;
4298 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4299 unlock_user(argptr
, arg
, 0);
4301 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4302 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4303 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4305 if (target_ifc_buf
!= 0) {
4306 target_ifc_len
= host_ifconf
->ifc_len
;
4307 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4308 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4310 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4311 if (outbufsz
> MAX_STRUCT_SIZE
) {
4313 * We can't fit all the extents into the fixed size buffer.
4314 * Allocate one that is large enough and use it instead.
4316 host_ifconf
= malloc(outbufsz
);
4318 return -TARGET_ENOMEM
;
4320 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4323 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4325 host_ifconf
->ifc_len
= host_ifc_len
;
4327 host_ifc_buf
= NULL
;
4329 host_ifconf
->ifc_buf
= host_ifc_buf
;
4331 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4332 if (!is_error(ret
)) {
4333 /* convert host ifc_len to target ifc_len */
4335 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4336 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4337 host_ifconf
->ifc_len
= target_ifc_len
;
4339 /* restore target ifc_buf */
4341 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4343 /* copy struct ifconf to target user */
4345 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4347 return -TARGET_EFAULT
;
4348 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4349 unlock_user(argptr
, arg
, target_size
);
4351 if (target_ifc_buf
!= 0) {
4352 /* copy ifreq[] to target user */
4353 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4354 for (i
= 0; i
< nb_ifreq
; i
++) {
4355 thunk_convert(argptr
+ i
* target_ifreq_size
,
4356 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4357 ifreq_arg_type
, THUNK_TARGET
);
4359 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4370 #if defined(CONFIG_USBFS)
4371 #if HOST_LONG_BITS > 64
4372 #error USBDEVFS thunks do not support >64 bit hosts yet.
4375 uint64_t target_urb_adr
;
4376 uint64_t target_buf_adr
;
4377 char *target_buf_ptr
;
4378 struct usbdevfs_urb host_urb
;
4381 static GHashTable
*usbdevfs_urb_hashtable(void)
4383 static GHashTable
*urb_hashtable
;
4385 if (!urb_hashtable
) {
4386 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4388 return urb_hashtable
;
/*
 * Register a live URB so it can be found again by its guest address.
 * The urb itself serves as both key and value: the key hash reads the
 * leading 64-bit target_urb_adr field.
 */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
/*
 * Find the live URB registered under the given guest URB address, or
 * NULL if none is registered.
 */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    return g_hash_table_lookup(usbdevfs_urb_hashtable(), &target_urb_adr);
}
/*
 * Unregister a live URB (e.g. once it has been reaped or discarded).
 * Does not free the urb; ownership stays with the caller.
 */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4410 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4411 int fd
, int cmd
, abi_long arg
)
4413 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4414 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4415 struct live_urb
*lurb
;
4419 uintptr_t target_urb_adr
;
4422 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4424 memset(buf_temp
, 0, sizeof(uint64_t));
4425 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4426 if (is_error(ret
)) {
4430 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4431 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4432 if (!lurb
->target_urb_adr
) {
4433 return -TARGET_EFAULT
;
4435 urb_hashtable_remove(lurb
);
4436 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4437 lurb
->host_urb
.buffer_length
);
4438 lurb
->target_buf_ptr
= NULL
;
4440 /* restore the guest buffer pointer */
4441 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4443 /* update the guest urb struct */
4444 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4447 return -TARGET_EFAULT
;
4449 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4450 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4452 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4453 /* write back the urb handle */
4454 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4457 return -TARGET_EFAULT
;
4460 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4461 target_urb_adr
= lurb
->target_urb_adr
;
4462 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4463 unlock_user(argptr
, arg
, target_size
);
4470 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4471 uint8_t *buf_temp
__attribute__((unused
)),
4472 int fd
, int cmd
, abi_long arg
)
4474 struct live_urb
*lurb
;
4476 /* map target address back to host URB with metadata. */
4477 lurb
= urb_hashtable_lookup(arg
);
4479 return -TARGET_EFAULT
;
4481 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4485 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4486 int fd
, int cmd
, abi_long arg
)
4488 const argtype
*arg_type
= ie
->arg_type
;
4493 struct live_urb
*lurb
;
4496 * each submitted URB needs to map to a unique ID for the
4497 * kernel, and that unique ID needs to be a pointer to
4498 * host memory. hence, we need to malloc for each URB.
4499 * isochronous transfers have a variable length struct.
4502 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4504 /* construct host copy of urb and metadata */
4505 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4507 return -TARGET_ENOMEM
;
4510 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4513 return -TARGET_EFAULT
;
4515 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4516 unlock_user(argptr
, arg
, 0);
4518 lurb
->target_urb_adr
= arg
;
4519 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4521 /* buffer space used depends on endpoint type so lock the entire buffer */
4522 /* control type urbs should check the buffer contents for true direction */
4523 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4524 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4525 lurb
->host_urb
.buffer_length
, 1);
4526 if (lurb
->target_buf_ptr
== NULL
) {
4528 return -TARGET_EFAULT
;
4531 /* update buffer pointer in host copy */
4532 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4534 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4535 if (is_error(ret
)) {
4536 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4539 urb_hashtable_insert(lurb
);
4544 #endif /* CONFIG_USBFS */
4546 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4547 int cmd
, abi_long arg
)
4550 struct dm_ioctl
*host_dm
;
4551 abi_long guest_data
;
4552 uint32_t guest_data_size
;
4554 const argtype
*arg_type
= ie
->arg_type
;
4556 void *big_buf
= NULL
;
4560 target_size
= thunk_type_size(arg_type
, 0);
4561 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4563 ret
= -TARGET_EFAULT
;
4566 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4567 unlock_user(argptr
, arg
, 0);
4569 /* buf_temp is too small, so fetch things into a bigger buffer */
4570 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4571 memcpy(big_buf
, buf_temp
, target_size
);
4575 guest_data
= arg
+ host_dm
->data_start
;
4576 if ((guest_data
- arg
) < 0) {
4577 ret
= -TARGET_EINVAL
;
4580 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4581 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4583 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4585 ret
= -TARGET_EFAULT
;
4589 switch (ie
->host_cmd
) {
4591 case DM_LIST_DEVICES
:
4594 case DM_DEV_SUSPEND
:
4597 case DM_TABLE_STATUS
:
4598 case DM_TABLE_CLEAR
:
4600 case DM_LIST_VERSIONS
:
4604 case DM_DEV_SET_GEOMETRY
:
4605 /* data contains only strings */
4606 memcpy(host_data
, argptr
, guest_data_size
);
4609 memcpy(host_data
, argptr
, guest_data_size
);
4610 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4614 void *gspec
= argptr
;
4615 void *cur_data
= host_data
;
4616 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4617 int spec_size
= thunk_type_size(arg_type
, 0);
4620 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4621 struct dm_target_spec
*spec
= cur_data
;
4625 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4626 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4628 spec
->next
= sizeof(*spec
) + slen
;
4629 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4631 cur_data
+= spec
->next
;
4636 ret
= -TARGET_EINVAL
;
4637 unlock_user(argptr
, guest_data
, 0);
4640 unlock_user(argptr
, guest_data
, 0);
4642 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4643 if (!is_error(ret
)) {
4644 guest_data
= arg
+ host_dm
->data_start
;
4645 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4646 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4647 switch (ie
->host_cmd
) {
4652 case DM_DEV_SUSPEND
:
4655 case DM_TABLE_CLEAR
:
4657 case DM_DEV_SET_GEOMETRY
:
4658 /* no return data */
4660 case DM_LIST_DEVICES
:
4662 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4663 uint32_t remaining_data
= guest_data_size
;
4664 void *cur_data
= argptr
;
4665 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4666 int nl_size
= 12; /* can't use thunk_size due to alignment */
4669 uint32_t next
= nl
->next
;
4671 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4673 if (remaining_data
< nl
->next
) {
4674 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4677 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4678 strcpy(cur_data
+ nl_size
, nl
->name
);
4679 cur_data
+= nl
->next
;
4680 remaining_data
-= nl
->next
;
4684 nl
= (void*)nl
+ next
;
4689 case DM_TABLE_STATUS
:
4691 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4692 void *cur_data
= argptr
;
4693 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4694 int spec_size
= thunk_type_size(arg_type
, 0);
4697 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4698 uint32_t next
= spec
->next
;
4699 int slen
= strlen((char*)&spec
[1]) + 1;
4700 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4701 if (guest_data_size
< spec
->next
) {
4702 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4705 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4706 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4707 cur_data
= argptr
+ spec
->next
;
4708 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4714 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4715 int count
= *(uint32_t*)hdata
;
4716 uint64_t *hdev
= hdata
+ 8;
4717 uint64_t *gdev
= argptr
+ 8;
4720 *(uint32_t*)argptr
= tswap32(count
);
4721 for (i
= 0; i
< count
; i
++) {
4722 *gdev
= tswap64(*hdev
);
4728 case DM_LIST_VERSIONS
:
4730 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4731 uint32_t remaining_data
= guest_data_size
;
4732 void *cur_data
= argptr
;
4733 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4734 int vers_size
= thunk_type_size(arg_type
, 0);
4737 uint32_t next
= vers
->next
;
4739 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4741 if (remaining_data
< vers
->next
) {
4742 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4745 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4746 strcpy(cur_data
+ vers_size
, vers
->name
);
4747 cur_data
+= vers
->next
;
4748 remaining_data
-= vers
->next
;
4752 vers
= (void*)vers
+ next
;
4757 unlock_user(argptr
, guest_data
, 0);
4758 ret
= -TARGET_EINVAL
;
4761 unlock_user(argptr
, guest_data
, guest_data_size
);
4763 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4765 ret
= -TARGET_EFAULT
;
4768 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4769 unlock_user(argptr
, arg
, target_size
);
4776 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4777 int cmd
, abi_long arg
)
4781 const argtype
*arg_type
= ie
->arg_type
;
4782 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4785 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4786 struct blkpg_partition host_part
;
4788 /* Read and convert blkpg */
4790 target_size
= thunk_type_size(arg_type
, 0);
4791 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4793 ret
= -TARGET_EFAULT
;
4796 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4797 unlock_user(argptr
, arg
, 0);
4799 switch (host_blkpg
->op
) {
4800 case BLKPG_ADD_PARTITION
:
4801 case BLKPG_DEL_PARTITION
:
4802 /* payload is struct blkpg_partition */
4805 /* Unknown opcode */
4806 ret
= -TARGET_EINVAL
;
4810 /* Read and convert blkpg->data */
4811 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4812 target_size
= thunk_type_size(part_arg_type
, 0);
4813 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4815 ret
= -TARGET_EFAULT
;
4818 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4819 unlock_user(argptr
, arg
, 0);
4821 /* Swizzle the data pointer to our local copy and call! */
4822 host_blkpg
->data
= &host_part
;
4823 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4829 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4830 int fd
, int cmd
, abi_long arg
)
4832 const argtype
*arg_type
= ie
->arg_type
;
4833 const StructEntry
*se
;
4834 const argtype
*field_types
;
4835 const int *dst_offsets
, *src_offsets
;
4838 abi_ulong
*target_rt_dev_ptr
= NULL
;
4839 unsigned long *host_rt_dev_ptr
= NULL
;
4843 assert(ie
->access
== IOC_W
);
4844 assert(*arg_type
== TYPE_PTR
);
4846 assert(*arg_type
== TYPE_STRUCT
);
4847 target_size
= thunk_type_size(arg_type
, 0);
4848 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4850 return -TARGET_EFAULT
;
4853 assert(*arg_type
== (int)STRUCT_rtentry
);
4854 se
= struct_entries
+ *arg_type
++;
4855 assert(se
->convert
[0] == NULL
);
4856 /* convert struct here to be able to catch rt_dev string */
4857 field_types
= se
->field_types
;
4858 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4859 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4860 for (i
= 0; i
< se
->nb_fields
; i
++) {
4861 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4862 assert(*field_types
== TYPE_PTRVOID
);
4863 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4864 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4865 if (*target_rt_dev_ptr
!= 0) {
4866 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4867 tswapal(*target_rt_dev_ptr
));
4868 if (!*host_rt_dev_ptr
) {
4869 unlock_user(argptr
, arg
, 0);
4870 return -TARGET_EFAULT
;
4873 *host_rt_dev_ptr
= 0;
4878 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4879 argptr
+ src_offsets
[i
],
4880 field_types
, THUNK_HOST
);
4882 unlock_user(argptr
, arg
, 0);
4884 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4886 assert(host_rt_dev_ptr
!= NULL
);
4887 assert(target_rt_dev_ptr
!= NULL
);
4888 if (*host_rt_dev_ptr
!= 0) {
4889 unlock_user((void *)*host_rt_dev_ptr
,
4890 *target_rt_dev_ptr
, 0);
4895 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4896 int fd
, int cmd
, abi_long arg
)
4898 int sig
= target_to_host_signal(arg
);
4899 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4903 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4904 int fd
, int cmd
, abi_long arg
)
4906 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
4907 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
4911 static IOCTLEntry ioctl_entries
[] = {
4912 #define IOCTL(cmd, access, ...) \
4913 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4914 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4915 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4916 #define IOCTL_IGNORE(cmd) \
4917 { TARGET_ ## cmd, 0, #cmd },
4922 /* ??? Implement proper locking for ioctls. */
4923 /* do_ioctl() Must return target values and target errnos. */
4924 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4926 const IOCTLEntry
*ie
;
4927 const argtype
*arg_type
;
4929 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4935 if (ie
->target_cmd
== 0) {
4936 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4937 return -TARGET_ENOSYS
;
4939 if (ie
->target_cmd
== cmd
)
4943 arg_type
= ie
->arg_type
;
4945 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4946 } else if (!ie
->host_cmd
) {
4947 /* Some architectures define BSD ioctls in their headers
4948 that are not implemented in Linux. */
4949 return -TARGET_ENOSYS
;
4952 switch(arg_type
[0]) {
4955 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4959 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4963 target_size
= thunk_type_size(arg_type
, 0);
4964 switch(ie
->access
) {
4966 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4967 if (!is_error(ret
)) {
4968 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4970 return -TARGET_EFAULT
;
4971 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4972 unlock_user(argptr
, arg
, target_size
);
4976 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4978 return -TARGET_EFAULT
;
4979 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4980 unlock_user(argptr
, arg
, 0);
4981 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4985 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4987 return -TARGET_EFAULT
;
4988 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4989 unlock_user(argptr
, arg
, 0);
4990 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4991 if (!is_error(ret
)) {
4992 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4994 return -TARGET_EFAULT
;
4995 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4996 unlock_user(argptr
, arg
, target_size
);
5002 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5003 (long)cmd
, arg_type
[0]);
5004 ret
= -TARGET_ENOSYS
;
5010 static const bitmask_transtbl iflag_tbl
[] = {
5011 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5012 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5013 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5014 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5015 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5016 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5017 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5018 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5019 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5020 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5021 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5022 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5023 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5024 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5028 static const bitmask_transtbl oflag_tbl
[] = {
5029 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5030 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5031 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5032 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5033 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5034 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5035 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5036 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5037 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5038 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5039 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5040 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5041 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5042 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5043 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5044 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5045 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5046 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5047 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5048 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5049 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5050 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5051 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5052 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5056 static const bitmask_transtbl cflag_tbl
[] = {
5057 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5058 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5059 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5060 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5061 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5062 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5063 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5064 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5065 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5066 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5067 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5068 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5069 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5070 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5071 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5072 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5073 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5074 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5075 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5076 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5077 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5078 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5079 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5080 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5081 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5082 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5083 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5084 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5085 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5086 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5087 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5091 static const bitmask_transtbl lflag_tbl
[] = {
5092 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5093 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5094 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5095 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5096 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5097 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5098 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5099 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5100 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5101 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5102 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5103 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5104 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5105 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5106 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5110 static void target_to_host_termios (void *dst
, const void *src
)
5112 struct host_termios
*host
= dst
;
5113 const struct target_termios
*target
= src
;
5116 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5118 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5120 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5122 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5123 host
->c_line
= target
->c_line
;
5125 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5126 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5127 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5128 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5129 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5130 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5131 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5132 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5133 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5134 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5135 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5136 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5137 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5138 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5139 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5140 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5141 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5142 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5145 static void host_to_target_termios (void *dst
, const void *src
)
5147 struct target_termios
*target
= dst
;
5148 const struct host_termios
*host
= src
;
5151 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5153 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5155 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5157 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5158 target
->c_line
= host
->c_line
;
5160 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5161 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5162 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5163 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5164 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5165 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5166 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5167 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5168 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5169 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5170 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5171 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5172 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5173 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5174 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5175 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5176 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5177 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5180 static const StructEntry struct_termios_def
= {
5181 .convert
= { host_to_target_termios
, target_to_host_termios
},
5182 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5183 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5186 static bitmask_transtbl mmap_flags_tbl
[] = {
5187 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5188 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5189 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5190 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5191 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5192 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5193 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5194 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5195 MAP_DENYWRITE
, MAP_DENYWRITE
},
5196 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5197 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5198 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5199 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5200 MAP_NORESERVE
, MAP_NORESERVE
},
5201 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5202 /* MAP_STACK had been ignored by the kernel for quite some time.
5203 Recognize it for the target insofar as we do not want to pass
5204 it through to the host. */
5205 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5209 #if defined(TARGET_I386)
5211 /* NOTE: there is really one LDT for all the threads */
5212 static uint8_t *ldt_table
;
5214 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5221 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5222 if (size
> bytecount
)
5224 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5226 return -TARGET_EFAULT
;
5227 /* ??? Should this by byteswapped? */
5228 memcpy(p
, ldt_table
, size
);
5229 unlock_user(p
, ptr
, size
);
5233 /* XXX: add locking support */
5234 static abi_long
write_ldt(CPUX86State
*env
,
5235 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5237 struct target_modify_ldt_ldt_s ldt_info
;
5238 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5239 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5240 int seg_not_present
, useable
, lm
;
5241 uint32_t *lp
, entry_1
, entry_2
;
5243 if (bytecount
!= sizeof(ldt_info
))
5244 return -TARGET_EINVAL
;
5245 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5246 return -TARGET_EFAULT
;
5247 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5248 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5249 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5250 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5251 unlock_user_struct(target_ldt_info
, ptr
, 0);
5253 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5254 return -TARGET_EINVAL
;
5255 seg_32bit
= ldt_info
.flags
& 1;
5256 contents
= (ldt_info
.flags
>> 1) & 3;
5257 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5258 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5259 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5260 useable
= (ldt_info
.flags
>> 6) & 1;
5264 lm
= (ldt_info
.flags
>> 7) & 1;
5266 if (contents
== 3) {
5268 return -TARGET_EINVAL
;
5269 if (seg_not_present
== 0)
5270 return -TARGET_EINVAL
;
5272 /* allocate the LDT */
5274 env
->ldt
.base
= target_mmap(0,
5275 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5276 PROT_READ
|PROT_WRITE
,
5277 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5278 if (env
->ldt
.base
== -1)
5279 return -TARGET_ENOMEM
;
5280 memset(g2h(env
->ldt
.base
), 0,
5281 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5282 env
->ldt
.limit
= 0xffff;
5283 ldt_table
= g2h(env
->ldt
.base
);
5286 /* NOTE: same code as Linux kernel */
5287 /* Allow LDTs to be cleared by the user. */
5288 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5291 read_exec_only
== 1 &&
5293 limit_in_pages
== 0 &&
5294 seg_not_present
== 1 &&
5302 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5303 (ldt_info
.limit
& 0x0ffff);
5304 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5305 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5306 (ldt_info
.limit
& 0xf0000) |
5307 ((read_exec_only
^ 1) << 9) |
5309 ((seg_not_present
^ 1) << 15) |
5311 (limit_in_pages
<< 23) |
5315 entry_2
|= (useable
<< 20);
5317 /* Install the new entry ... */
5319 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5320 lp
[0] = tswap32(entry_1
);
5321 lp
[1] = tswap32(entry_2
);
5325 /* specific and weird i386 syscalls */
5326 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5327 unsigned long bytecount
)
5333 ret
= read_ldt(ptr
, bytecount
);
5336 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5339 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5342 ret
= -TARGET_ENOSYS
;
5348 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5349 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5351 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5352 struct target_modify_ldt_ldt_s ldt_info
;
5353 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5354 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5355 int seg_not_present
, useable
, lm
;
5356 uint32_t *lp
, entry_1
, entry_2
;
5359 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5360 if (!target_ldt_info
)
5361 return -TARGET_EFAULT
;
5362 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5363 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5364 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5365 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5366 if (ldt_info
.entry_number
== -1) {
5367 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5368 if (gdt_table
[i
] == 0) {
5369 ldt_info
.entry_number
= i
;
5370 target_ldt_info
->entry_number
= tswap32(i
);
5375 unlock_user_struct(target_ldt_info
, ptr
, 1);
5377 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5378 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5379 return -TARGET_EINVAL
;
5380 seg_32bit
= ldt_info
.flags
& 1;
5381 contents
= (ldt_info
.flags
>> 1) & 3;
5382 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5383 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5384 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5385 useable
= (ldt_info
.flags
>> 6) & 1;
5389 lm
= (ldt_info
.flags
>> 7) & 1;
5392 if (contents
== 3) {
5393 if (seg_not_present
== 0)
5394 return -TARGET_EINVAL
;
5397 /* NOTE: same code as Linux kernel */
5398 /* Allow LDTs to be cleared by the user. */
5399 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5400 if ((contents
== 0 &&
5401 read_exec_only
== 1 &&
5403 limit_in_pages
== 0 &&
5404 seg_not_present
== 1 &&
5412 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5413 (ldt_info
.limit
& 0x0ffff);
5414 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5415 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5416 (ldt_info
.limit
& 0xf0000) |
5417 ((read_exec_only
^ 1) << 9) |
5419 ((seg_not_present
^ 1) << 15) |
5421 (limit_in_pages
<< 23) |
5426 /* Install the new entry ... */
5428 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5429 lp
[0] = tswap32(entry_1
);
5430 lp
[1] = tswap32(entry_2
);
5434 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5436 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5437 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5438 uint32_t base_addr
, limit
, flags
;
5439 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5440 int seg_not_present
, useable
, lm
;
5441 uint32_t *lp
, entry_1
, entry_2
;
5443 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5444 if (!target_ldt_info
)
5445 return -TARGET_EFAULT
;
5446 idx
= tswap32(target_ldt_info
->entry_number
);
5447 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5448 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5449 unlock_user_struct(target_ldt_info
, ptr
, 1);
5450 return -TARGET_EINVAL
;
5452 lp
= (uint32_t *)(gdt_table
+ idx
);
5453 entry_1
= tswap32(lp
[0]);
5454 entry_2
= tswap32(lp
[1]);
5456 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5457 contents
= (entry_2
>> 10) & 3;
5458 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5459 seg_32bit
= (entry_2
>> 22) & 1;
5460 limit_in_pages
= (entry_2
>> 23) & 1;
5461 useable
= (entry_2
>> 20) & 1;
5465 lm
= (entry_2
>> 21) & 1;
5467 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5468 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5469 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5470 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5471 base_addr
= (entry_1
>> 16) |
5472 (entry_2
& 0xff000000) |
5473 ((entry_2
& 0xff) << 16);
5474 target_ldt_info
->base_addr
= tswapal(base_addr
);
5475 target_ldt_info
->limit
= tswap32(limit
);
5476 target_ldt_info
->flags
= tswap32(flags
);
5477 unlock_user_struct(target_ldt_info
, ptr
, 1);
5480 #endif /* TARGET_I386 && TARGET_ABI32 */
5482 #ifndef TARGET_ABI32
5483 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5490 case TARGET_ARCH_SET_GS
:
5491 case TARGET_ARCH_SET_FS
:
5492 if (code
== TARGET_ARCH_SET_GS
)
5496 cpu_x86_load_seg(env
, idx
, 0);
5497 env
->segs
[idx
].base
= addr
;
5499 case TARGET_ARCH_GET_GS
:
5500 case TARGET_ARCH_GET_FS
:
5501 if (code
== TARGET_ARCH_GET_GS
)
5505 val
= env
->segs
[idx
].base
;
5506 if (put_user(val
, addr
, abi_ulong
))
5507 ret
= -TARGET_EFAULT
;
5510 ret
= -TARGET_EINVAL
;
5517 #endif /* defined(TARGET_I386) */
5519 #define NEW_STACK_SIZE 0x40000
5522 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5525 pthread_mutex_t mutex
;
5526 pthread_cond_t cond
;
5529 abi_ulong child_tidptr
;
5530 abi_ulong parent_tidptr
;
5534 static void *clone_func(void *arg
)
5536 new_thread_info
*info
= arg
;
5541 rcu_register_thread();
5542 tcg_register_thread();
5546 ts
= (TaskState
*)cpu
->opaque
;
5547 info
->tid
= sys_gettid();
5549 if (info
->child_tidptr
)
5550 put_user_u32(info
->tid
, info
->child_tidptr
);
5551 if (info
->parent_tidptr
)
5552 put_user_u32(info
->tid
, info
->parent_tidptr
);
5553 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
5554 /* Enable signals. */
5555 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5556 /* Signal to the parent that we're ready. */
5557 pthread_mutex_lock(&info
->mutex
);
5558 pthread_cond_broadcast(&info
->cond
);
5559 pthread_mutex_unlock(&info
->mutex
);
5560 /* Wait until the parent has finished initializing the tls state. */
5561 pthread_mutex_lock(&clone_lock
);
5562 pthread_mutex_unlock(&clone_lock
);
5568 /* do_fork() Must return host values and target errnos (unlike most
5569 do_*() functions). */
5570 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5571 abi_ulong parent_tidptr
, target_ulong newtls
,
5572 abi_ulong child_tidptr
)
5574 CPUState
*cpu
= env_cpu(env
);
5578 CPUArchState
*new_env
;
5581 flags
&= ~CLONE_IGNORED_FLAGS
;
5583 /* Emulate vfork() with fork() */
5584 if (flags
& CLONE_VFORK
)
5585 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5587 if (flags
& CLONE_VM
) {
5588 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5589 new_thread_info info
;
5590 pthread_attr_t attr
;
5592 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5593 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5594 return -TARGET_EINVAL
;
5597 ts
= g_new0(TaskState
, 1);
5598 init_task_state(ts
);
5600 /* Grab a mutex so that thread setup appears atomic. */
5601 pthread_mutex_lock(&clone_lock
);
5603 /* we create a new CPU instance. */
5604 new_env
= cpu_copy(env
);
5605 /* Init regs that differ from the parent. */
5606 cpu_clone_regs(new_env
, newsp
);
5607 new_cpu
= env_cpu(new_env
);
5608 new_cpu
->opaque
= ts
;
5609 ts
->bprm
= parent_ts
->bprm
;
5610 ts
->info
= parent_ts
->info
;
5611 ts
->signal_mask
= parent_ts
->signal_mask
;
5613 if (flags
& CLONE_CHILD_CLEARTID
) {
5614 ts
->child_tidptr
= child_tidptr
;
5617 if (flags
& CLONE_SETTLS
) {
5618 cpu_set_tls (new_env
, newtls
);
5621 memset(&info
, 0, sizeof(info
));
5622 pthread_mutex_init(&info
.mutex
, NULL
);
5623 pthread_mutex_lock(&info
.mutex
);
5624 pthread_cond_init(&info
.cond
, NULL
);
5626 if (flags
& CLONE_CHILD_SETTID
) {
5627 info
.child_tidptr
= child_tidptr
;
5629 if (flags
& CLONE_PARENT_SETTID
) {
5630 info
.parent_tidptr
= parent_tidptr
;
5633 ret
= pthread_attr_init(&attr
);
5634 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5635 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5636 /* It is not safe to deliver signals until the child has finished
5637 initializing, so temporarily block all signals. */
5638 sigfillset(&sigmask
);
5639 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5640 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
5642 /* If this is our first additional thread, we need to ensure we
5643 * generate code for parallel execution and flush old translations.
5645 if (!parallel_cpus
) {
5646 parallel_cpus
= true;
5650 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5651 /* TODO: Free new CPU state if thread creation failed. */
5653 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5654 pthread_attr_destroy(&attr
);
5656 /* Wait for the child to initialize. */
5657 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5662 pthread_mutex_unlock(&info
.mutex
);
5663 pthread_cond_destroy(&info
.cond
);
5664 pthread_mutex_destroy(&info
.mutex
);
5665 pthread_mutex_unlock(&clone_lock
);
5667 /* if no CLONE_VM, we consider it is a fork */
5668 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5669 return -TARGET_EINVAL
;
5672 /* We can't support custom termination signals */
5673 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5674 return -TARGET_EINVAL
;
5677 if (block_signals()) {
5678 return -TARGET_ERESTARTSYS
;
5684 /* Child Process. */
5685 cpu_clone_regs(env
, newsp
);
5687 /* There is a race condition here. The parent process could
5688 theoretically read the TID in the child process before the child
5689 tid is set. This would require using either ptrace
5690 (not implemented) or having *_tidptr to point at a shared memory
5691 mapping. We can't repeat the spinlock hack used above because
5692 the child process gets its own copy of the lock. */
5693 if (flags
& CLONE_CHILD_SETTID
)
5694 put_user_u32(sys_gettid(), child_tidptr
);
5695 if (flags
& CLONE_PARENT_SETTID
)
5696 put_user_u32(sys_gettid(), parent_tidptr
);
5697 ts
= (TaskState
*)cpu
->opaque
;
5698 if (flags
& CLONE_SETTLS
)
5699 cpu_set_tls (env
, newtls
);
5700 if (flags
& CLONE_CHILD_CLEARTID
)
5701 ts
->child_tidptr
= child_tidptr
;
5709 /* warning : doesn't handle linux specific flags... */
5710 static int target_to_host_fcntl_cmd(int cmd
)
5715 case TARGET_F_DUPFD
:
5716 case TARGET_F_GETFD
:
5717 case TARGET_F_SETFD
:
5718 case TARGET_F_GETFL
:
5719 case TARGET_F_SETFL
:
5722 case TARGET_F_GETLK
:
5725 case TARGET_F_SETLK
:
5728 case TARGET_F_SETLKW
:
5731 case TARGET_F_GETOWN
:
5734 case TARGET_F_SETOWN
:
5737 case TARGET_F_GETSIG
:
5740 case TARGET_F_SETSIG
:
5743 #if TARGET_ABI_BITS == 32
5744 case TARGET_F_GETLK64
:
5747 case TARGET_F_SETLK64
:
5750 case TARGET_F_SETLKW64
:
5754 case TARGET_F_SETLEASE
:
5757 case TARGET_F_GETLEASE
:
5760 #ifdef F_DUPFD_CLOEXEC
5761 case TARGET_F_DUPFD_CLOEXEC
:
5762 ret
= F_DUPFD_CLOEXEC
;
5765 case TARGET_F_NOTIFY
:
5769 case TARGET_F_GETOWN_EX
:
5774 case TARGET_F_SETOWN_EX
:
5779 case TARGET_F_SETPIPE_SZ
:
5782 case TARGET_F_GETPIPE_SZ
:
5787 ret
= -TARGET_EINVAL
;
5791 #if defined(__powerpc64__)
5792 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5793 * is not supported by kernel. The glibc fcntl call actually adjusts
5794 * them to 5, 6 and 7 before making the syscall(). Since we make the
5795 * syscall directly, adjust to what is supported by the kernel.
5797 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
5798 ret
-= F_GETLK64
- 5;
/* Shared lock-type switch body, expanded with opposite TRANSTBL_CONVERT
 * definitions by target_to_host_flock() and host_to_target_flock().
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
5814 static int target_to_host_flock(int type
)
5816 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5818 #undef TRANSTBL_CONVERT
5819 return -TARGET_EINVAL
;
5822 static int host_to_target_flock(int type
)
5824 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5826 #undef TRANSTBL_CONVERT
5827 /* if we don't know how to convert the value coming
5828 * from the host we copy to the target field as-is
5833 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5834 abi_ulong target_flock_addr
)
5836 struct target_flock
*target_fl
;
5839 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5840 return -TARGET_EFAULT
;
5843 __get_user(l_type
, &target_fl
->l_type
);
5844 l_type
= target_to_host_flock(l_type
);
5848 fl
->l_type
= l_type
;
5849 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5850 __get_user(fl
->l_start
, &target_fl
->l_start
);
5851 __get_user(fl
->l_len
, &target_fl
->l_len
);
5852 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5853 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5857 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5858 const struct flock64
*fl
)
5860 struct target_flock
*target_fl
;
5863 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5864 return -TARGET_EFAULT
;
5867 l_type
= host_to_target_flock(fl
->l_type
);
5868 __put_user(l_type
, &target_fl
->l_type
);
5869 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5870 __put_user(fl
->l_start
, &target_fl
->l_start
);
5871 __put_user(fl
->l_len
, &target_fl
->l_len
);
5872 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5873 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5877 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5878 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
5880 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5881 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
5882 abi_ulong target_flock_addr
)
5884 struct target_oabi_flock64
*target_fl
;
5887 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5888 return -TARGET_EFAULT
;
5891 __get_user(l_type
, &target_fl
->l_type
);
5892 l_type
= target_to_host_flock(l_type
);
5896 fl
->l_type
= l_type
;
5897 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5898 __get_user(fl
->l_start
, &target_fl
->l_start
);
5899 __get_user(fl
->l_len
, &target_fl
->l_len
);
5900 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5901 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5905 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
5906 const struct flock64
*fl
)
5908 struct target_oabi_flock64
*target_fl
;
5911 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5912 return -TARGET_EFAULT
;
5915 l_type
= host_to_target_flock(fl
->l_type
);
5916 __put_user(l_type
, &target_fl
->l_type
);
5917 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5918 __put_user(fl
->l_start
, &target_fl
->l_start
);
5919 __put_user(fl
->l_len
, &target_fl
->l_len
);
5920 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5921 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5926 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5927 abi_ulong target_flock_addr
)
5929 struct target_flock64
*target_fl
;
5932 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5933 return -TARGET_EFAULT
;
5936 __get_user(l_type
, &target_fl
->l_type
);
5937 l_type
= target_to_host_flock(l_type
);
5941 fl
->l_type
= l_type
;
5942 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5943 __get_user(fl
->l_start
, &target_fl
->l_start
);
5944 __get_user(fl
->l_len
, &target_fl
->l_len
);
5945 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5946 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5950 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5951 const struct flock64
*fl
)
5953 struct target_flock64
*target_fl
;
5956 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5957 return -TARGET_EFAULT
;
5960 l_type
= host_to_target_flock(fl
->l_type
);
5961 __put_user(l_type
, &target_fl
->l_type
);
5962 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5963 __put_user(fl
->l_start
, &target_fl
->l_start
);
5964 __put_user(fl
->l_len
, &target_fl
->l_len
);
5965 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5966 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5970 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5972 struct flock64 fl64
;
5974 struct f_owner_ex fox
;
5975 struct target_f_owner_ex
*target_fox
;
5978 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5980 if (host_cmd
== -TARGET_EINVAL
)
5984 case TARGET_F_GETLK
:
5985 ret
= copy_from_user_flock(&fl64
, arg
);
5989 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5991 ret
= copy_to_user_flock(arg
, &fl64
);
5995 case TARGET_F_SETLK
:
5996 case TARGET_F_SETLKW
:
5997 ret
= copy_from_user_flock(&fl64
, arg
);
6001 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6004 case TARGET_F_GETLK64
:
6005 ret
= copy_from_user_flock64(&fl64
, arg
);
6009 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6011 ret
= copy_to_user_flock64(arg
, &fl64
);
6014 case TARGET_F_SETLK64
:
6015 case TARGET_F_SETLKW64
:
6016 ret
= copy_from_user_flock64(&fl64
, arg
);
6020 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6023 case TARGET_F_GETFL
:
6024 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6026 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6030 case TARGET_F_SETFL
:
6031 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6032 target_to_host_bitmask(arg
,
6037 case TARGET_F_GETOWN_EX
:
6038 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6040 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6041 return -TARGET_EFAULT
;
6042 target_fox
->type
= tswap32(fox
.type
);
6043 target_fox
->pid
= tswap32(fox
.pid
);
6044 unlock_user_struct(target_fox
, arg
, 1);
6050 case TARGET_F_SETOWN_EX
:
6051 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6052 return -TARGET_EFAULT
;
6053 fox
.type
= tswap32(target_fox
->type
);
6054 fox
.pid
= tswap32(target_fox
->pid
);
6055 unlock_user_struct(target_fox
, arg
, 0);
6056 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6060 case TARGET_F_SETOWN
:
6061 case TARGET_F_GETOWN
:
6062 case TARGET_F_SETSIG
:
6063 case TARGET_F_GETSIG
:
6064 case TARGET_F_SETLEASE
:
6065 case TARGET_F_GETLEASE
:
6066 case TARGET_F_SETPIPE_SZ
:
6067 case TARGET_F_GETPIPE_SZ
:
6068 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6072 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* Helpers for the 16-bit UID/GID syscall variants: clamp 32-bit host
 * ids into 16-bit range and sign-extend the -1 sentinel back up.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID/GID targets: all conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return id;
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6144 /* We must do direct syscalls for setting UID/GID, because we want to
6145 * implement the Linux system call semantics of "change only for this thread",
6146 * not the libc/POSIX semantics of "change for all threads in process".
6147 * (See http://ewontfix.com/17/ for more details.)
6148 * We use the 32-bit version of the syscalls if present; if it is not
6149 * then either the host architecture supports 32-bit UIDs natively with
6150 * the standard syscall, or the 16-bit UID is the best we can do.
6152 #ifdef __NR_setuid32
6153 #define __NR_sys_setuid __NR_setuid32
6155 #define __NR_sys_setuid __NR_setuid
6157 #ifdef __NR_setgid32
6158 #define __NR_sys_setgid __NR_setgid32
6160 #define __NR_sys_setgid __NR_setgid
6162 #ifdef __NR_setresuid32
6163 #define __NR_sys_setresuid __NR_setresuid32
6165 #define __NR_sys_setresuid __NR_setresuid
6167 #ifdef __NR_setresgid32
6168 #define __NR_sys_setresgid __NR_setresgid32
6170 #define __NR_sys_setresgid __NR_setresgid
6173 _syscall1(int, sys_setuid
, uid_t
, uid
)
6174 _syscall1(int, sys_setgid
, gid_t
, gid
)
6175 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6176 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6178 void syscall_init(void)
6181 const argtype
*arg_type
;
6185 thunk_init(STRUCT_MAX
);
6187 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6188 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6189 #include "syscall_types.h"
6191 #undef STRUCT_SPECIAL
6193 /* Build target_to_host_errno_table[] table from
6194 * host_to_target_errno_table[]. */
6195 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6196 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6199 /* we patch the ioctl size if necessary. We rely on the fact that
6200 no ioctl has all the bits at '1' in the size field */
6202 while (ie
->target_cmd
!= 0) {
6203 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6204 TARGET_IOC_SIZEMASK
) {
6205 arg_type
= ie
->arg_type
;
6206 if (arg_type
[0] != TYPE_PTR
) {
6207 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6212 size
= thunk_type_size(arg_type
, 0);
6213 ie
->target_cmd
= (ie
->target_cmd
&
6214 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6215 (size
<< TARGET_IOC_SIZESHIFT
);
6218 /* automatic consistency check if same arch */
6219 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6220 (defined(__x86_64__) && defined(TARGET_X86_64))
6221 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6222 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6223 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine two 32-bit register halves into a 64-bit offset, respecting
 * the guest's endianness for which half is most significant. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already arrives whole in the first word. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length is split across two guest registers;
 * some ABIs insert a pad register first, detected via regpairs_aligned(). */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6274 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6275 abi_ulong target_addr
)
6277 struct target_timespec
*target_ts
;
6279 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6280 return -TARGET_EFAULT
;
6281 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6282 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6283 unlock_user_struct(target_ts
, target_addr
, 0);
6287 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6288 struct timespec
*host_ts
)
6290 struct target_timespec
*target_ts
;
6292 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6293 return -TARGET_EFAULT
;
6294 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6295 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6296 unlock_user_struct(target_ts
, target_addr
, 1);
6300 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6301 abi_ulong target_addr
)
6303 struct target_itimerspec
*target_itspec
;
6305 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6306 return -TARGET_EFAULT
;
6309 host_itspec
->it_interval
.tv_sec
=
6310 tswapal(target_itspec
->it_interval
.tv_sec
);
6311 host_itspec
->it_interval
.tv_nsec
=
6312 tswapal(target_itspec
->it_interval
.tv_nsec
);
6313 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6314 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6316 unlock_user_struct(target_itspec
, target_addr
, 1);
6320 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6321 struct itimerspec
*host_its
)
6323 struct target_itimerspec
*target_itspec
;
6325 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6326 return -TARGET_EFAULT
;
6329 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6330 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6332 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6333 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6335 unlock_user_struct(target_itspec
, target_addr
, 0);
6339 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6340 abi_long target_addr
)
6342 struct target_timex
*target_tx
;
6344 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6345 return -TARGET_EFAULT
;
6348 __get_user(host_tx
->modes
, &target_tx
->modes
);
6349 __get_user(host_tx
->offset
, &target_tx
->offset
);
6350 __get_user(host_tx
->freq
, &target_tx
->freq
);
6351 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6352 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6353 __get_user(host_tx
->status
, &target_tx
->status
);
6354 __get_user(host_tx
->constant
, &target_tx
->constant
);
6355 __get_user(host_tx
->precision
, &target_tx
->precision
);
6356 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6357 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6358 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6359 __get_user(host_tx
->tick
, &target_tx
->tick
);
6360 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6361 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6362 __get_user(host_tx
->shift
, &target_tx
->shift
);
6363 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6364 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6365 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6366 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6367 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6368 __get_user(host_tx
->tai
, &target_tx
->tai
);
6370 unlock_user_struct(target_tx
, target_addr
, 0);
6374 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6375 struct timex
*host_tx
)
6377 struct target_timex
*target_tx
;
6379 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6380 return -TARGET_EFAULT
;
6383 __put_user(host_tx
->modes
, &target_tx
->modes
);
6384 __put_user(host_tx
->offset
, &target_tx
->offset
);
6385 __put_user(host_tx
->freq
, &target_tx
->freq
);
6386 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6387 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6388 __put_user(host_tx
->status
, &target_tx
->status
);
6389 __put_user(host_tx
->constant
, &target_tx
->constant
);
6390 __put_user(host_tx
->precision
, &target_tx
->precision
);
6391 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6392 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6393 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6394 __put_user(host_tx
->tick
, &target_tx
->tick
);
6395 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6396 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6397 __put_user(host_tx
->shift
, &target_tx
->shift
);
6398 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6399 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6400 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6401 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6402 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6403 __put_user(host_tx
->tai
, &target_tx
->tai
);
6405 unlock_user_struct(target_tx
, target_addr
, 1);
6410 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6411 abi_ulong target_addr
)
6413 struct target_sigevent
*target_sevp
;
6415 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6416 return -TARGET_EFAULT
;
6419 /* This union is awkward on 64 bit systems because it has a 32 bit
6420 * integer and a pointer in it; we follow the conversion approach
6421 * used for handling sigval types in signal.c so the guest should get
6422 * the correct value back even if we did a 64 bit byteswap and it's
6423 * using the 32 bit integer.
6425 host_sevp
->sigev_value
.sival_ptr
=
6426 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6427 host_sevp
->sigev_signo
=
6428 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6429 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6430 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6432 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits into the host MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/* Copy a host struct stat out to the guest's 64-bit stat layout.
 * ARM EABI guests use a distinct padded layout handled in the first branch.
 * Returns 0 or -TARGET_EFAULT on an unwritable guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/* Copy a (host-format) struct target_statx out to guest memory,
 * byteswapping each field. Returns 0 or -TARGET_EFAULT.
 *
 * Fix: the btime/ctime/mtime timestamps were all being stored into the
 * target's stx_atime fields (copy-paste error), which clobbered atime
 * and left the guest's stx_btime/stx_ctime/stx_mtime zeroed. Each
 * timestamp now goes to its own destination field. */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
6570 /* ??? Using host futex calls even when target atomic operations
6571 are not really atomic probably breaks things. However implementing
6572 futexes locally would make futexes shared between multiple processes
6573 tricky. However they're probably useless because guest atomic
6574 operations won't work either. */
6575 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6576 target_ulong uaddr2
, int val3
)
6578 struct timespec ts
, *pts
;
6581 /* ??? We assume FUTEX_* constants are the same on both host
6583 #ifdef FUTEX_CMD_MASK
6584 base_op
= op
& FUTEX_CMD_MASK
;
6590 case FUTEX_WAIT_BITSET
:
6593 target_to_host_timespec(pts
, timeout
);
6597 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6600 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6602 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6604 case FUTEX_CMP_REQUEUE
:
6606 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6607 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6608 But the prototype takes a `struct timespec *'; insert casts
6609 to satisfy the compiler. We do not need to tswap TIMEOUT
6610 since it's not compared to guest memory. */
6611 pts
= (struct timespec
*)(uintptr_t) timeout
;
6612 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6614 (base_op
== FUTEX_CMP_REQUEUE
6618 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(2): build a host file_handle, then copy it
 * back to the guest with the two header fields byteswapped. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    char *name;
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2): duplicate the guest's file_handle,
 * fix up the byteswapped header fields, and call the host syscall. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    abi_long ret;
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/* Implement signalfd()/signalfd4(): convert the guest sigset and flags,
 * call the host, and register the fd translator for the returned fd. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    return status;
}
6754 static int open_self_cmdline(void *cpu_env
, int fd
)
6756 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6757 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
6760 for (i
= 0; i
< bprm
->argc
; i
++) {
6761 size_t len
= strlen(bprm
->argv
[i
]) + 1;
6763 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
6771 static int open_self_maps(void *cpu_env
, int fd
)
6773 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6774 TaskState
*ts
= cpu
->opaque
;
6780 fp
= fopen("/proc/self/maps", "r");
6785 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6786 int fields
, dev_maj
, dev_min
, inode
;
6787 uint64_t min
, max
, offset
;
6788 char flag_r
, flag_w
, flag_x
, flag_p
;
6789 char path
[512] = "";
6790 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6791 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6792 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6794 if ((fields
< 10) || (fields
> 11)) {
6797 if (h2g_valid(min
)) {
6798 int flags
= page_get_flags(h2g(min
));
6799 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
6800 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6803 if (h2g(min
) == ts
->info
->stack_limit
) {
6804 pstrcpy(path
, sizeof(path
), " [stack]");
6806 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
6807 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6808 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6809 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6810 path
[0] ? " " : "", path
);
6820 static int open_self_stat(void *cpu_env
, int fd
)
6822 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6823 TaskState
*ts
= cpu
->opaque
;
6824 abi_ulong start_stack
= ts
->info
->start_stack
;
6827 for (i
= 0; i
< 44; i
++) {
6835 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6836 } else if (i
== 1) {
6838 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6839 } else if (i
== 27) {
6842 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6844 /* for the rest, there is MasterCard */
6845 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6849 if (write(fd
, buf
, len
) != len
) {
6857 static int open_self_auxv(void *cpu_env
, int fd
)
6859 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6860 TaskState
*ts
= cpu
->opaque
;
6861 abi_ulong auxv
= ts
->info
->saved_auxv
;
6862 abi_ulong len
= ts
->info
->auxv_len
;
6866 * Auxiliary vector is stored in target process stack.
6867 * read in whole auxv vector and copy it to file
6869 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6873 r
= write(fd
, ptr
, len
);
6880 lseek(fd
, 0, SEEK_SET
);
6881 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names /proc/<self>/<entry> for the current process
 * (either via "self" or our own numeric pid), else 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K)
/* Exact-path comparator used by the fake_open table for absolute entries. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Emulate /proc/net/route for cross-endian guests: re-emit each route
 * with the address/mask words byteswapped to guest byte order. */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Emulate /proc/cpuinfo for SPARC guests with a minimal fixed entry. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Emulate /proc/hardware for m68k guests with a minimal fixed entry. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
6980 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6983 const char *filename
;
6984 int (*fill
)(void *cpu_env
, int fd
);
6985 int (*cmp
)(const char *s1
, const char *s2
);
6987 const struct fake_open
*fake_open
;
6988 static const struct fake_open fakes
[] = {
6989 { "maps", open_self_maps
, is_proc_myself
},
6990 { "stat", open_self_stat
, is_proc_myself
},
6991 { "auxv", open_self_auxv
, is_proc_myself
},
6992 { "cmdline", open_self_cmdline
, is_proc_myself
},
6993 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6994 { "/proc/net/route", open_net_route
, is_proc
},
6996 #if defined(TARGET_SPARC)
6997 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
6999 #if defined(TARGET_M68K)
7000 { "/proc/hardware", open_hardware
, is_proc
},
7002 { NULL
, NULL
, NULL
}
7005 if (is_proc_myself(pathname
, "exe")) {
7006 int execfd
= qemu_getauxval(AT_EXECFD
);
7007 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7010 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7011 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7016 if (fake_open
->filename
) {
7018 char filename
[PATH_MAX
];
7021 /* create temporary file to map stat to */
7022 tmpdir
= getenv("TMPDIR");
7025 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7026 fd
= mkstemp(filename
);
7032 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7038 lseek(fd
, 0, SEEK_SET
);
7043 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7046 #define TIMER_MAGIC 0x0caf0000
7047 #define TIMER_MAGIC_MASK 0xffff0000
7049 /* Convert QEMU provided timer ID back to internal 16bit index format */
7050 static target_timer_t
get_timer_id(abi_long arg
)
7052 target_timer_t timerid
= arg
;
7054 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7055 return -TARGET_EINVAL
;
7060 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7061 return -TARGET_EINVAL
;
7067 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7069 abi_ulong target_addr
,
7072 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7073 unsigned host_bits
= sizeof(*host_mask
) * 8;
7074 abi_ulong
*target_mask
;
7077 assert(host_size
>= target_size
);
7079 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7081 return -TARGET_EFAULT
;
7083 memset(host_mask
, 0, host_size
);
7085 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7086 unsigned bit
= i
* target_bits
;
7089 __get_user(val
, &target_mask
[i
]);
7090 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7091 if (val
& (1UL << j
)) {
7092 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7097 unlock_user(target_mask
, target_addr
, 0);
7101 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7103 abi_ulong target_addr
,
7106 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7107 unsigned host_bits
= sizeof(*host_mask
) * 8;
7108 abi_ulong
*target_mask
;
7111 assert(host_size
>= target_size
);
7113 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7115 return -TARGET_EFAULT
;
7118 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7119 unsigned bit
= i
* target_bits
;
7122 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7123 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7127 __put_user(val
, &target_mask
[i
]);
7130 unlock_user(target_mask
, target_addr
, target_size
);
7134 /* This is an internal helper for do_syscall so that it is easier
7135 * to have a single return point, so that actions, such as logging
7136 * of syscall results, can be performed.
7137 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7139 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7140 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7141 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7144 CPUState
*cpu
= env_cpu(cpu_env
);
7146 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7147 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7148 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7149 || defined(TARGET_NR_statx)
7152 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7153 || defined(TARGET_NR_fstatfs)
7159 case TARGET_NR_exit
:
7160 /* In old applications this may be used to implement _exit(2).
7161 However in threaded applictions it is used for thread termination,
7162 and _exit_group is used for application termination.
7163 Do thread termination if we have more then one thread. */
7165 if (block_signals()) {
7166 return -TARGET_ERESTARTSYS
;
7171 if (CPU_NEXT(first_cpu
)) {
7174 /* Remove the CPU from the list. */
7175 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
7180 if (ts
->child_tidptr
) {
7181 put_user_u32(0, ts
->child_tidptr
);
7182 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7186 object_unref(OBJECT(cpu
));
7188 rcu_unregister_thread();
7193 preexit_cleanup(cpu_env
, arg1
);
7195 return 0; /* avoid warning */
7196 case TARGET_NR_read
:
7197 if (arg2
== 0 && arg3
== 0) {
7198 return get_errno(safe_read(arg1
, 0, 0));
7200 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7201 return -TARGET_EFAULT
;
7202 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7204 fd_trans_host_to_target_data(arg1
)) {
7205 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7207 unlock_user(p
, arg2
, ret
);
7210 case TARGET_NR_write
:
7211 if (arg2
== 0 && arg3
== 0) {
7212 return get_errno(safe_write(arg1
, 0, 0));
7214 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7215 return -TARGET_EFAULT
;
7216 if (fd_trans_target_to_host_data(arg1
)) {
7217 void *copy
= g_malloc(arg3
);
7218 memcpy(copy
, p
, arg3
);
7219 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7221 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7225 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7227 unlock_user(p
, arg2
, 0);
7230 #ifdef TARGET_NR_open
7231 case TARGET_NR_open
:
7232 if (!(p
= lock_user_string(arg1
)))
7233 return -TARGET_EFAULT
;
7234 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7235 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7237 fd_trans_unregister(ret
);
7238 unlock_user(p
, arg1
, 0);
7241 case TARGET_NR_openat
:
7242 if (!(p
= lock_user_string(arg2
)))
7243 return -TARGET_EFAULT
;
7244 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7245 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7247 fd_trans_unregister(ret
);
7248 unlock_user(p
, arg2
, 0);
7250 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7251 case TARGET_NR_name_to_handle_at
:
7252 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7255 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7256 case TARGET_NR_open_by_handle_at
:
7257 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7258 fd_trans_unregister(ret
);
7261 case TARGET_NR_close
:
7262 fd_trans_unregister(arg1
);
7263 return get_errno(close(arg1
));
7266 return do_brk(arg1
);
7267 #ifdef TARGET_NR_fork
7268 case TARGET_NR_fork
:
7269 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7271 #ifdef TARGET_NR_waitpid
7272 case TARGET_NR_waitpid
:
7275 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7276 if (!is_error(ret
) && arg2
&& ret
7277 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7278 return -TARGET_EFAULT
;
7282 #ifdef TARGET_NR_waitid
7283 case TARGET_NR_waitid
:
7287 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7288 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7289 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7290 return -TARGET_EFAULT
;
7291 host_to_target_siginfo(p
, &info
);
7292 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7297 #ifdef TARGET_NR_creat /* not on alpha */
7298 case TARGET_NR_creat
:
7299 if (!(p
= lock_user_string(arg1
)))
7300 return -TARGET_EFAULT
;
7301 ret
= get_errno(creat(p
, arg2
));
7302 fd_trans_unregister(ret
);
7303 unlock_user(p
, arg1
, 0);
7306 #ifdef TARGET_NR_link
7307 case TARGET_NR_link
:
7310 p
= lock_user_string(arg1
);
7311 p2
= lock_user_string(arg2
);
7313 ret
= -TARGET_EFAULT
;
7315 ret
= get_errno(link(p
, p2
));
7316 unlock_user(p2
, arg2
, 0);
7317 unlock_user(p
, arg1
, 0);
7321 #if defined(TARGET_NR_linkat)
7322 case TARGET_NR_linkat
:
7326 return -TARGET_EFAULT
;
7327 p
= lock_user_string(arg2
);
7328 p2
= lock_user_string(arg4
);
7330 ret
= -TARGET_EFAULT
;
7332 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7333 unlock_user(p
, arg2
, 0);
7334 unlock_user(p2
, arg4
, 0);
7338 #ifdef TARGET_NR_unlink
7339 case TARGET_NR_unlink
:
7340 if (!(p
= lock_user_string(arg1
)))
7341 return -TARGET_EFAULT
;
7342 ret
= get_errno(unlink(p
));
7343 unlock_user(p
, arg1
, 0);
7346 #if defined(TARGET_NR_unlinkat)
7347 case TARGET_NR_unlinkat
:
7348 if (!(p
= lock_user_string(arg2
)))
7349 return -TARGET_EFAULT
;
7350 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7351 unlock_user(p
, arg2
, 0);
7354 case TARGET_NR_execve
:
7356 char **argp
, **envp
;
7359 abi_ulong guest_argp
;
7360 abi_ulong guest_envp
;
7367 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7368 if (get_user_ual(addr
, gp
))
7369 return -TARGET_EFAULT
;
7376 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7377 if (get_user_ual(addr
, gp
))
7378 return -TARGET_EFAULT
;
7384 argp
= g_new0(char *, argc
+ 1);
7385 envp
= g_new0(char *, envc
+ 1);
7387 for (gp
= guest_argp
, q
= argp
; gp
;
7388 gp
+= sizeof(abi_ulong
), q
++) {
7389 if (get_user_ual(addr
, gp
))
7393 if (!(*q
= lock_user_string(addr
)))
7395 total_size
+= strlen(*q
) + 1;
7399 for (gp
= guest_envp
, q
= envp
; gp
;
7400 gp
+= sizeof(abi_ulong
), q
++) {
7401 if (get_user_ual(addr
, gp
))
7405 if (!(*q
= lock_user_string(addr
)))
7407 total_size
+= strlen(*q
) + 1;
7411 if (!(p
= lock_user_string(arg1
)))
7413 /* Although execve() is not an interruptible syscall it is
7414 * a special case where we must use the safe_syscall wrapper:
7415 * if we allow a signal to happen before we make the host
7416 * syscall then we will 'lose' it, because at the point of
7417 * execve the process leaves QEMU's control. So we use the
7418 * safe syscall wrapper to ensure that we either take the
7419 * signal as a guest signal, or else it does not happen
7420 * before the execve completes and makes it the other
7421 * program's problem.
7423 ret
= get_errno(safe_execve(p
, argp
, envp
));
7424 unlock_user(p
, arg1
, 0);
7429 ret
= -TARGET_EFAULT
;
7432 for (gp
= guest_argp
, q
= argp
; *q
;
7433 gp
+= sizeof(abi_ulong
), q
++) {
7434 if (get_user_ual(addr
, gp
)
7437 unlock_user(*q
, addr
, 0);
7439 for (gp
= guest_envp
, q
= envp
; *q
;
7440 gp
+= sizeof(abi_ulong
), q
++) {
7441 if (get_user_ual(addr
, gp
)
7444 unlock_user(*q
, addr
, 0);
7451 case TARGET_NR_chdir
:
7452 if (!(p
= lock_user_string(arg1
)))
7453 return -TARGET_EFAULT
;
7454 ret
= get_errno(chdir(p
));
7455 unlock_user(p
, arg1
, 0);
7457 #ifdef TARGET_NR_time
7458 case TARGET_NR_time
:
7461 ret
= get_errno(time(&host_time
));
7464 && put_user_sal(host_time
, arg1
))
7465 return -TARGET_EFAULT
;
7469 #ifdef TARGET_NR_mknod
7470 case TARGET_NR_mknod
:
7471 if (!(p
= lock_user_string(arg1
)))
7472 return -TARGET_EFAULT
;
7473 ret
= get_errno(mknod(p
, arg2
, arg3
));
7474 unlock_user(p
, arg1
, 0);
7477 #if defined(TARGET_NR_mknodat)
7478 case TARGET_NR_mknodat
:
7479 if (!(p
= lock_user_string(arg2
)))
7480 return -TARGET_EFAULT
;
7481 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7482 unlock_user(p
, arg2
, 0);
7485 #ifdef TARGET_NR_chmod
7486 case TARGET_NR_chmod
:
7487 if (!(p
= lock_user_string(arg1
)))
7488 return -TARGET_EFAULT
;
7489 ret
= get_errno(chmod(p
, arg2
));
7490 unlock_user(p
, arg1
, 0);
7493 #ifdef TARGET_NR_lseek
7494 case TARGET_NR_lseek
:
7495 return get_errno(lseek(arg1
, arg2
, arg3
));
7497 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7498 /* Alpha specific */
7499 case TARGET_NR_getxpid
:
7500 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7501 return get_errno(getpid());
7503 #ifdef TARGET_NR_getpid
7504 case TARGET_NR_getpid
:
7505 return get_errno(getpid());
7507 case TARGET_NR_mount
:
7509 /* need to look at the data field */
7513 p
= lock_user_string(arg1
);
7515 return -TARGET_EFAULT
;
7521 p2
= lock_user_string(arg2
);
7524 unlock_user(p
, arg1
, 0);
7526 return -TARGET_EFAULT
;
7530 p3
= lock_user_string(arg3
);
7533 unlock_user(p
, arg1
, 0);
7535 unlock_user(p2
, arg2
, 0);
7536 return -TARGET_EFAULT
;
7542 /* FIXME - arg5 should be locked, but it isn't clear how to
7543 * do that since it's not guaranteed to be a NULL-terminated
7547 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7549 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7551 ret
= get_errno(ret
);
7554 unlock_user(p
, arg1
, 0);
7556 unlock_user(p2
, arg2
, 0);
7558 unlock_user(p3
, arg3
, 0);
7562 #ifdef TARGET_NR_umount
7563 case TARGET_NR_umount
:
7564 if (!(p
= lock_user_string(arg1
)))
7565 return -TARGET_EFAULT
;
7566 ret
= get_errno(umount(p
));
7567 unlock_user(p
, arg1
, 0);
7570 #ifdef TARGET_NR_stime /* not on alpha */
7571 case TARGET_NR_stime
:
7574 if (get_user_sal(host_time
, arg1
))
7575 return -TARGET_EFAULT
;
7576 return get_errno(stime(&host_time
));
7579 #ifdef TARGET_NR_alarm /* not on alpha */
7580 case TARGET_NR_alarm
:
7583 #ifdef TARGET_NR_pause /* not on alpha */
7584 case TARGET_NR_pause
:
7585 if (!block_signals()) {
7586 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7588 return -TARGET_EINTR
;
7590 #ifdef TARGET_NR_utime
7591 case TARGET_NR_utime
:
7593 struct utimbuf tbuf
, *host_tbuf
;
7594 struct target_utimbuf
*target_tbuf
;
7596 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7597 return -TARGET_EFAULT
;
7598 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7599 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7600 unlock_user_struct(target_tbuf
, arg2
, 0);
7605 if (!(p
= lock_user_string(arg1
)))
7606 return -TARGET_EFAULT
;
7607 ret
= get_errno(utime(p
, host_tbuf
));
7608 unlock_user(p
, arg1
, 0);
7612 #ifdef TARGET_NR_utimes
7613 case TARGET_NR_utimes
:
7615 struct timeval
*tvp
, tv
[2];
7617 if (copy_from_user_timeval(&tv
[0], arg2
)
7618 || copy_from_user_timeval(&tv
[1],
7619 arg2
+ sizeof(struct target_timeval
)))
7620 return -TARGET_EFAULT
;
7625 if (!(p
= lock_user_string(arg1
)))
7626 return -TARGET_EFAULT
;
7627 ret
= get_errno(utimes(p
, tvp
));
7628 unlock_user(p
, arg1
, 0);
7632 #if defined(TARGET_NR_futimesat)
7633 case TARGET_NR_futimesat
:
7635 struct timeval
*tvp
, tv
[2];
7637 if (copy_from_user_timeval(&tv
[0], arg3
)
7638 || copy_from_user_timeval(&tv
[1],
7639 arg3
+ sizeof(struct target_timeval
)))
7640 return -TARGET_EFAULT
;
7645 if (!(p
= lock_user_string(arg2
))) {
7646 return -TARGET_EFAULT
;
7648 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7649 unlock_user(p
, arg2
, 0);
7653 #ifdef TARGET_NR_access
7654 case TARGET_NR_access
:
7655 if (!(p
= lock_user_string(arg1
))) {
7656 return -TARGET_EFAULT
;
7658 ret
= get_errno(access(path(p
), arg2
));
7659 unlock_user(p
, arg1
, 0);
7662 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7663 case TARGET_NR_faccessat
:
7664 if (!(p
= lock_user_string(arg2
))) {
7665 return -TARGET_EFAULT
;
7667 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7668 unlock_user(p
, arg2
, 0);
7671 #ifdef TARGET_NR_nice /* not on alpha */
7672 case TARGET_NR_nice
:
7673 return get_errno(nice(arg1
));
7675 case TARGET_NR_sync
:
7678 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7679 case TARGET_NR_syncfs
:
7680 return get_errno(syncfs(arg1
));
7682 case TARGET_NR_kill
:
7683 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7684 #ifdef TARGET_NR_rename
7685 case TARGET_NR_rename
:
7688 p
= lock_user_string(arg1
);
7689 p2
= lock_user_string(arg2
);
7691 ret
= -TARGET_EFAULT
;
7693 ret
= get_errno(rename(p
, p2
));
7694 unlock_user(p2
, arg2
, 0);
7695 unlock_user(p
, arg1
, 0);
7699 #if defined(TARGET_NR_renameat)
7700 case TARGET_NR_renameat
:
7703 p
= lock_user_string(arg2
);
7704 p2
= lock_user_string(arg4
);
7706 ret
= -TARGET_EFAULT
;
7708 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7709 unlock_user(p2
, arg4
, 0);
7710 unlock_user(p
, arg2
, 0);
7714 #if defined(TARGET_NR_renameat2)
7715 case TARGET_NR_renameat2
:
7718 p
= lock_user_string(arg2
);
7719 p2
= lock_user_string(arg4
);
7721 ret
= -TARGET_EFAULT
;
7723 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7725 unlock_user(p2
, arg4
, 0);
7726 unlock_user(p
, arg2
, 0);
7730 #ifdef TARGET_NR_mkdir
7731 case TARGET_NR_mkdir
:
7732 if (!(p
= lock_user_string(arg1
)))
7733 return -TARGET_EFAULT
;
7734 ret
= get_errno(mkdir(p
, arg2
));
7735 unlock_user(p
, arg1
, 0);
7738 #if defined(TARGET_NR_mkdirat)
7739 case TARGET_NR_mkdirat
:
7740 if (!(p
= lock_user_string(arg2
)))
7741 return -TARGET_EFAULT
;
7742 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7743 unlock_user(p
, arg2
, 0);
7746 #ifdef TARGET_NR_rmdir
7747 case TARGET_NR_rmdir
:
7748 if (!(p
= lock_user_string(arg1
)))
7749 return -TARGET_EFAULT
;
7750 ret
= get_errno(rmdir(p
));
7751 unlock_user(p
, arg1
, 0);
7755 ret
= get_errno(dup(arg1
));
7757 fd_trans_dup(arg1
, ret
);
7760 #ifdef TARGET_NR_pipe
7761 case TARGET_NR_pipe
:
7762 return do_pipe(cpu_env
, arg1
, 0, 0);
7764 #ifdef TARGET_NR_pipe2
7765 case TARGET_NR_pipe2
:
7766 return do_pipe(cpu_env
, arg1
,
7767 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7769 case TARGET_NR_times
:
7771 struct target_tms
*tmsp
;
7773 ret
= get_errno(times(&tms
));
7775 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7777 return -TARGET_EFAULT
;
7778 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7779 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7780 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7781 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7784 ret
= host_to_target_clock_t(ret
);
7787 case TARGET_NR_acct
:
7789 ret
= get_errno(acct(NULL
));
7791 if (!(p
= lock_user_string(arg1
))) {
7792 return -TARGET_EFAULT
;
7794 ret
= get_errno(acct(path(p
)));
7795 unlock_user(p
, arg1
, 0);
7798 #ifdef TARGET_NR_umount2
7799 case TARGET_NR_umount2
:
7800 if (!(p
= lock_user_string(arg1
)))
7801 return -TARGET_EFAULT
;
7802 ret
= get_errno(umount2(p
, arg2
));
7803 unlock_user(p
, arg1
, 0);
7806 case TARGET_NR_ioctl
:
7807 return do_ioctl(arg1
, arg2
, arg3
);
7808 #ifdef TARGET_NR_fcntl
7809 case TARGET_NR_fcntl
:
7810 return do_fcntl(arg1
, arg2
, arg3
);
7812 case TARGET_NR_setpgid
:
7813 return get_errno(setpgid(arg1
, arg2
));
7814 case TARGET_NR_umask
:
7815 return get_errno(umask(arg1
));
7816 case TARGET_NR_chroot
:
7817 if (!(p
= lock_user_string(arg1
)))
7818 return -TARGET_EFAULT
;
7819 ret
= get_errno(chroot(p
));
7820 unlock_user(p
, arg1
, 0);
7822 #ifdef TARGET_NR_dup2
7823 case TARGET_NR_dup2
:
7824 ret
= get_errno(dup2(arg1
, arg2
));
7826 fd_trans_dup(arg1
, arg2
);
7830 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7831 case TARGET_NR_dup3
:
7835 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
7838 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
7839 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
7841 fd_trans_dup(arg1
, arg2
);
7846 #ifdef TARGET_NR_getppid /* not on alpha */
7847 case TARGET_NR_getppid
:
7848 return get_errno(getppid());
7850 #ifdef TARGET_NR_getpgrp
7851 case TARGET_NR_getpgrp
:
7852 return get_errno(getpgrp());
7854 case TARGET_NR_setsid
:
7855 return get_errno(setsid());
7856 #ifdef TARGET_NR_sigaction
7857 case TARGET_NR_sigaction
:
7859 #if defined(TARGET_ALPHA)
7860 struct target_sigaction act
, oact
, *pact
= 0;
7861 struct target_old_sigaction
*old_act
;
7863 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7864 return -TARGET_EFAULT
;
7865 act
._sa_handler
= old_act
->_sa_handler
;
7866 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7867 act
.sa_flags
= old_act
->sa_flags
;
7868 act
.sa_restorer
= 0;
7869 unlock_user_struct(old_act
, arg2
, 0);
7872 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7873 if (!is_error(ret
) && arg3
) {
7874 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7875 return -TARGET_EFAULT
;
7876 old_act
->_sa_handler
= oact
._sa_handler
;
7877 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7878 old_act
->sa_flags
= oact
.sa_flags
;
7879 unlock_user_struct(old_act
, arg3
, 1);
7881 #elif defined(TARGET_MIPS)
7882 struct target_sigaction act
, oact
, *pact
, *old_act
;
7885 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7886 return -TARGET_EFAULT
;
7887 act
._sa_handler
= old_act
->_sa_handler
;
7888 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7889 act
.sa_flags
= old_act
->sa_flags
;
7890 unlock_user_struct(old_act
, arg2
, 0);
7896 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7898 if (!is_error(ret
) && arg3
) {
7899 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7900 return -TARGET_EFAULT
;
7901 old_act
->_sa_handler
= oact
._sa_handler
;
7902 old_act
->sa_flags
= oact
.sa_flags
;
7903 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7904 old_act
->sa_mask
.sig
[1] = 0;
7905 old_act
->sa_mask
.sig
[2] = 0;
7906 old_act
->sa_mask
.sig
[3] = 0;
7907 unlock_user_struct(old_act
, arg3
, 1);
7910 struct target_old_sigaction
*old_act
;
7911 struct target_sigaction act
, oact
, *pact
;
7913 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7914 return -TARGET_EFAULT
;
7915 act
._sa_handler
= old_act
->_sa_handler
;
7916 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7917 act
.sa_flags
= old_act
->sa_flags
;
7918 act
.sa_restorer
= old_act
->sa_restorer
;
7919 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7920 act
.ka_restorer
= 0;
7922 unlock_user_struct(old_act
, arg2
, 0);
7927 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7928 if (!is_error(ret
) && arg3
) {
7929 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7930 return -TARGET_EFAULT
;
7931 old_act
->_sa_handler
= oact
._sa_handler
;
7932 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7933 old_act
->sa_flags
= oact
.sa_flags
;
7934 old_act
->sa_restorer
= oact
.sa_restorer
;
7935 unlock_user_struct(old_act
, arg3
, 1);
7941 case TARGET_NR_rt_sigaction
:
7943 #if defined(TARGET_ALPHA)
7944 /* For Alpha and SPARC this is a 5 argument syscall, with
7945 * a 'restorer' parameter which must be copied into the
7946 * sa_restorer field of the sigaction struct.
7947 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7948 * and arg5 is the sigsetsize.
7949 * Alpha also has a separate rt_sigaction struct that it uses
7950 * here; SPARC uses the usual sigaction struct.
7952 struct target_rt_sigaction
*rt_act
;
7953 struct target_sigaction act
, oact
, *pact
= 0;
7955 if (arg4
!= sizeof(target_sigset_t
)) {
7956 return -TARGET_EINVAL
;
7959 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7960 return -TARGET_EFAULT
;
7961 act
._sa_handler
= rt_act
->_sa_handler
;
7962 act
.sa_mask
= rt_act
->sa_mask
;
7963 act
.sa_flags
= rt_act
->sa_flags
;
7964 act
.sa_restorer
= arg5
;
7965 unlock_user_struct(rt_act
, arg2
, 0);
7968 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7969 if (!is_error(ret
) && arg3
) {
7970 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7971 return -TARGET_EFAULT
;
7972 rt_act
->_sa_handler
= oact
._sa_handler
;
7973 rt_act
->sa_mask
= oact
.sa_mask
;
7974 rt_act
->sa_flags
= oact
.sa_flags
;
7975 unlock_user_struct(rt_act
, arg3
, 1);
7979 target_ulong restorer
= arg4
;
7980 target_ulong sigsetsize
= arg5
;
7982 target_ulong sigsetsize
= arg4
;
7984 struct target_sigaction
*act
;
7985 struct target_sigaction
*oact
;
7987 if (sigsetsize
!= sizeof(target_sigset_t
)) {
7988 return -TARGET_EINVAL
;
7991 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
7992 return -TARGET_EFAULT
;
7994 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7995 act
->ka_restorer
= restorer
;
8001 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8002 ret
= -TARGET_EFAULT
;
8003 goto rt_sigaction_fail
;
8007 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8010 unlock_user_struct(act
, arg2
, 0);
8012 unlock_user_struct(oact
, arg3
, 1);
8016 #ifdef TARGET_NR_sgetmask /* not on alpha */
8017 case TARGET_NR_sgetmask
:
8020 abi_ulong target_set
;
8021 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8023 host_to_target_old_sigset(&target_set
, &cur_set
);
8029 #ifdef TARGET_NR_ssetmask /* not on alpha */
8030 case TARGET_NR_ssetmask
:
8033 abi_ulong target_set
= arg1
;
8034 target_to_host_old_sigset(&set
, &target_set
);
8035 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8037 host_to_target_old_sigset(&target_set
, &oset
);
8043 #ifdef TARGET_NR_sigprocmask
8044 case TARGET_NR_sigprocmask
:
8046 #if defined(TARGET_ALPHA)
8047 sigset_t set
, oldset
;
8052 case TARGET_SIG_BLOCK
:
8055 case TARGET_SIG_UNBLOCK
:
8058 case TARGET_SIG_SETMASK
:
8062 return -TARGET_EINVAL
;
8065 target_to_host_old_sigset(&set
, &mask
);
8067 ret
= do_sigprocmask(how
, &set
, &oldset
);
8068 if (!is_error(ret
)) {
8069 host_to_target_old_sigset(&mask
, &oldset
);
8071 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8074 sigset_t set
, oldset
, *set_ptr
;
8079 case TARGET_SIG_BLOCK
:
8082 case TARGET_SIG_UNBLOCK
:
8085 case TARGET_SIG_SETMASK
:
8089 return -TARGET_EINVAL
;
8091 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8092 return -TARGET_EFAULT
;
8093 target_to_host_old_sigset(&set
, p
);
8094 unlock_user(p
, arg2
, 0);
8100 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8101 if (!is_error(ret
) && arg3
) {
8102 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8103 return -TARGET_EFAULT
;
8104 host_to_target_old_sigset(p
, &oldset
);
8105 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8111 case TARGET_NR_rt_sigprocmask
:
8114 sigset_t set
, oldset
, *set_ptr
;
8116 if (arg4
!= sizeof(target_sigset_t
)) {
8117 return -TARGET_EINVAL
;
8122 case TARGET_SIG_BLOCK
:
8125 case TARGET_SIG_UNBLOCK
:
8128 case TARGET_SIG_SETMASK
:
8132 return -TARGET_EINVAL
;
8134 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8135 return -TARGET_EFAULT
;
8136 target_to_host_sigset(&set
, p
);
8137 unlock_user(p
, arg2
, 0);
8143 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8144 if (!is_error(ret
) && arg3
) {
8145 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8146 return -TARGET_EFAULT
;
8147 host_to_target_sigset(p
, &oldset
);
8148 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8152 #ifdef TARGET_NR_sigpending
8153 case TARGET_NR_sigpending
:
8156 ret
= get_errno(sigpending(&set
));
8157 if (!is_error(ret
)) {
8158 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8159 return -TARGET_EFAULT
;
8160 host_to_target_old_sigset(p
, &set
);
8161 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8166 case TARGET_NR_rt_sigpending
:
8170 /* Yes, this check is >, not != like most. We follow the kernel's
8171 * logic and it does it like this because it implements
8172 * NR_sigpending through the same code path, and in that case
8173 * the old_sigset_t is smaller in size.
8175 if (arg2
> sizeof(target_sigset_t
)) {
8176 return -TARGET_EINVAL
;
8179 ret
= get_errno(sigpending(&set
));
8180 if (!is_error(ret
)) {
8181 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8182 return -TARGET_EFAULT
;
8183 host_to_target_sigset(p
, &set
);
8184 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8188 #ifdef TARGET_NR_sigsuspend
8189 case TARGET_NR_sigsuspend
:
8191 TaskState
*ts
= cpu
->opaque
;
8192 #if defined(TARGET_ALPHA)
8193 abi_ulong mask
= arg1
;
8194 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8196 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8197 return -TARGET_EFAULT
;
8198 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8199 unlock_user(p
, arg1
, 0);
8201 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8203 if (ret
!= -TARGET_ERESTARTSYS
) {
8204 ts
->in_sigsuspend
= 1;
8209 case TARGET_NR_rt_sigsuspend
:
8211 TaskState
*ts
= cpu
->opaque
;
8213 if (arg2
!= sizeof(target_sigset_t
)) {
8214 return -TARGET_EINVAL
;
8216 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8217 return -TARGET_EFAULT
;
8218 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8219 unlock_user(p
, arg1
, 0);
8220 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8222 if (ret
!= -TARGET_ERESTARTSYS
) {
8223 ts
->in_sigsuspend
= 1;
8227 case TARGET_NR_rt_sigtimedwait
:
8230 struct timespec uts
, *puts
;
8233 if (arg4
!= sizeof(target_sigset_t
)) {
8234 return -TARGET_EINVAL
;
8237 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8238 return -TARGET_EFAULT
;
8239 target_to_host_sigset(&set
, p
);
8240 unlock_user(p
, arg1
, 0);
8243 target_to_host_timespec(puts
, arg3
);
8247 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8249 if (!is_error(ret
)) {
8251 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8254 return -TARGET_EFAULT
;
8256 host_to_target_siginfo(p
, &uinfo
);
8257 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8259 ret
= host_to_target_signal(ret
);
8263 case TARGET_NR_rt_sigqueueinfo
:
8267 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8269 return -TARGET_EFAULT
;
8271 target_to_host_siginfo(&uinfo
, p
);
8272 unlock_user(p
, arg3
, 0);
8273 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8276 case TARGET_NR_rt_tgsigqueueinfo
:
8280 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8282 return -TARGET_EFAULT
;
8284 target_to_host_siginfo(&uinfo
, p
);
8285 unlock_user(p
, arg4
, 0);
8286 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8289 #ifdef TARGET_NR_sigreturn
8290 case TARGET_NR_sigreturn
:
8291 if (block_signals()) {
8292 return -TARGET_ERESTARTSYS
;
8294 return do_sigreturn(cpu_env
);
8296 case TARGET_NR_rt_sigreturn
:
8297 if (block_signals()) {
8298 return -TARGET_ERESTARTSYS
;
8300 return do_rt_sigreturn(cpu_env
);
8301 case TARGET_NR_sethostname
:
8302 if (!(p
= lock_user_string(arg1
)))
8303 return -TARGET_EFAULT
;
8304 ret
= get_errno(sethostname(p
, arg2
));
8305 unlock_user(p
, arg1
, 0);
8307 #ifdef TARGET_NR_setrlimit
8308 case TARGET_NR_setrlimit
:
8310 int resource
= target_to_host_resource(arg1
);
8311 struct target_rlimit
*target_rlim
;
8313 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8314 return -TARGET_EFAULT
;
8315 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8316 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8317 unlock_user_struct(target_rlim
, arg2
, 0);
8319 * If we just passed through resource limit settings for memory then
8320 * they would also apply to QEMU's own allocations, and QEMU will
8321 * crash or hang or die if its allocations fail. Ideally we would
8322 * track the guest allocations in QEMU and apply the limits ourselves.
8323 * For now, just tell the guest the call succeeded but don't actually
8326 if (resource
!= RLIMIT_AS
&&
8327 resource
!= RLIMIT_DATA
&&
8328 resource
!= RLIMIT_STACK
) {
8329 return get_errno(setrlimit(resource
, &rlim
));
8335 #ifdef TARGET_NR_getrlimit
8336 case TARGET_NR_getrlimit
:
8338 int resource
= target_to_host_resource(arg1
);
8339 struct target_rlimit
*target_rlim
;
8342 ret
= get_errno(getrlimit(resource
, &rlim
));
8343 if (!is_error(ret
)) {
8344 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8345 return -TARGET_EFAULT
;
8346 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8347 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8348 unlock_user_struct(target_rlim
, arg2
, 1);
8353 case TARGET_NR_getrusage
:
8355 struct rusage rusage
;
8356 ret
= get_errno(getrusage(arg1
, &rusage
));
8357 if (!is_error(ret
)) {
8358 ret
= host_to_target_rusage(arg2
, &rusage
);
8362 case TARGET_NR_gettimeofday
:
8365 ret
= get_errno(gettimeofday(&tv
, NULL
));
8366 if (!is_error(ret
)) {
8367 if (copy_to_user_timeval(arg1
, &tv
))
8368 return -TARGET_EFAULT
;
8372 case TARGET_NR_settimeofday
:
8374 struct timeval tv
, *ptv
= NULL
;
8375 struct timezone tz
, *ptz
= NULL
;
8378 if (copy_from_user_timeval(&tv
, arg1
)) {
8379 return -TARGET_EFAULT
;
8385 if (copy_from_user_timezone(&tz
, arg2
)) {
8386 return -TARGET_EFAULT
;
8391 return get_errno(settimeofday(ptv
, ptz
));
8393 #if defined(TARGET_NR_select)
8394 case TARGET_NR_select
:
8395 #if defined(TARGET_WANT_NI_OLD_SELECT)
8396 /* some architectures used to have old_select here
8397 * but now ENOSYS it.
8399 ret
= -TARGET_ENOSYS
;
8400 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8401 ret
= do_old_select(arg1
);
8403 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8407 #ifdef TARGET_NR_pselect6
8408 case TARGET_NR_pselect6
:
8410 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8411 fd_set rfds
, wfds
, efds
;
8412 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8413 struct timespec ts
, *ts_ptr
;
8416 * The 6th arg is actually two args smashed together,
8417 * so we cannot use the C library.
8425 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8426 target_sigset_t
*target_sigset
;
8434 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8438 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8442 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8448 * This takes a timespec, and not a timeval, so we cannot
8449 * use the do_select() helper ...
8452 if (target_to_host_timespec(&ts
, ts_addr
)) {
8453 return -TARGET_EFAULT
;
8460 /* Extract the two packed args for the sigset */
8463 sig
.size
= SIGSET_T_SIZE
;
8465 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8467 return -TARGET_EFAULT
;
8469 arg_sigset
= tswapal(arg7
[0]);
8470 arg_sigsize
= tswapal(arg7
[1]);
8471 unlock_user(arg7
, arg6
, 0);
8475 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8476 /* Like the kernel, we enforce correct size sigsets */
8477 return -TARGET_EINVAL
;
8479 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8480 sizeof(*target_sigset
), 1);
8481 if (!target_sigset
) {
8482 return -TARGET_EFAULT
;
8484 target_to_host_sigset(&set
, target_sigset
);
8485 unlock_user(target_sigset
, arg_sigset
, 0);
8493 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8496 if (!is_error(ret
)) {
8497 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8498 return -TARGET_EFAULT
;
8499 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8500 return -TARGET_EFAULT
;
8501 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8502 return -TARGET_EFAULT
;
8504 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8505 return -TARGET_EFAULT
;
8510 #ifdef TARGET_NR_symlink
8511 case TARGET_NR_symlink
:
8514 p
= lock_user_string(arg1
);
8515 p2
= lock_user_string(arg2
);
8517 ret
= -TARGET_EFAULT
;
8519 ret
= get_errno(symlink(p
, p2
));
8520 unlock_user(p2
, arg2
, 0);
8521 unlock_user(p
, arg1
, 0);
8525 #if defined(TARGET_NR_symlinkat)
8526 case TARGET_NR_symlinkat
:
8529 p
= lock_user_string(arg1
);
8530 p2
= lock_user_string(arg3
);
8532 ret
= -TARGET_EFAULT
;
8534 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8535 unlock_user(p2
, arg3
, 0);
8536 unlock_user(p
, arg1
, 0);
8540 #ifdef TARGET_NR_readlink
8541 case TARGET_NR_readlink
:
8544 p
= lock_user_string(arg1
);
8545 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8547 ret
= -TARGET_EFAULT
;
8549 /* Short circuit this for the magic exe check. */
8550 ret
= -TARGET_EINVAL
;
8551 } else if (is_proc_myself((const char *)p
, "exe")) {
8552 char real
[PATH_MAX
], *temp
;
8553 temp
= realpath(exec_path
, real
);
8554 /* Return value is # of bytes that we wrote to the buffer. */
8556 ret
= get_errno(-1);
8558 /* Don't worry about sign mismatch as earlier mapping
8559 * logic would have thrown a bad address error. */
8560 ret
= MIN(strlen(real
), arg3
);
8561 /* We cannot NUL terminate the string. */
8562 memcpy(p2
, real
, ret
);
8565 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8567 unlock_user(p2
, arg2
, ret
);
8568 unlock_user(p
, arg1
, 0);
8572 #if defined(TARGET_NR_readlinkat)
8573 case TARGET_NR_readlinkat
:
8576 p
= lock_user_string(arg2
);
8577 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8579 ret
= -TARGET_EFAULT
;
8580 } else if (is_proc_myself((const char *)p
, "exe")) {
8581 char real
[PATH_MAX
], *temp
;
8582 temp
= realpath(exec_path
, real
);
8583 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8584 snprintf((char *)p2
, arg4
, "%s", real
);
8586 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8588 unlock_user(p2
, arg3
, ret
);
8589 unlock_user(p
, arg2
, 0);
8593 #ifdef TARGET_NR_swapon
8594 case TARGET_NR_swapon
:
8595 if (!(p
= lock_user_string(arg1
)))
8596 return -TARGET_EFAULT
;
8597 ret
= get_errno(swapon(p
, arg2
));
8598 unlock_user(p
, arg1
, 0);
8601 case TARGET_NR_reboot
:
8602 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8603 /* arg4 must be ignored in all other cases */
8604 p
= lock_user_string(arg4
);
8606 return -TARGET_EFAULT
;
8608 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8609 unlock_user(p
, arg4
, 0);
8611 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8614 #ifdef TARGET_NR_mmap
8615 case TARGET_NR_mmap
:
8616 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8617 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8618 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8619 || defined(TARGET_S390X)
8622 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8623 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8624 return -TARGET_EFAULT
;
8631 unlock_user(v
, arg1
, 0);
8632 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8633 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8637 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8638 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8644 #ifdef TARGET_NR_mmap2
8645 case TARGET_NR_mmap2
:
8647 #define MMAP_SHIFT 12
8649 ret
= target_mmap(arg1
, arg2
, arg3
,
8650 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8651 arg5
, arg6
<< MMAP_SHIFT
);
8652 return get_errno(ret
);
8654 case TARGET_NR_munmap
:
8655 return get_errno(target_munmap(arg1
, arg2
));
8656 case TARGET_NR_mprotect
:
8658 TaskState
*ts
= cpu
->opaque
;
8659 /* Special hack to detect libc making the stack executable. */
8660 if ((arg3
& PROT_GROWSDOWN
)
8661 && arg1
>= ts
->info
->stack_limit
8662 && arg1
<= ts
->info
->start_stack
) {
8663 arg3
&= ~PROT_GROWSDOWN
;
8664 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8665 arg1
= ts
->info
->stack_limit
;
8668 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8669 #ifdef TARGET_NR_mremap
8670 case TARGET_NR_mremap
:
8671 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8673 /* ??? msync/mlock/munlock are broken for softmmu. */
8674 #ifdef TARGET_NR_msync
8675 case TARGET_NR_msync
:
8676 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8678 #ifdef TARGET_NR_mlock
8679 case TARGET_NR_mlock
:
8680 return get_errno(mlock(g2h(arg1
), arg2
));
8682 #ifdef TARGET_NR_munlock
8683 case TARGET_NR_munlock
:
8684 return get_errno(munlock(g2h(arg1
), arg2
));
8686 #ifdef TARGET_NR_mlockall
8687 case TARGET_NR_mlockall
:
8688 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8690 #ifdef TARGET_NR_munlockall
8691 case TARGET_NR_munlockall
:
8692 return get_errno(munlockall());
8694 #ifdef TARGET_NR_truncate
8695 case TARGET_NR_truncate
:
8696 if (!(p
= lock_user_string(arg1
)))
8697 return -TARGET_EFAULT
;
8698 ret
= get_errno(truncate(p
, arg2
));
8699 unlock_user(p
, arg1
, 0);
8702 #ifdef TARGET_NR_ftruncate
8703 case TARGET_NR_ftruncate
:
8704 return get_errno(ftruncate(arg1
, arg2
));
8706 case TARGET_NR_fchmod
:
8707 return get_errno(fchmod(arg1
, arg2
));
8708 #if defined(TARGET_NR_fchmodat)
8709 case TARGET_NR_fchmodat
:
8710 if (!(p
= lock_user_string(arg2
)))
8711 return -TARGET_EFAULT
;
8712 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8713 unlock_user(p
, arg2
, 0);
8716 case TARGET_NR_getpriority
:
8717 /* Note that negative values are valid for getpriority, so we must
8718 differentiate based on errno settings. */
8720 ret
= getpriority(arg1
, arg2
);
8721 if (ret
== -1 && errno
!= 0) {
8722 return -host_to_target_errno(errno
);
8725 /* Return value is the unbiased priority. Signal no error. */
8726 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8728 /* Return value is a biased priority to avoid negative numbers. */
8732 case TARGET_NR_setpriority
:
8733 return get_errno(setpriority(arg1
, arg2
, arg3
));
8734 #ifdef TARGET_NR_statfs
8735 case TARGET_NR_statfs
:
8736 if (!(p
= lock_user_string(arg1
))) {
8737 return -TARGET_EFAULT
;
8739 ret
= get_errno(statfs(path(p
), &stfs
));
8740 unlock_user(p
, arg1
, 0);
8742 if (!is_error(ret
)) {
8743 struct target_statfs
*target_stfs
;
8745 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8746 return -TARGET_EFAULT
;
8747 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8748 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8749 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8750 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8751 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8752 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8753 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8754 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8755 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8756 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8757 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8758 #ifdef _STATFS_F_FLAGS
8759 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8761 __put_user(0, &target_stfs
->f_flags
);
8763 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8764 unlock_user_struct(target_stfs
, arg2
, 1);
8768 #ifdef TARGET_NR_fstatfs
8769 case TARGET_NR_fstatfs
:
8770 ret
= get_errno(fstatfs(arg1
, &stfs
));
8771 goto convert_statfs
;
8773 #ifdef TARGET_NR_statfs64
8774 case TARGET_NR_statfs64
:
8775 if (!(p
= lock_user_string(arg1
))) {
8776 return -TARGET_EFAULT
;
8778 ret
= get_errno(statfs(path(p
), &stfs
));
8779 unlock_user(p
, arg1
, 0);
8781 if (!is_error(ret
)) {
8782 struct target_statfs64
*target_stfs
;
8784 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8785 return -TARGET_EFAULT
;
8786 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8787 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8788 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8789 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8790 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8791 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8792 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8793 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8794 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8795 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8796 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8797 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8798 unlock_user_struct(target_stfs
, arg3
, 1);
8801 case TARGET_NR_fstatfs64
:
8802 ret
= get_errno(fstatfs(arg1
, &stfs
));
8803 goto convert_statfs64
;
8805 #ifdef TARGET_NR_socketcall
8806 case TARGET_NR_socketcall
:
8807 return do_socketcall(arg1
, arg2
);
8809 #ifdef TARGET_NR_accept
8810 case TARGET_NR_accept
:
8811 return do_accept4(arg1
, arg2
, arg3
, 0);
8813 #ifdef TARGET_NR_accept4
8814 case TARGET_NR_accept4
:
8815 return do_accept4(arg1
, arg2
, arg3
, arg4
);
8817 #ifdef TARGET_NR_bind
8818 case TARGET_NR_bind
:
8819 return do_bind(arg1
, arg2
, arg3
);
8821 #ifdef TARGET_NR_connect
8822 case TARGET_NR_connect
:
8823 return do_connect(arg1
, arg2
, arg3
);
8825 #ifdef TARGET_NR_getpeername
8826 case TARGET_NR_getpeername
:
8827 return do_getpeername(arg1
, arg2
, arg3
);
8829 #ifdef TARGET_NR_getsockname
8830 case TARGET_NR_getsockname
:
8831 return do_getsockname(arg1
, arg2
, arg3
);
8833 #ifdef TARGET_NR_getsockopt
8834 case TARGET_NR_getsockopt
:
8835 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8837 #ifdef TARGET_NR_listen
8838 case TARGET_NR_listen
:
8839 return get_errno(listen(arg1
, arg2
));
8841 #ifdef TARGET_NR_recv
8842 case TARGET_NR_recv
:
8843 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8845 #ifdef TARGET_NR_recvfrom
8846 case TARGET_NR_recvfrom
:
8847 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8849 #ifdef TARGET_NR_recvmsg
8850 case TARGET_NR_recvmsg
:
8851 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8853 #ifdef TARGET_NR_send
8854 case TARGET_NR_send
:
8855 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8857 #ifdef TARGET_NR_sendmsg
8858 case TARGET_NR_sendmsg
:
8859 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8861 #ifdef TARGET_NR_sendmmsg
8862 case TARGET_NR_sendmmsg
:
8863 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8864 case TARGET_NR_recvmmsg
:
8865 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8867 #ifdef TARGET_NR_sendto
8868 case TARGET_NR_sendto
:
8869 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8871 #ifdef TARGET_NR_shutdown
8872 case TARGET_NR_shutdown
:
8873 return get_errno(shutdown(arg1
, arg2
));
8875 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8876 case TARGET_NR_getrandom
:
8877 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8879 return -TARGET_EFAULT
;
8881 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8882 unlock_user(p
, arg1
, ret
);
8885 #ifdef TARGET_NR_socket
8886 case TARGET_NR_socket
:
8887 return do_socket(arg1
, arg2
, arg3
);
8889 #ifdef TARGET_NR_socketpair
8890 case TARGET_NR_socketpair
:
8891 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
8893 #ifdef TARGET_NR_setsockopt
8894 case TARGET_NR_setsockopt
:
8895 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8897 #if defined(TARGET_NR_syslog)
8898 case TARGET_NR_syslog
:
8903 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
8904 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
8905 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
8906 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
8907 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
8908 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
8909 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
8910 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
8911 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
8912 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
8913 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
8914 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
8917 return -TARGET_EINVAL
;
8922 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8924 return -TARGET_EFAULT
;
8926 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8927 unlock_user(p
, arg2
, arg3
);
8931 return -TARGET_EINVAL
;
8936 case TARGET_NR_setitimer
:
8938 struct itimerval value
, ovalue
, *pvalue
;
8942 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8943 || copy_from_user_timeval(&pvalue
->it_value
,
8944 arg2
+ sizeof(struct target_timeval
)))
8945 return -TARGET_EFAULT
;
8949 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8950 if (!is_error(ret
) && arg3
) {
8951 if (copy_to_user_timeval(arg3
,
8952 &ovalue
.it_interval
)
8953 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8955 return -TARGET_EFAULT
;
8959 case TARGET_NR_getitimer
:
8961 struct itimerval value
;
8963 ret
= get_errno(getitimer(arg1
, &value
));
8964 if (!is_error(ret
) && arg2
) {
8965 if (copy_to_user_timeval(arg2
,
8967 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8969 return -TARGET_EFAULT
;
8973 #ifdef TARGET_NR_stat
8974 case TARGET_NR_stat
:
8975 if (!(p
= lock_user_string(arg1
))) {
8976 return -TARGET_EFAULT
;
8978 ret
= get_errno(stat(path(p
), &st
));
8979 unlock_user(p
, arg1
, 0);
8982 #ifdef TARGET_NR_lstat
8983 case TARGET_NR_lstat
:
8984 if (!(p
= lock_user_string(arg1
))) {
8985 return -TARGET_EFAULT
;
8987 ret
= get_errno(lstat(path(p
), &st
));
8988 unlock_user(p
, arg1
, 0);
8991 #ifdef TARGET_NR_fstat
8992 case TARGET_NR_fstat
:
8994 ret
= get_errno(fstat(arg1
, &st
));
8995 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8998 if (!is_error(ret
)) {
8999 struct target_stat
*target_st
;
9001 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9002 return -TARGET_EFAULT
;
9003 memset(target_st
, 0, sizeof(*target_st
));
9004 __put_user(st
.st_dev
, &target_st
->st_dev
);
9005 __put_user(st
.st_ino
, &target_st
->st_ino
);
9006 __put_user(st
.st_mode
, &target_st
->st_mode
);
9007 __put_user(st
.st_uid
, &target_st
->st_uid
);
9008 __put_user(st
.st_gid
, &target_st
->st_gid
);
9009 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9010 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9011 __put_user(st
.st_size
, &target_st
->st_size
);
9012 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9013 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9014 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9015 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9016 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9017 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9018 defined(TARGET_STAT_HAVE_NSEC)
9019 __put_user(st
.st_atim
.tv_nsec
,
9020 &target_st
->target_st_atime_nsec
);
9021 __put_user(st
.st_mtim
.tv_nsec
,
9022 &target_st
->target_st_mtime_nsec
);
9023 __put_user(st
.st_ctim
.tv_nsec
,
9024 &target_st
->target_st_ctime_nsec
);
9026 unlock_user_struct(target_st
, arg2
, 1);
9031 case TARGET_NR_vhangup
:
9032 return get_errno(vhangup());
9033 #ifdef TARGET_NR_syscall
9034 case TARGET_NR_syscall
:
9035 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9036 arg6
, arg7
, arg8
, 0);
9038 case TARGET_NR_wait4
:
9041 abi_long status_ptr
= arg2
;
9042 struct rusage rusage
, *rusage_ptr
;
9043 abi_ulong target_rusage
= arg4
;
9044 abi_long rusage_err
;
9046 rusage_ptr
= &rusage
;
9049 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9050 if (!is_error(ret
)) {
9051 if (status_ptr
&& ret
) {
9052 status
= host_to_target_waitstatus(status
);
9053 if (put_user_s32(status
, status_ptr
))
9054 return -TARGET_EFAULT
;
9056 if (target_rusage
) {
9057 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9065 #ifdef TARGET_NR_swapoff
9066 case TARGET_NR_swapoff
:
9067 if (!(p
= lock_user_string(arg1
)))
9068 return -TARGET_EFAULT
;
9069 ret
= get_errno(swapoff(p
));
9070 unlock_user(p
, arg1
, 0);
9073 case TARGET_NR_sysinfo
:
9075 struct target_sysinfo
*target_value
;
9076 struct sysinfo value
;
9077 ret
= get_errno(sysinfo(&value
));
9078 if (!is_error(ret
) && arg1
)
9080 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9081 return -TARGET_EFAULT
;
9082 __put_user(value
.uptime
, &target_value
->uptime
);
9083 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9084 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9085 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9086 __put_user(value
.totalram
, &target_value
->totalram
);
9087 __put_user(value
.freeram
, &target_value
->freeram
);
9088 __put_user(value
.sharedram
, &target_value
->sharedram
);
9089 __put_user(value
.bufferram
, &target_value
->bufferram
);
9090 __put_user(value
.totalswap
, &target_value
->totalswap
);
9091 __put_user(value
.freeswap
, &target_value
->freeswap
);
9092 __put_user(value
.procs
, &target_value
->procs
);
9093 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9094 __put_user(value
.freehigh
, &target_value
->freehigh
);
9095 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9096 unlock_user_struct(target_value
, arg1
, 1);
9100 #ifdef TARGET_NR_ipc
9102 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9104 #ifdef TARGET_NR_semget
9105 case TARGET_NR_semget
:
9106 return get_errno(semget(arg1
, arg2
, arg3
));
9108 #ifdef TARGET_NR_semop
9109 case TARGET_NR_semop
:
9110 return do_semop(arg1
, arg2
, arg3
);
9112 #ifdef TARGET_NR_semctl
9113 case TARGET_NR_semctl
:
9114 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9116 #ifdef TARGET_NR_msgctl
9117 case TARGET_NR_msgctl
:
9118 return do_msgctl(arg1
, arg2
, arg3
);
9120 #ifdef TARGET_NR_msgget
9121 case TARGET_NR_msgget
:
9122 return get_errno(msgget(arg1
, arg2
));
9124 #ifdef TARGET_NR_msgrcv
9125 case TARGET_NR_msgrcv
:
9126 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9128 #ifdef TARGET_NR_msgsnd
9129 case TARGET_NR_msgsnd
:
9130 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9132 #ifdef TARGET_NR_shmget
9133 case TARGET_NR_shmget
:
9134 return get_errno(shmget(arg1
, arg2
, arg3
));
9136 #ifdef TARGET_NR_shmctl
9137 case TARGET_NR_shmctl
:
9138 return do_shmctl(arg1
, arg2
, arg3
);
9140 #ifdef TARGET_NR_shmat
9141 case TARGET_NR_shmat
:
9142 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9144 #ifdef TARGET_NR_shmdt
9145 case TARGET_NR_shmdt
:
9146 return do_shmdt(arg1
);
9148 case TARGET_NR_fsync
:
9149 return get_errno(fsync(arg1
));
9150 case TARGET_NR_clone
:
9151 /* Linux manages to have three different orderings for its
9152 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9153 * match the kernel's CONFIG_CLONE_* settings.
9154 * Microblaze is further special in that it uses a sixth
9155 * implicit argument to clone for the TLS pointer.
9157 #if defined(TARGET_MICROBLAZE)
9158 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9159 #elif defined(TARGET_CLONE_BACKWARDS)
9160 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9161 #elif defined(TARGET_CLONE_BACKWARDS2)
9162 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9164 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9167 #ifdef __NR_exit_group
9168 /* new thread calls */
9169 case TARGET_NR_exit_group
:
9170 preexit_cleanup(cpu_env
, arg1
);
9171 return get_errno(exit_group(arg1
));
9173 case TARGET_NR_setdomainname
:
9174 if (!(p
= lock_user_string(arg1
)))
9175 return -TARGET_EFAULT
;
9176 ret
= get_errno(setdomainname(p
, arg2
));
9177 unlock_user(p
, arg1
, 0);
9179 case TARGET_NR_uname
:
9180 /* no need to transcode because we use the linux syscall */
9182 struct new_utsname
* buf
;
9184 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9185 return -TARGET_EFAULT
;
9186 ret
= get_errno(sys_uname(buf
));
9187 if (!is_error(ret
)) {
9188 /* Overwrite the native machine name with whatever is being
9190 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9191 sizeof(buf
->machine
));
9192 /* Allow the user to override the reported release. */
9193 if (qemu_uname_release
&& *qemu_uname_release
) {
9194 g_strlcpy(buf
->release
, qemu_uname_release
,
9195 sizeof(buf
->release
));
9198 unlock_user_struct(buf
, arg1
, 1);
9202 case TARGET_NR_modify_ldt
:
9203 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9204 #if !defined(TARGET_X86_64)
9205 case TARGET_NR_vm86
:
9206 return do_vm86(cpu_env
, arg1
, arg2
);
9209 case TARGET_NR_adjtimex
:
9211 struct timex host_buf
;
9213 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9214 return -TARGET_EFAULT
;
9216 ret
= get_errno(adjtimex(&host_buf
));
9217 if (!is_error(ret
)) {
9218 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9219 return -TARGET_EFAULT
;
9224 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9225 case TARGET_NR_clock_adjtime
:
9227 struct timex htx
, *phtx
= &htx
;
9229 if (target_to_host_timex(phtx
, arg2
) != 0) {
9230 return -TARGET_EFAULT
;
9232 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9233 if (!is_error(ret
) && phtx
) {
9234 if (host_to_target_timex(arg2
, phtx
) != 0) {
9235 return -TARGET_EFAULT
;
9241 case TARGET_NR_getpgid
:
9242 return get_errno(getpgid(arg1
));
9243 case TARGET_NR_fchdir
:
9244 return get_errno(fchdir(arg1
));
9245 case TARGET_NR_personality
:
9246 return get_errno(personality(arg1
));
9247 #ifdef TARGET_NR__llseek /* Not on alpha */
9248 case TARGET_NR__llseek
:
9251 #if !defined(__NR_llseek)
9252 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9254 ret
= get_errno(res
);
9259 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9261 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9262 return -TARGET_EFAULT
;
9267 #ifdef TARGET_NR_getdents
9268 case TARGET_NR_getdents
:
9269 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9270 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9272 struct target_dirent
*target_dirp
;
9273 struct linux_dirent
*dirp
;
9274 abi_long count
= arg3
;
9276 dirp
= g_try_malloc(count
);
9278 return -TARGET_ENOMEM
;
9281 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9282 if (!is_error(ret
)) {
9283 struct linux_dirent
*de
;
9284 struct target_dirent
*tde
;
9286 int reclen
, treclen
;
9287 int count1
, tnamelen
;
9291 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9292 return -TARGET_EFAULT
;
9295 reclen
= de
->d_reclen
;
9296 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9297 assert(tnamelen
>= 0);
9298 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9299 assert(count1
+ treclen
<= count
);
9300 tde
->d_reclen
= tswap16(treclen
);
9301 tde
->d_ino
= tswapal(de
->d_ino
);
9302 tde
->d_off
= tswapal(de
->d_off
);
9303 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9304 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9306 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9310 unlock_user(target_dirp
, arg2
, ret
);
9316 struct linux_dirent
*dirp
;
9317 abi_long count
= arg3
;
9319 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9320 return -TARGET_EFAULT
;
9321 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9322 if (!is_error(ret
)) {
9323 struct linux_dirent
*de
;
9328 reclen
= de
->d_reclen
;
9331 de
->d_reclen
= tswap16(reclen
);
9332 tswapls(&de
->d_ino
);
9333 tswapls(&de
->d_off
);
9334 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9338 unlock_user(dirp
, arg2
, ret
);
9342 /* Implement getdents in terms of getdents64 */
9344 struct linux_dirent64
*dirp
;
9345 abi_long count
= arg3
;
9347 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9349 return -TARGET_EFAULT
;
9351 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9352 if (!is_error(ret
)) {
9353 /* Convert the dirent64 structs to target dirent. We do this
9354 * in-place, since we can guarantee that a target_dirent is no
9355 * larger than a dirent64; however this means we have to be
9356 * careful to read everything before writing in the new format.
9358 struct linux_dirent64
*de
;
9359 struct target_dirent
*tde
;
9364 tde
= (struct target_dirent
*)dirp
;
9366 int namelen
, treclen
;
9367 int reclen
= de
->d_reclen
;
9368 uint64_t ino
= de
->d_ino
;
9369 int64_t off
= de
->d_off
;
9370 uint8_t type
= de
->d_type
;
9372 namelen
= strlen(de
->d_name
);
9373 treclen
= offsetof(struct target_dirent
, d_name
)
9375 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9377 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9378 tde
->d_ino
= tswapal(ino
);
9379 tde
->d_off
= tswapal(off
);
9380 tde
->d_reclen
= tswap16(treclen
);
9381 /* The target_dirent type is in what was formerly a padding
9382 * byte at the end of the structure:
9384 *(((char *)tde
) + treclen
- 1) = type
;
9386 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9387 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9393 unlock_user(dirp
, arg2
, ret
);
9397 #endif /* TARGET_NR_getdents */
9398 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9399 case TARGET_NR_getdents64
:
9401 struct linux_dirent64
*dirp
;
9402 abi_long count
= arg3
;
9403 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9404 return -TARGET_EFAULT
;
9405 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9406 if (!is_error(ret
)) {
9407 struct linux_dirent64
*de
;
9412 reclen
= de
->d_reclen
;
9415 de
->d_reclen
= tswap16(reclen
);
9416 tswap64s((uint64_t *)&de
->d_ino
);
9417 tswap64s((uint64_t *)&de
->d_off
);
9418 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9422 unlock_user(dirp
, arg2
, ret
);
9425 #endif /* TARGET_NR_getdents64 */
9426 #if defined(TARGET_NR__newselect)
9427 case TARGET_NR__newselect
:
9428 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9430 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9431 # ifdef TARGET_NR_poll
9432 case TARGET_NR_poll
:
9434 # ifdef TARGET_NR_ppoll
9435 case TARGET_NR_ppoll
:
9438 struct target_pollfd
*target_pfd
;
9439 unsigned int nfds
= arg2
;
9446 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9447 return -TARGET_EINVAL
;
9450 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9451 sizeof(struct target_pollfd
) * nfds
, 1);
9453 return -TARGET_EFAULT
;
9456 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9457 for (i
= 0; i
< nfds
; i
++) {
9458 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9459 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9464 # ifdef TARGET_NR_ppoll
9465 case TARGET_NR_ppoll
:
9467 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9468 target_sigset_t
*target_set
;
9469 sigset_t _set
, *set
= &_set
;
9472 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9473 unlock_user(target_pfd
, arg1
, 0);
9474 return -TARGET_EFAULT
;
9481 if (arg5
!= sizeof(target_sigset_t
)) {
9482 unlock_user(target_pfd
, arg1
, 0);
9483 return -TARGET_EINVAL
;
9486 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9488 unlock_user(target_pfd
, arg1
, 0);
9489 return -TARGET_EFAULT
;
9491 target_to_host_sigset(set
, target_set
);
9496 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9497 set
, SIGSET_T_SIZE
));
9499 if (!is_error(ret
) && arg3
) {
9500 host_to_target_timespec(arg3
, timeout_ts
);
9503 unlock_user(target_set
, arg4
, 0);
9508 # ifdef TARGET_NR_poll
9509 case TARGET_NR_poll
:
9511 struct timespec ts
, *pts
;
9514 /* Convert ms to secs, ns */
9515 ts
.tv_sec
= arg3
/ 1000;
9516 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9519 /* -ve poll() timeout means "infinite" */
9522 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9527 g_assert_not_reached();
9530 if (!is_error(ret
)) {
9531 for(i
= 0; i
< nfds
; i
++) {
9532 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9535 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9539 case TARGET_NR_flock
:
9540 /* NOTE: the flock constant seems to be the same for every
9542 return get_errno(safe_flock(arg1
, arg2
));
9543 case TARGET_NR_readv
:
9545 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9547 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9548 unlock_iovec(vec
, arg2
, arg3
, 1);
9550 ret
= -host_to_target_errno(errno
);
9554 case TARGET_NR_writev
:
9556 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9558 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9559 unlock_iovec(vec
, arg2
, arg3
, 0);
9561 ret
= -host_to_target_errno(errno
);
9565 #if defined(TARGET_NR_preadv)
9566 case TARGET_NR_preadv
:
9568 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9570 unsigned long low
, high
;
9572 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9573 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9574 unlock_iovec(vec
, arg2
, arg3
, 1);
9576 ret
= -host_to_target_errno(errno
);
9581 #if defined(TARGET_NR_pwritev)
9582 case TARGET_NR_pwritev
:
9584 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9586 unsigned long low
, high
;
9588 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9589 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9590 unlock_iovec(vec
, arg2
, arg3
, 0);
9592 ret
= -host_to_target_errno(errno
);
9597 case TARGET_NR_getsid
:
9598 return get_errno(getsid(arg1
));
9599 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9600 case TARGET_NR_fdatasync
:
9601 return get_errno(fdatasync(arg1
));
9603 #ifdef TARGET_NR__sysctl
9604 case TARGET_NR__sysctl
:
9605 /* We don't implement this, but ENOTDIR is always a safe
9607 return -TARGET_ENOTDIR
;
9609 case TARGET_NR_sched_getaffinity
:
9611 unsigned int mask_size
;
9612 unsigned long *mask
;
9615 * sched_getaffinity needs multiples of ulong, so need to take
9616 * care of mismatches between target ulong and host ulong sizes.
9618 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9619 return -TARGET_EINVAL
;
9621 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9623 mask
= alloca(mask_size
);
9624 memset(mask
, 0, mask_size
);
9625 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9627 if (!is_error(ret
)) {
9629 /* More data returned than the caller's buffer will fit.
9630 * This only happens if sizeof(abi_long) < sizeof(long)
9631 * and the caller passed us a buffer holding an odd number
9632 * of abi_longs. If the host kernel is actually using the
9633 * extra 4 bytes then fail EINVAL; otherwise we can just
9634 * ignore them and only copy the interesting part.
9636 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9637 if (numcpus
> arg2
* 8) {
9638 return -TARGET_EINVAL
;
9643 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9644 return -TARGET_EFAULT
;
9649 case TARGET_NR_sched_setaffinity
:
9651 unsigned int mask_size
;
9652 unsigned long *mask
;
9655 * sched_setaffinity needs multiples of ulong, so need to take
9656 * care of mismatches between target ulong and host ulong sizes.
9658 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9659 return -TARGET_EINVAL
;
9661 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9662 mask
= alloca(mask_size
);
9664 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9669 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9671 case TARGET_NR_getcpu
:
9674 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9675 arg2
? &node
: NULL
,
9677 if (is_error(ret
)) {
9680 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9681 return -TARGET_EFAULT
;
9683 if (arg2
&& put_user_u32(node
, arg2
)) {
9684 return -TARGET_EFAULT
;
9688 case TARGET_NR_sched_setparam
:
9690 struct sched_param
*target_schp
;
9691 struct sched_param schp
;
9694 return -TARGET_EINVAL
;
9696 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9697 return -TARGET_EFAULT
;
9698 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9699 unlock_user_struct(target_schp
, arg2
, 0);
9700 return get_errno(sched_setparam(arg1
, &schp
));
9702 case TARGET_NR_sched_getparam
:
9704 struct sched_param
*target_schp
;
9705 struct sched_param schp
;
9708 return -TARGET_EINVAL
;
9710 ret
= get_errno(sched_getparam(arg1
, &schp
));
9711 if (!is_error(ret
)) {
9712 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9713 return -TARGET_EFAULT
;
9714 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9715 unlock_user_struct(target_schp
, arg2
, 1);
9719 case TARGET_NR_sched_setscheduler
:
9721 struct sched_param
*target_schp
;
9722 struct sched_param schp
;
9724 return -TARGET_EINVAL
;
9726 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9727 return -TARGET_EFAULT
;
9728 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9729 unlock_user_struct(target_schp
, arg3
, 0);
9730 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9732 case TARGET_NR_sched_getscheduler
:
9733 return get_errno(sched_getscheduler(arg1
));
9734 case TARGET_NR_sched_yield
:
9735 return get_errno(sched_yield());
9736 case TARGET_NR_sched_get_priority_max
:
9737 return get_errno(sched_get_priority_max(arg1
));
9738 case TARGET_NR_sched_get_priority_min
:
9739 return get_errno(sched_get_priority_min(arg1
));
9740 case TARGET_NR_sched_rr_get_interval
:
9743 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9744 if (!is_error(ret
)) {
9745 ret
= host_to_target_timespec(arg2
, &ts
);
9749 case TARGET_NR_nanosleep
:
9751 struct timespec req
, rem
;
9752 target_to_host_timespec(&req
, arg1
);
9753 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9754 if (is_error(ret
) && arg2
) {
9755 host_to_target_timespec(arg2
, &rem
);
9759 case TARGET_NR_prctl
:
9761 case PR_GET_PDEATHSIG
:
9764 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9765 if (!is_error(ret
) && arg2
9766 && put_user_ual(deathsig
, arg2
)) {
9767 return -TARGET_EFAULT
;
9774 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9776 return -TARGET_EFAULT
;
9778 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9780 unlock_user(name
, arg2
, 16);
9785 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9787 return -TARGET_EFAULT
;
9789 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9791 unlock_user(name
, arg2
, 0);
9796 case TARGET_PR_GET_FP_MODE
:
9798 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9800 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
9801 ret
|= TARGET_PR_FP_MODE_FR
;
9803 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
9804 ret
|= TARGET_PR_FP_MODE_FRE
;
9808 case TARGET_PR_SET_FP_MODE
:
9810 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9811 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
9812 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
9813 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
9814 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
9816 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
9817 TARGET_PR_FP_MODE_FRE
;
9819 /* If nothing to change, return right away, successfully. */
9820 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
9823 /* Check the value is valid */
9824 if (arg2
& ~known_bits
) {
9825 return -TARGET_EOPNOTSUPP
;
9827 /* Setting FRE without FR is not supported. */
9828 if (new_fre
&& !new_fr
) {
9829 return -TARGET_EOPNOTSUPP
;
9831 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
9832 /* FR1 is not supported */
9833 return -TARGET_EOPNOTSUPP
;
9835 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
9836 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
9837 /* cannot set FR=0 */
9838 return -TARGET_EOPNOTSUPP
;
9840 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
9841 /* Cannot set FRE=1 */
9842 return -TARGET_EOPNOTSUPP
;
9846 fpr_t
*fpr
= env
->active_fpu
.fpr
;
9847 for (i
= 0; i
< 32 ; i
+= 2) {
9848 if (!old_fr
&& new_fr
) {
9849 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
9850 } else if (old_fr
&& !new_fr
) {
9851 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
9856 env
->CP0_Status
|= (1 << CP0St_FR
);
9857 env
->hflags
|= MIPS_HFLAG_F64
;
9859 env
->CP0_Status
&= ~(1 << CP0St_FR
);
9860 env
->hflags
&= ~MIPS_HFLAG_F64
;
9863 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
9864 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
9865 env
->hflags
|= MIPS_HFLAG_FRE
;
9868 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
9869 env
->hflags
&= ~MIPS_HFLAG_FRE
;
9875 #ifdef TARGET_AARCH64
9876 case TARGET_PR_SVE_SET_VL
:
9878 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9879 * PR_SVE_VL_INHERIT. Note the kernel definition
9880 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9881 * even though the current architectural maximum is VQ=16.
9883 ret
= -TARGET_EINVAL
;
9884 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
9885 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
9886 CPUARMState
*env
= cpu_env
;
9887 ARMCPU
*cpu
= env_archcpu(env
);
9888 uint32_t vq
, old_vq
;
9890 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
9891 vq
= MAX(arg2
/ 16, 1);
9892 vq
= MIN(vq
, cpu
->sve_max_vq
);
9895 aarch64_sve_narrow_vq(env
, vq
);
9897 env
->vfp
.zcr_el
[1] = vq
- 1;
9901 case TARGET_PR_SVE_GET_VL
:
9902 ret
= -TARGET_EINVAL
;
9904 ARMCPU
*cpu
= env_archcpu(cpu_env
);
9905 if (cpu_isar_feature(aa64_sve
, cpu
)) {
9906 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
9910 case TARGET_PR_PAC_RESET_KEYS
:
9912 CPUARMState
*env
= cpu_env
;
9913 ARMCPU
*cpu
= env_archcpu(env
);
9915 if (arg3
|| arg4
|| arg5
) {
9916 return -TARGET_EINVAL
;
9918 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
9919 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
9920 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
9921 TARGET_PR_PAC_APGAKEY
);
9927 } else if (arg2
& ~all
) {
9928 return -TARGET_EINVAL
;
9930 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
9931 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
9932 sizeof(ARMPACKey
), &err
);
9934 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
9935 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
9936 sizeof(ARMPACKey
), &err
);
9938 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
9939 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
9940 sizeof(ARMPACKey
), &err
);
9942 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
9943 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
9944 sizeof(ARMPACKey
), &err
);
9946 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
9947 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
9948 sizeof(ARMPACKey
), &err
);
9952 * Some unknown failure in the crypto. The best
9953 * we can do is log it and fail the syscall.
9954 * The real syscall cannot fail this way.
9956 qemu_log_mask(LOG_UNIMP
,
9957 "PR_PAC_RESET_KEYS: Crypto failure: %s",
9958 error_get_pretty(err
));
9965 return -TARGET_EINVAL
;
9966 #endif /* AARCH64 */
9967 case PR_GET_SECCOMP
:
9968 case PR_SET_SECCOMP
:
9969 /* Disable seccomp to prevent the target disabling syscalls we
9971 return -TARGET_EINVAL
;
9973 /* Most prctl options have no pointer arguments */
9974 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9977 #ifdef TARGET_NR_arch_prctl
9978 case TARGET_NR_arch_prctl
:
9979 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9980 return do_arch_prctl(cpu_env
, arg1
, arg2
);
9985 #ifdef TARGET_NR_pread64
9986 case TARGET_NR_pread64
:
9987 if (regpairs_aligned(cpu_env
, num
)) {
9991 if (arg2
== 0 && arg3
== 0) {
9992 /* Special-case NULL buffer and zero length, which should succeed */
9995 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9997 return -TARGET_EFAULT
;
10000 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10001 unlock_user(p
, arg2
, ret
);
10003 case TARGET_NR_pwrite64
:
10004 if (regpairs_aligned(cpu_env
, num
)) {
10008 if (arg2
== 0 && arg3
== 0) {
10009 /* Special-case NULL buffer and zero length, which should succeed */
10012 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10014 return -TARGET_EFAULT
;
10017 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10018 unlock_user(p
, arg2
, 0);
10021 case TARGET_NR_getcwd
:
10022 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10023 return -TARGET_EFAULT
;
10024 ret
= get_errno(sys_getcwd1(p
, arg2
));
10025 unlock_user(p
, arg1
, ret
);
10027 case TARGET_NR_capget
:
10028 case TARGET_NR_capset
:
10030 struct target_user_cap_header
*target_header
;
10031 struct target_user_cap_data
*target_data
= NULL
;
10032 struct __user_cap_header_struct header
;
10033 struct __user_cap_data_struct data
[2];
10034 struct __user_cap_data_struct
*dataptr
= NULL
;
10035 int i
, target_datalen
;
10036 int data_items
= 1;
10038 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10039 return -TARGET_EFAULT
;
10041 header
.version
= tswap32(target_header
->version
);
10042 header
.pid
= tswap32(target_header
->pid
);
10044 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10045 /* Version 2 and up takes pointer to two user_data structs */
10049 target_datalen
= sizeof(*target_data
) * data_items
;
10052 if (num
== TARGET_NR_capget
) {
10053 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10055 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10057 if (!target_data
) {
10058 unlock_user_struct(target_header
, arg1
, 0);
10059 return -TARGET_EFAULT
;
10062 if (num
== TARGET_NR_capset
) {
10063 for (i
= 0; i
< data_items
; i
++) {
10064 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10065 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10066 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10073 if (num
== TARGET_NR_capget
) {
10074 ret
= get_errno(capget(&header
, dataptr
));
10076 ret
= get_errno(capset(&header
, dataptr
));
10079 /* The kernel always updates version for both capget and capset */
10080 target_header
->version
= tswap32(header
.version
);
10081 unlock_user_struct(target_header
, arg1
, 1);
10084 if (num
== TARGET_NR_capget
) {
10085 for (i
= 0; i
< data_items
; i
++) {
10086 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10087 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10088 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10090 unlock_user(target_data
, arg2
, target_datalen
);
10092 unlock_user(target_data
, arg2
, 0);
10097 case TARGET_NR_sigaltstack
:
10098 return do_sigaltstack(arg1
, arg2
,
10099 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10101 #ifdef CONFIG_SENDFILE
10102 #ifdef TARGET_NR_sendfile
10103 case TARGET_NR_sendfile
:
10105 off_t
*offp
= NULL
;
10108 ret
= get_user_sal(off
, arg3
);
10109 if (is_error(ret
)) {
10114 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10115 if (!is_error(ret
) && arg3
) {
10116 abi_long ret2
= put_user_sal(off
, arg3
);
10117 if (is_error(ret2
)) {
10124 #ifdef TARGET_NR_sendfile64
10125 case TARGET_NR_sendfile64
:
10127 off_t
*offp
= NULL
;
10130 ret
= get_user_s64(off
, arg3
);
10131 if (is_error(ret
)) {
10136 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10137 if (!is_error(ret
) && arg3
) {
10138 abi_long ret2
= put_user_s64(off
, arg3
);
10139 if (is_error(ret2
)) {
10147 #ifdef TARGET_NR_vfork
10148 case TARGET_NR_vfork
:
10149 return get_errno(do_fork(cpu_env
,
10150 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10153 #ifdef TARGET_NR_ugetrlimit
10154 case TARGET_NR_ugetrlimit
:
10156 struct rlimit rlim
;
10157 int resource
= target_to_host_resource(arg1
);
10158 ret
= get_errno(getrlimit(resource
, &rlim
));
10159 if (!is_error(ret
)) {
10160 struct target_rlimit
*target_rlim
;
10161 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10162 return -TARGET_EFAULT
;
10163 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10164 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10165 unlock_user_struct(target_rlim
, arg2
, 1);
10170 #ifdef TARGET_NR_truncate64
10171 case TARGET_NR_truncate64
:
10172 if (!(p
= lock_user_string(arg1
)))
10173 return -TARGET_EFAULT
;
10174 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10175 unlock_user(p
, arg1
, 0);
10178 #ifdef TARGET_NR_ftruncate64
10179 case TARGET_NR_ftruncate64
:
10180 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10182 #ifdef TARGET_NR_stat64
10183 case TARGET_NR_stat64
:
10184 if (!(p
= lock_user_string(arg1
))) {
10185 return -TARGET_EFAULT
;
10187 ret
= get_errno(stat(path(p
), &st
));
10188 unlock_user(p
, arg1
, 0);
10189 if (!is_error(ret
))
10190 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10193 #ifdef TARGET_NR_lstat64
10194 case TARGET_NR_lstat64
:
10195 if (!(p
= lock_user_string(arg1
))) {
10196 return -TARGET_EFAULT
;
10198 ret
= get_errno(lstat(path(p
), &st
));
10199 unlock_user(p
, arg1
, 0);
10200 if (!is_error(ret
))
10201 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10204 #ifdef TARGET_NR_fstat64
10205 case TARGET_NR_fstat64
:
10206 ret
= get_errno(fstat(arg1
, &st
));
10207 if (!is_error(ret
))
10208 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10211 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10212 #ifdef TARGET_NR_fstatat64
10213 case TARGET_NR_fstatat64
:
10215 #ifdef TARGET_NR_newfstatat
10216 case TARGET_NR_newfstatat
:
10218 if (!(p
= lock_user_string(arg2
))) {
10219 return -TARGET_EFAULT
;
10221 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10222 unlock_user(p
, arg2
, 0);
10223 if (!is_error(ret
))
10224 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10227 #if defined(TARGET_NR_statx)
10228 case TARGET_NR_statx
:
10230 struct target_statx
*target_stx
;
10234 p
= lock_user_string(arg2
);
10236 return -TARGET_EFAULT
;
10238 #if defined(__NR_statx)
10241 * It is assumed that struct statx is architecture independent.
10243 struct target_statx host_stx
;
10246 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10247 if (!is_error(ret
)) {
10248 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10249 unlock_user(p
, arg2
, 0);
10250 return -TARGET_EFAULT
;
10254 if (ret
!= -TARGET_ENOSYS
) {
10255 unlock_user(p
, arg2
, 0);
10260 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10261 unlock_user(p
, arg2
, 0);
10263 if (!is_error(ret
)) {
10264 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10265 return -TARGET_EFAULT
;
10267 memset(target_stx
, 0, sizeof(*target_stx
));
10268 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10269 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10270 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10271 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10272 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10273 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10274 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10275 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10276 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10277 __put_user(st
.st_size
, &target_stx
->stx_size
);
10278 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10279 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10280 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10281 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10282 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10283 unlock_user_struct(target_stx
, arg5
, 1);
10288 #ifdef TARGET_NR_lchown
10289 case TARGET_NR_lchown
:
10290 if (!(p
= lock_user_string(arg1
)))
10291 return -TARGET_EFAULT
;
10292 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10293 unlock_user(p
, arg1
, 0);
10296 #ifdef TARGET_NR_getuid
10297 case TARGET_NR_getuid
:
10298 return get_errno(high2lowuid(getuid()));
10300 #ifdef TARGET_NR_getgid
10301 case TARGET_NR_getgid
:
10302 return get_errno(high2lowgid(getgid()));
10304 #ifdef TARGET_NR_geteuid
10305 case TARGET_NR_geteuid
:
10306 return get_errno(high2lowuid(geteuid()));
10308 #ifdef TARGET_NR_getegid
10309 case TARGET_NR_getegid
:
10310 return get_errno(high2lowgid(getegid()));
10312 case TARGET_NR_setreuid
:
10313 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10314 case TARGET_NR_setregid
:
10315 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10316 case TARGET_NR_getgroups
:
10318 int gidsetsize
= arg1
;
10319 target_id
*target_grouplist
;
10323 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10324 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10325 if (gidsetsize
== 0)
10327 if (!is_error(ret
)) {
10328 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10329 if (!target_grouplist
)
10330 return -TARGET_EFAULT
;
10331 for(i
= 0;i
< ret
; i
++)
10332 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10333 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10337 case TARGET_NR_setgroups
:
10339 int gidsetsize
= arg1
;
10340 target_id
*target_grouplist
;
10341 gid_t
*grouplist
= NULL
;
10344 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10345 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10346 if (!target_grouplist
) {
10347 return -TARGET_EFAULT
;
10349 for (i
= 0; i
< gidsetsize
; i
++) {
10350 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10352 unlock_user(target_grouplist
, arg2
, 0);
10354 return get_errno(setgroups(gidsetsize
, grouplist
));
10356 case TARGET_NR_fchown
:
10357 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10358 #if defined(TARGET_NR_fchownat)
10359 case TARGET_NR_fchownat
:
10360 if (!(p
= lock_user_string(arg2
)))
10361 return -TARGET_EFAULT
;
10362 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10363 low2highgid(arg4
), arg5
));
10364 unlock_user(p
, arg2
, 0);
10367 #ifdef TARGET_NR_setresuid
10368 case TARGET_NR_setresuid
:
10369 return get_errno(sys_setresuid(low2highuid(arg1
),
10371 low2highuid(arg3
)));
10373 #ifdef TARGET_NR_getresuid
10374 case TARGET_NR_getresuid
:
10376 uid_t ruid
, euid
, suid
;
10377 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10378 if (!is_error(ret
)) {
10379 if (put_user_id(high2lowuid(ruid
), arg1
)
10380 || put_user_id(high2lowuid(euid
), arg2
)
10381 || put_user_id(high2lowuid(suid
), arg3
))
10382 return -TARGET_EFAULT
;
10387 #ifdef TARGET_NR_getresgid
10388 case TARGET_NR_setresgid
:
10389 return get_errno(sys_setresgid(low2highgid(arg1
),
10391 low2highgid(arg3
)));
10393 #ifdef TARGET_NR_getresgid
10394 case TARGET_NR_getresgid
:
10396 gid_t rgid
, egid
, sgid
;
10397 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10398 if (!is_error(ret
)) {
10399 if (put_user_id(high2lowgid(rgid
), arg1
)
10400 || put_user_id(high2lowgid(egid
), arg2
)
10401 || put_user_id(high2lowgid(sgid
), arg3
))
10402 return -TARGET_EFAULT
;
10407 #ifdef TARGET_NR_chown
10408 case TARGET_NR_chown
:
10409 if (!(p
= lock_user_string(arg1
)))
10410 return -TARGET_EFAULT
;
10411 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10412 unlock_user(p
, arg1
, 0);
10415 case TARGET_NR_setuid
:
10416 return get_errno(sys_setuid(low2highuid(arg1
)));
10417 case TARGET_NR_setgid
:
10418 return get_errno(sys_setgid(low2highgid(arg1
)));
10419 case TARGET_NR_setfsuid
:
10420 return get_errno(setfsuid(arg1
));
10421 case TARGET_NR_setfsgid
:
10422 return get_errno(setfsgid(arg1
));
10424 #ifdef TARGET_NR_lchown32
10425 case TARGET_NR_lchown32
:
10426 if (!(p
= lock_user_string(arg1
)))
10427 return -TARGET_EFAULT
;
10428 ret
= get_errno(lchown(p
, arg2
, arg3
));
10429 unlock_user(p
, arg1
, 0);
10432 #ifdef TARGET_NR_getuid32
10433 case TARGET_NR_getuid32
:
10434 return get_errno(getuid());
10437 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10438 /* Alpha specific */
10439 case TARGET_NR_getxuid
:
10443 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10445 return get_errno(getuid());
10447 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10448 /* Alpha specific */
10449 case TARGET_NR_getxgid
:
10453 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10455 return get_errno(getgid());
10457 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10458 /* Alpha specific */
10459 case TARGET_NR_osf_getsysinfo
:
10460 ret
= -TARGET_EOPNOTSUPP
;
10462 case TARGET_GSI_IEEE_FP_CONTROL
:
10464 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10465 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
10467 swcr
&= ~SWCR_STATUS_MASK
;
10468 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10470 if (put_user_u64 (swcr
, arg2
))
10471 return -TARGET_EFAULT
;
10476 /* case GSI_IEEE_STATE_AT_SIGNAL:
10477 -- Not implemented in linux kernel.
10479 -- Retrieves current unaligned access state; not much used.
10480 case GSI_PROC_TYPE:
10481 -- Retrieves implver information; surely not used.
10482 case GSI_GET_HWRPB:
10483 -- Grabs a copy of the HWRPB; surely not used.
10488 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10489 /* Alpha specific */
10490 case TARGET_NR_osf_setsysinfo
:
10491 ret
= -TARGET_EOPNOTSUPP
;
10493 case TARGET_SSI_IEEE_FP_CONTROL
:
10495 uint64_t swcr
, fpcr
;
10497 if (get_user_u64 (swcr
, arg2
)) {
10498 return -TARGET_EFAULT
;
10502 * The kernel calls swcr_update_status to update the
10503 * status bits from the fpcr at every point that it
10504 * could be queried. Therefore, we store the status
10505 * bits only in FPCR.
10507 ((CPUAlphaState
*)cpu_env
)->swcr
10508 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
10510 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10511 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
10512 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
10513 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10518 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10520 uint64_t exc
, fpcr
, fex
;
10522 if (get_user_u64(exc
, arg2
)) {
10523 return -TARGET_EFAULT
;
10525 exc
&= SWCR_STATUS_MASK
;
10526 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10528 /* Old exceptions are not signaled. */
10529 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
10531 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
10532 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
10534 /* Update the hardware fpcr. */
10535 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
10536 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10539 int si_code
= TARGET_FPE_FLTUNK
;
10540 target_siginfo_t info
;
10542 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
10543 si_code
= TARGET_FPE_FLTUND
;
10545 if (fex
& SWCR_TRAP_ENABLE_INE
) {
10546 si_code
= TARGET_FPE_FLTRES
;
10548 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
10549 si_code
= TARGET_FPE_FLTUND
;
10551 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
10552 si_code
= TARGET_FPE_FLTOVF
;
10554 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
10555 si_code
= TARGET_FPE_FLTDIV
;
10557 if (fex
& SWCR_TRAP_ENABLE_INV
) {
10558 si_code
= TARGET_FPE_FLTINV
;
10561 info
.si_signo
= SIGFPE
;
10563 info
.si_code
= si_code
;
10564 info
._sifields
._sigfault
._addr
10565 = ((CPUArchState
*)cpu_env
)->pc
;
10566 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10567 QEMU_SI_FAULT
, &info
);
10573 /* case SSI_NVPAIRS:
10574 -- Used with SSIN_UACPROC to enable unaligned accesses.
10575 case SSI_IEEE_STATE_AT_SIGNAL:
10576 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10577 -- Not implemented in linux kernel
10582 #ifdef TARGET_NR_osf_sigprocmask
10583 /* Alpha specific. */
10584 case TARGET_NR_osf_sigprocmask
:
10588 sigset_t set
, oldset
;
10591 case TARGET_SIG_BLOCK
:
10594 case TARGET_SIG_UNBLOCK
:
10597 case TARGET_SIG_SETMASK
:
10601 return -TARGET_EINVAL
;
10604 target_to_host_old_sigset(&set
, &mask
);
10605 ret
= do_sigprocmask(how
, &set
, &oldset
);
10607 host_to_target_old_sigset(&mask
, &oldset
);
10614 #ifdef TARGET_NR_getgid32
10615 case TARGET_NR_getgid32
:
10616 return get_errno(getgid());
10618 #ifdef TARGET_NR_geteuid32
10619 case TARGET_NR_geteuid32
:
10620 return get_errno(geteuid());
10622 #ifdef TARGET_NR_getegid32
10623 case TARGET_NR_getegid32
:
10624 return get_errno(getegid());
10626 #ifdef TARGET_NR_setreuid32
10627 case TARGET_NR_setreuid32
:
10628 return get_errno(setreuid(arg1
, arg2
));
10630 #ifdef TARGET_NR_setregid32
10631 case TARGET_NR_setregid32
:
10632 return get_errno(setregid(arg1
, arg2
));
10634 #ifdef TARGET_NR_getgroups32
10635 case TARGET_NR_getgroups32
:
10637 int gidsetsize
= arg1
;
10638 uint32_t *target_grouplist
;
10642 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10643 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10644 if (gidsetsize
== 0)
10646 if (!is_error(ret
)) {
10647 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10648 if (!target_grouplist
) {
10649 return -TARGET_EFAULT
;
10651 for(i
= 0;i
< ret
; i
++)
10652 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10653 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10658 #ifdef TARGET_NR_setgroups32
10659 case TARGET_NR_setgroups32
:
10661 int gidsetsize
= arg1
;
10662 uint32_t *target_grouplist
;
10666 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10667 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10668 if (!target_grouplist
) {
10669 return -TARGET_EFAULT
;
10671 for(i
= 0;i
< gidsetsize
; i
++)
10672 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10673 unlock_user(target_grouplist
, arg2
, 0);
10674 return get_errno(setgroups(gidsetsize
, grouplist
));
10677 #ifdef TARGET_NR_fchown32
10678 case TARGET_NR_fchown32
:
10679 return get_errno(fchown(arg1
, arg2
, arg3
));
10681 #ifdef TARGET_NR_setresuid32
10682 case TARGET_NR_setresuid32
:
10683 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10685 #ifdef TARGET_NR_getresuid32
10686 case TARGET_NR_getresuid32
:
10688 uid_t ruid
, euid
, suid
;
10689 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10690 if (!is_error(ret
)) {
10691 if (put_user_u32(ruid
, arg1
)
10692 || put_user_u32(euid
, arg2
)
10693 || put_user_u32(suid
, arg3
))
10694 return -TARGET_EFAULT
;
10699 #ifdef TARGET_NR_setresgid32
10700 case TARGET_NR_setresgid32
:
10701 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10703 #ifdef TARGET_NR_getresgid32
10704 case TARGET_NR_getresgid32
:
10706 gid_t rgid
, egid
, sgid
;
10707 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10708 if (!is_error(ret
)) {
10709 if (put_user_u32(rgid
, arg1
)
10710 || put_user_u32(egid
, arg2
)
10711 || put_user_u32(sgid
, arg3
))
10712 return -TARGET_EFAULT
;
10717 #ifdef TARGET_NR_chown32
10718 case TARGET_NR_chown32
:
10719 if (!(p
= lock_user_string(arg1
)))
10720 return -TARGET_EFAULT
;
10721 ret
= get_errno(chown(p
, arg2
, arg3
));
10722 unlock_user(p
, arg1
, 0);
10725 #ifdef TARGET_NR_setuid32
10726 case TARGET_NR_setuid32
:
10727 return get_errno(sys_setuid(arg1
));
10729 #ifdef TARGET_NR_setgid32
10730 case TARGET_NR_setgid32
:
10731 return get_errno(sys_setgid(arg1
));
10733 #ifdef TARGET_NR_setfsuid32
10734 case TARGET_NR_setfsuid32
:
10735 return get_errno(setfsuid(arg1
));
10737 #ifdef TARGET_NR_setfsgid32
10738 case TARGET_NR_setfsgid32
:
10739 return get_errno(setfsgid(arg1
));
10741 #ifdef TARGET_NR_mincore
10742 case TARGET_NR_mincore
:
10744 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10746 return -TARGET_ENOMEM
;
10748 p
= lock_user_string(arg3
);
10750 ret
= -TARGET_EFAULT
;
10752 ret
= get_errno(mincore(a
, arg2
, p
));
10753 unlock_user(p
, arg3
, ret
);
10755 unlock_user(a
, arg1
, 0);
10759 #ifdef TARGET_NR_arm_fadvise64_64
10760 case TARGET_NR_arm_fadvise64_64
:
10761 /* arm_fadvise64_64 looks like fadvise64_64 but
10762 * with different argument order: fd, advice, offset, len
10763 * rather than the usual fd, offset, len, advice.
10764 * Note that offset and len are both 64-bit so appear as
10765 * pairs of 32-bit registers.
10767 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10768 target_offset64(arg5
, arg6
), arg2
);
10769 return -host_to_target_errno(ret
);
10772 #if TARGET_ABI_BITS == 32
10774 #ifdef TARGET_NR_fadvise64_64
10775 case TARGET_NR_fadvise64_64
:
10776 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10777 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10785 /* 6 args: fd, offset (high, low), len (high, low), advice */
10786 if (regpairs_aligned(cpu_env
, num
)) {
10787 /* offset is in (3,4), len in (5,6) and advice in 7 */
10795 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10796 target_offset64(arg4
, arg5
), arg6
);
10797 return -host_to_target_errno(ret
);
10800 #ifdef TARGET_NR_fadvise64
10801 case TARGET_NR_fadvise64
:
10802 /* 5 args: fd, offset (high, low), len, advice */
10803 if (regpairs_aligned(cpu_env
, num
)) {
10804 /* offset is in (3,4), len in 5 and advice in 6 */
10810 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
10811 return -host_to_target_errno(ret
);
10814 #else /* not a 32-bit ABI */
10815 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10816 #ifdef TARGET_NR_fadvise64_64
10817 case TARGET_NR_fadvise64_64
:
10819 #ifdef TARGET_NR_fadvise64
10820 case TARGET_NR_fadvise64
:
10822 #ifdef TARGET_S390X
10824 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10825 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10826 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10827 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10831 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10833 #endif /* end of 64-bit ABI fadvise handling */
10835 #ifdef TARGET_NR_madvise
10836 case TARGET_NR_madvise
:
10837 /* A straight passthrough may not be safe because qemu sometimes
10838 turns private file-backed mappings into anonymous mappings.
10839 This will break MADV_DONTNEED.
10840 This is a hint, so ignoring and returning success is ok. */
10843 #if TARGET_ABI_BITS == 32
10844 case TARGET_NR_fcntl64
:
10848 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10849 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10852 if (!((CPUARMState
*)cpu_env
)->eabi
) {
10853 copyfrom
= copy_from_user_oabi_flock64
;
10854 copyto
= copy_to_user_oabi_flock64
;
10858 cmd
= target_to_host_fcntl_cmd(arg2
);
10859 if (cmd
== -TARGET_EINVAL
) {
10864 case TARGET_F_GETLK64
:
10865 ret
= copyfrom(&fl
, arg3
);
10869 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10871 ret
= copyto(arg3
, &fl
);
10875 case TARGET_F_SETLK64
:
10876 case TARGET_F_SETLKW64
:
10877 ret
= copyfrom(&fl
, arg3
);
10881 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10884 ret
= do_fcntl(arg1
, arg2
, arg3
);
10890 #ifdef TARGET_NR_cacheflush
10891 case TARGET_NR_cacheflush
:
10892 /* self-modifying code is handled automatically, so nothing needed */
10895 #ifdef TARGET_NR_getpagesize
10896 case TARGET_NR_getpagesize
:
10897 return TARGET_PAGE_SIZE
;
10899 case TARGET_NR_gettid
:
10900 return get_errno(sys_gettid());
10901 #ifdef TARGET_NR_readahead
10902 case TARGET_NR_readahead
:
10903 #if TARGET_ABI_BITS == 32
10904 if (regpairs_aligned(cpu_env
, num
)) {
10909 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
10911 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10916 #ifdef TARGET_NR_setxattr
10917 case TARGET_NR_listxattr
:
10918 case TARGET_NR_llistxattr
:
10922 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10924 return -TARGET_EFAULT
;
10927 p
= lock_user_string(arg1
);
10929 if (num
== TARGET_NR_listxattr
) {
10930 ret
= get_errno(listxattr(p
, b
, arg3
));
10932 ret
= get_errno(llistxattr(p
, b
, arg3
));
10935 ret
= -TARGET_EFAULT
;
10937 unlock_user(p
, arg1
, 0);
10938 unlock_user(b
, arg2
, arg3
);
10941 case TARGET_NR_flistxattr
:
10945 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10947 return -TARGET_EFAULT
;
10950 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10951 unlock_user(b
, arg2
, arg3
);
10954 case TARGET_NR_setxattr
:
10955 case TARGET_NR_lsetxattr
:
10957 void *p
, *n
, *v
= 0;
10959 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10961 return -TARGET_EFAULT
;
10964 p
= lock_user_string(arg1
);
10965 n
= lock_user_string(arg2
);
10967 if (num
== TARGET_NR_setxattr
) {
10968 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10970 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10973 ret
= -TARGET_EFAULT
;
10975 unlock_user(p
, arg1
, 0);
10976 unlock_user(n
, arg2
, 0);
10977 unlock_user(v
, arg3
, 0);
10980 case TARGET_NR_fsetxattr
:
10984 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10986 return -TARGET_EFAULT
;
10989 n
= lock_user_string(arg2
);
10991 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10993 ret
= -TARGET_EFAULT
;
10995 unlock_user(n
, arg2
, 0);
10996 unlock_user(v
, arg3
, 0);
10999 case TARGET_NR_getxattr
:
11000 case TARGET_NR_lgetxattr
:
11002 void *p
, *n
, *v
= 0;
11004 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11006 return -TARGET_EFAULT
;
11009 p
= lock_user_string(arg1
);
11010 n
= lock_user_string(arg2
);
11012 if (num
== TARGET_NR_getxattr
) {
11013 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11015 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11018 ret
= -TARGET_EFAULT
;
11020 unlock_user(p
, arg1
, 0);
11021 unlock_user(n
, arg2
, 0);
11022 unlock_user(v
, arg3
, arg4
);
11025 case TARGET_NR_fgetxattr
:
11029 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11031 return -TARGET_EFAULT
;
11034 n
= lock_user_string(arg2
);
11036 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11038 ret
= -TARGET_EFAULT
;
11040 unlock_user(n
, arg2
, 0);
11041 unlock_user(v
, arg3
, arg4
);
11044 case TARGET_NR_removexattr
:
11045 case TARGET_NR_lremovexattr
:
11048 p
= lock_user_string(arg1
);
11049 n
= lock_user_string(arg2
);
11051 if (num
== TARGET_NR_removexattr
) {
11052 ret
= get_errno(removexattr(p
, n
));
11054 ret
= get_errno(lremovexattr(p
, n
));
11057 ret
= -TARGET_EFAULT
;
11059 unlock_user(p
, arg1
, 0);
11060 unlock_user(n
, arg2
, 0);
11063 case TARGET_NR_fremovexattr
:
11066 n
= lock_user_string(arg2
);
11068 ret
= get_errno(fremovexattr(arg1
, n
));
11070 ret
= -TARGET_EFAULT
;
11072 unlock_user(n
, arg2
, 0);
11076 #endif /* CONFIG_ATTR */
11077 #ifdef TARGET_NR_set_thread_area
11078 case TARGET_NR_set_thread_area
:
11079 #if defined(TARGET_MIPS)
11080 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11082 #elif defined(TARGET_CRIS)
11084 ret
= -TARGET_EINVAL
;
11086 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11090 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11091 return do_set_thread_area(cpu_env
, arg1
);
11092 #elif defined(TARGET_M68K)
11094 TaskState
*ts
= cpu
->opaque
;
11095 ts
->tp_value
= arg1
;
11099 return -TARGET_ENOSYS
;
11102 #ifdef TARGET_NR_get_thread_area
11103 case TARGET_NR_get_thread_area
:
11104 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11105 return do_get_thread_area(cpu_env
, arg1
);
11106 #elif defined(TARGET_M68K)
11108 TaskState
*ts
= cpu
->opaque
;
11109 return ts
->tp_value
;
11112 return -TARGET_ENOSYS
;
11115 #ifdef TARGET_NR_getdomainname
11116 case TARGET_NR_getdomainname
:
11117 return -TARGET_ENOSYS
;
11120 #ifdef TARGET_NR_clock_settime
11121 case TARGET_NR_clock_settime
:
11123 struct timespec ts
;
11125 ret
= target_to_host_timespec(&ts
, arg2
);
11126 if (!is_error(ret
)) {
11127 ret
= get_errno(clock_settime(arg1
, &ts
));
11132 #ifdef TARGET_NR_clock_gettime
11133 case TARGET_NR_clock_gettime
:
11135 struct timespec ts
;
11136 ret
= get_errno(clock_gettime(arg1
, &ts
));
11137 if (!is_error(ret
)) {
11138 ret
= host_to_target_timespec(arg2
, &ts
);
11143 #ifdef TARGET_NR_clock_getres
11144 case TARGET_NR_clock_getres
:
11146 struct timespec ts
;
11147 ret
= get_errno(clock_getres(arg1
, &ts
));
11148 if (!is_error(ret
)) {
11149 host_to_target_timespec(arg2
, &ts
);
11154 #ifdef TARGET_NR_clock_nanosleep
11155 case TARGET_NR_clock_nanosleep
:
11157 struct timespec ts
;
11158 target_to_host_timespec(&ts
, arg3
);
11159 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11160 &ts
, arg4
? &ts
: NULL
));
11162 host_to_target_timespec(arg4
, &ts
);
11164 #if defined(TARGET_PPC)
11165 /* clock_nanosleep is odd in that it returns positive errno values.
11166 * On PPC, CR0 bit 3 should be set in such a situation. */
11167 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11168 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11175 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11176 case TARGET_NR_set_tid_address
:
11177 return get_errno(set_tid_address((int *)g2h(arg1
)));
11180 case TARGET_NR_tkill
:
11181 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11183 case TARGET_NR_tgkill
:
11184 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11185 target_to_host_signal(arg3
)));
11187 #ifdef TARGET_NR_set_robust_list
11188 case TARGET_NR_set_robust_list
:
11189 case TARGET_NR_get_robust_list
:
11190 /* The ABI for supporting robust futexes has userspace pass
11191 * the kernel a pointer to a linked list which is updated by
11192 * userspace after the syscall; the list is walked by the kernel
11193 * when the thread exits. Since the linked list in QEMU guest
11194 * memory isn't a valid linked list for the host and we have
11195 * no way to reliably intercept the thread-death event, we can't
11196 * support these. Silently return ENOSYS so that guest userspace
11197 * falls back to a non-robust futex implementation (which should
11198 * be OK except in the corner case of the guest crashing while
11199 * holding a mutex that is shared with another process via
11202 return -TARGET_ENOSYS
;
11205 #if defined(TARGET_NR_utimensat)
11206 case TARGET_NR_utimensat
:
11208 struct timespec
*tsp
, ts
[2];
11212 target_to_host_timespec(ts
, arg3
);
11213 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11217 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11219 if (!(p
= lock_user_string(arg2
))) {
11220 return -TARGET_EFAULT
;
11222 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11223 unlock_user(p
, arg2
, 0);
11228 case TARGET_NR_futex
:
11229 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11230 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11231 case TARGET_NR_inotify_init
:
11232 ret
= get_errno(sys_inotify_init());
11234 fd_trans_register(ret
, &target_inotify_trans
);
11238 #ifdef CONFIG_INOTIFY1
11239 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11240 case TARGET_NR_inotify_init1
:
11241 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11242 fcntl_flags_tbl
)));
11244 fd_trans_register(ret
, &target_inotify_trans
);
11249 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11250 case TARGET_NR_inotify_add_watch
:
11251 p
= lock_user_string(arg2
);
11252 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11253 unlock_user(p
, arg2
, 0);
11256 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11257 case TARGET_NR_inotify_rm_watch
:
11258 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11261 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11262 case TARGET_NR_mq_open
:
11264 struct mq_attr posix_mq_attr
;
11265 struct mq_attr
*pposix_mq_attr
;
11268 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11269 pposix_mq_attr
= NULL
;
11271 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11272 return -TARGET_EFAULT
;
11274 pposix_mq_attr
= &posix_mq_attr
;
11276 p
= lock_user_string(arg1
- 1);
11278 return -TARGET_EFAULT
;
11280 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11281 unlock_user (p
, arg1
, 0);
11285 case TARGET_NR_mq_unlink
:
11286 p
= lock_user_string(arg1
- 1);
11288 return -TARGET_EFAULT
;
11290 ret
= get_errno(mq_unlink(p
));
11291 unlock_user (p
, arg1
, 0);
11294 case TARGET_NR_mq_timedsend
:
11296 struct timespec ts
;
11298 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11300 target_to_host_timespec(&ts
, arg5
);
11301 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11302 host_to_target_timespec(arg5
, &ts
);
11304 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11306 unlock_user (p
, arg2
, arg3
);
11310 case TARGET_NR_mq_timedreceive
:
11312 struct timespec ts
;
11315 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11317 target_to_host_timespec(&ts
, arg5
);
11318 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11320 host_to_target_timespec(arg5
, &ts
);
11322 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11325 unlock_user (p
, arg2
, arg3
);
11327 put_user_u32(prio
, arg4
);
11331 /* Not implemented for now... */
11332 /* case TARGET_NR_mq_notify: */
11335 case TARGET_NR_mq_getsetattr
:
11337 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11340 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11341 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11342 &posix_mq_attr_out
));
11343 } else if (arg3
!= 0) {
11344 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11346 if (ret
== 0 && arg3
!= 0) {
11347 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11353 #ifdef CONFIG_SPLICE
11354 #ifdef TARGET_NR_tee
11355 case TARGET_NR_tee
:
11357 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11361 #ifdef TARGET_NR_splice
11362 case TARGET_NR_splice
:
11364 loff_t loff_in
, loff_out
;
11365 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11367 if (get_user_u64(loff_in
, arg2
)) {
11368 return -TARGET_EFAULT
;
11370 ploff_in
= &loff_in
;
11373 if (get_user_u64(loff_out
, arg4
)) {
11374 return -TARGET_EFAULT
;
11376 ploff_out
= &loff_out
;
11378 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11380 if (put_user_u64(loff_in
, arg2
)) {
11381 return -TARGET_EFAULT
;
11385 if (put_user_u64(loff_out
, arg4
)) {
11386 return -TARGET_EFAULT
;
11392 #ifdef TARGET_NR_vmsplice
11393 case TARGET_NR_vmsplice
:
11395 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11397 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11398 unlock_iovec(vec
, arg2
, arg3
, 0);
11400 ret
= -host_to_target_errno(errno
);
11405 #endif /* CONFIG_SPLICE */
11406 #ifdef CONFIG_EVENTFD
11407 #if defined(TARGET_NR_eventfd)
11408 case TARGET_NR_eventfd
:
11409 ret
= get_errno(eventfd(arg1
, 0));
11411 fd_trans_register(ret
, &target_eventfd_trans
);
11415 #if defined(TARGET_NR_eventfd2)
11416 case TARGET_NR_eventfd2
:
11418 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11419 if (arg2
& TARGET_O_NONBLOCK
) {
11420 host_flags
|= O_NONBLOCK
;
11422 if (arg2
& TARGET_O_CLOEXEC
) {
11423 host_flags
|= O_CLOEXEC
;
11425 ret
= get_errno(eventfd(arg1
, host_flags
));
11427 fd_trans_register(ret
, &target_eventfd_trans
);
11432 #endif /* CONFIG_EVENTFD */
11433 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11434 case TARGET_NR_fallocate
:
11435 #if TARGET_ABI_BITS == 32
11436 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11437 target_offset64(arg5
, arg6
)));
11439 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11443 #if defined(CONFIG_SYNC_FILE_RANGE)
11444 #if defined(TARGET_NR_sync_file_range)
11445 case TARGET_NR_sync_file_range
:
11446 #if TARGET_ABI_BITS == 32
11447 #if defined(TARGET_MIPS)
11448 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11449 target_offset64(arg5
, arg6
), arg7
));
11451 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11452 target_offset64(arg4
, arg5
), arg6
));
11453 #endif /* !TARGET_MIPS */
11455 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11459 #if defined(TARGET_NR_sync_file_range2)
11460 case TARGET_NR_sync_file_range2
:
11461 /* This is like sync_file_range but the arguments are reordered */
11462 #if TARGET_ABI_BITS == 32
11463 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11464 target_offset64(arg5
, arg6
), arg2
));
11466 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11471 #if defined(TARGET_NR_signalfd4)
11472 case TARGET_NR_signalfd4
:
11473 return do_signalfd4(arg1
, arg2
, arg4
);
11475 #if defined(TARGET_NR_signalfd)
11476 case TARGET_NR_signalfd
:
11477 return do_signalfd4(arg1
, arg2
, 0);
11479 #if defined(CONFIG_EPOLL)
11480 #if defined(TARGET_NR_epoll_create)
11481 case TARGET_NR_epoll_create
:
11482 return get_errno(epoll_create(arg1
));
11484 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11485 case TARGET_NR_epoll_create1
:
11486 return get_errno(epoll_create1(arg1
));
11488 #if defined(TARGET_NR_epoll_ctl)
11489 case TARGET_NR_epoll_ctl
:
11491 struct epoll_event ep
;
11492 struct epoll_event
*epp
= 0;
11494 struct target_epoll_event
*target_ep
;
11495 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11496 return -TARGET_EFAULT
;
11498 ep
.events
= tswap32(target_ep
->events
);
11499 /* The epoll_data_t union is just opaque data to the kernel,
11500 * so we transfer all 64 bits across and need not worry what
11501 * actual data type it is.
11503 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11504 unlock_user_struct(target_ep
, arg4
, 0);
11507 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11511 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11512 #if defined(TARGET_NR_epoll_wait)
11513 case TARGET_NR_epoll_wait
:
11515 #if defined(TARGET_NR_epoll_pwait)
11516 case TARGET_NR_epoll_pwait
:
11519 struct target_epoll_event
*target_ep
;
11520 struct epoll_event
*ep
;
11522 int maxevents
= arg3
;
11523 int timeout
= arg4
;
11525 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11526 return -TARGET_EINVAL
;
11529 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11530 maxevents
* sizeof(struct target_epoll_event
), 1);
11532 return -TARGET_EFAULT
;
11535 ep
= g_try_new(struct epoll_event
, maxevents
);
11537 unlock_user(target_ep
, arg2
, 0);
11538 return -TARGET_ENOMEM
;
11542 #if defined(TARGET_NR_epoll_pwait)
11543 case TARGET_NR_epoll_pwait
:
11545 target_sigset_t
*target_set
;
11546 sigset_t _set
, *set
= &_set
;
11549 if (arg6
!= sizeof(target_sigset_t
)) {
11550 ret
= -TARGET_EINVAL
;
11554 target_set
= lock_user(VERIFY_READ
, arg5
,
11555 sizeof(target_sigset_t
), 1);
11557 ret
= -TARGET_EFAULT
;
11560 target_to_host_sigset(set
, target_set
);
11561 unlock_user(target_set
, arg5
, 0);
11566 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11567 set
, SIGSET_T_SIZE
));
11571 #if defined(TARGET_NR_epoll_wait)
11572 case TARGET_NR_epoll_wait
:
11573 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11578 ret
= -TARGET_ENOSYS
;
11580 if (!is_error(ret
)) {
11582 for (i
= 0; i
< ret
; i
++) {
11583 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11584 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11586 unlock_user(target_ep
, arg2
,
11587 ret
* sizeof(struct target_epoll_event
));
11589 unlock_user(target_ep
, arg2
, 0);
11596 #ifdef TARGET_NR_prlimit64
11597 case TARGET_NR_prlimit64
:
11599 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11600 struct target_rlimit64
*target_rnew
, *target_rold
;
11601 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11602 int resource
= target_to_host_resource(arg2
);
11604 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11605 return -TARGET_EFAULT
;
11607 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11608 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11609 unlock_user_struct(target_rnew
, arg3
, 0);
11613 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11614 if (!is_error(ret
) && arg4
) {
11615 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11616 return -TARGET_EFAULT
;
11618 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11619 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11620 unlock_user_struct(target_rold
, arg4
, 1);
11625 #ifdef TARGET_NR_gethostname
11626 case TARGET_NR_gethostname
:
11628 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11630 ret
= get_errno(gethostname(name
, arg2
));
11631 unlock_user(name
, arg1
, arg2
);
11633 ret
= -TARGET_EFAULT
;
11638 #ifdef TARGET_NR_atomic_cmpxchg_32
11639 case TARGET_NR_atomic_cmpxchg_32
:
11641 /* should use start_exclusive from main.c */
11642 abi_ulong mem_value
;
11643 if (get_user_u32(mem_value
, arg6
)) {
11644 target_siginfo_t info
;
11645 info
.si_signo
= SIGSEGV
;
11647 info
.si_code
= TARGET_SEGV_MAPERR
;
11648 info
._sifields
._sigfault
._addr
= arg6
;
11649 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11650 QEMU_SI_FAULT
, &info
);
11654 if (mem_value
== arg2
)
11655 put_user_u32(arg1
, arg6
);
11659 #ifdef TARGET_NR_atomic_barrier
11660 case TARGET_NR_atomic_barrier
:
11661 /* Like the kernel implementation and the
11662 qemu arm barrier, no-op this? */
11666 #ifdef TARGET_NR_timer_create
11667 case TARGET_NR_timer_create
:
11669 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11671 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11674 int timer_index
= next_free_host_timer();
11676 if (timer_index
< 0) {
11677 ret
= -TARGET_EAGAIN
;
11679 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11682 phost_sevp
= &host_sevp
;
11683 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11689 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11693 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11694 return -TARGET_EFAULT
;
11702 #ifdef TARGET_NR_timer_settime
11703 case TARGET_NR_timer_settime
:
11705 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11706 * struct itimerspec * old_value */
11707 target_timer_t timerid
= get_timer_id(arg1
);
11711 } else if (arg3
== 0) {
11712 ret
= -TARGET_EINVAL
;
11714 timer_t htimer
= g_posix_timers
[timerid
];
11715 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11717 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11718 return -TARGET_EFAULT
;
11721 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11722 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11723 return -TARGET_EFAULT
;
11730 #ifdef TARGET_NR_timer_gettime
11731 case TARGET_NR_timer_gettime
:
11733 /* args: timer_t timerid, struct itimerspec *curr_value */
11734 target_timer_t timerid
= get_timer_id(arg1
);
11738 } else if (!arg2
) {
11739 ret
= -TARGET_EFAULT
;
11741 timer_t htimer
= g_posix_timers
[timerid
];
11742 struct itimerspec hspec
;
11743 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11745 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11746 ret
= -TARGET_EFAULT
;
11753 #ifdef TARGET_NR_timer_getoverrun
11754 case TARGET_NR_timer_getoverrun
:
11756 /* args: timer_t timerid */
11757 target_timer_t timerid
= get_timer_id(arg1
);
11762 timer_t htimer
= g_posix_timers
[timerid
];
11763 ret
= get_errno(timer_getoverrun(htimer
));
11765 fd_trans_unregister(ret
);
11770 #ifdef TARGET_NR_timer_delete
11771 case TARGET_NR_timer_delete
:
11773 /* args: timer_t timerid */
11774 target_timer_t timerid
= get_timer_id(arg1
);
11779 timer_t htimer
= g_posix_timers
[timerid
];
11780 ret
= get_errno(timer_delete(htimer
));
11781 g_posix_timers
[timerid
] = 0;
11787 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11788 case TARGET_NR_timerfd_create
:
11789 return get_errno(timerfd_create(arg1
,
11790 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11793 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11794 case TARGET_NR_timerfd_gettime
:
11796 struct itimerspec its_curr
;
11798 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11800 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11801 return -TARGET_EFAULT
;
11807 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11808 case TARGET_NR_timerfd_settime
:
11810 struct itimerspec its_new
, its_old
, *p_new
;
11813 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11814 return -TARGET_EFAULT
;
11821 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11823 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11824 return -TARGET_EFAULT
;
11830 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11831 case TARGET_NR_ioprio_get
:
11832 return get_errno(ioprio_get(arg1
, arg2
));
11835 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11836 case TARGET_NR_ioprio_set
:
11837 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
11840 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11841 case TARGET_NR_setns
:
11842 return get_errno(setns(arg1
, arg2
));
11844 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11845 case TARGET_NR_unshare
:
11846 return get_errno(unshare(arg1
));
11848 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11849 case TARGET_NR_kcmp
:
11850 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
11852 #ifdef TARGET_NR_swapcontext
11853 case TARGET_NR_swapcontext
:
11854 /* PowerPC specific. */
11855 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
11859 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
11860 return -TARGET_ENOSYS
;
11865 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
11866 abi_long arg2
, abi_long arg3
, abi_long arg4
,
11867 abi_long arg5
, abi_long arg6
, abi_long arg7
,
11870 CPUState
*cpu
= env_cpu(cpu_env
);
11873 #ifdef DEBUG_ERESTARTSYS
11874 /* Debug-only code for exercising the syscall-restart code paths
11875 * in the per-architecture cpu main loops: restart every syscall
11876 * the guest makes once before letting it through.
11882 return -TARGET_ERESTARTSYS
;
11887 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
11888 arg5
, arg6
, arg7
, arg8
);
11890 if (unlikely(do_strace
)) {
11891 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11892 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11893 arg5
, arg6
, arg7
, arg8
);
11894 print_syscall_ret(num
, ret
);
11896 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11897 arg5
, arg6
, arg7
, arg8
);
11900 trace_guest_user_syscall_ret(cpu
, num
, ret
);