4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include "linux_loop.h"
109 #include "fd-trans.h"
112 #define CLONE_IO 0x80000000 /* Clone io context */
115 /* We can't directly call the host clone syscall, because this will
116 * badly confuse libc (breaking mutexes, for example). So we must
117 * divide clone flags into:
118 * * flag combinations that look like pthread_create()
119 * * flag combinations that look like fork()
120 * * flags we can implement within QEMU itself
121 * * flags we can't support and will return an error for
123 /* For thread creation, all these flags must be present; for
124 * fork, none must be present.
126 #define CLONE_THREAD_FLAGS \
127 (CLONE_VM | CLONE_FS | CLONE_FILES | \
128 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
130 /* These flags are ignored:
131 * CLONE_DETACHED is now ignored by the kernel;
132 * CLONE_IO is just an optimisation hint to the I/O scheduler
134 #define CLONE_IGNORED_FLAGS \
135 (CLONE_DETACHED | CLONE_IO)
137 /* Flags for fork which we can implement within QEMU itself */
138 #define CLONE_OPTIONAL_FORK_FLAGS \
139 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
140 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
142 /* Flags for thread creation which we can implement within QEMU itself */
143 #define CLONE_OPTIONAL_THREAD_FLAGS \
144 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
145 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
147 #define CLONE_INVALID_FORK_FLAGS \
148 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
150 #define CLONE_INVALID_THREAD_FLAGS \
151 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
152 CLONE_IGNORED_FLAGS))
154 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
155 * have almost all been allocated. We cannot support any of
156 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
157 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
158 * The checks against the invalid thread masks above will catch these.
159 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
162 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
163 * once. This exercises the codepaths for restart.
165 //#define DEBUG_ERESTARTSYS
167 //#include <linux/msdos_fs.h>
168 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
169 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
179 #define _syscall0(type,name) \
180 static type name (void) \
182 return syscall(__NR_##name); \
185 #define _syscall1(type,name,type1,arg1) \
186 static type name (type1 arg1) \
188 return syscall(__NR_##name, arg1); \
191 #define _syscall2(type,name,type1,arg1,type2,arg2) \
192 static type name (type1 arg1,type2 arg2) \
194 return syscall(__NR_##name, arg1, arg2); \
197 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
198 static type name (type1 arg1,type2 arg2,type3 arg3) \
200 return syscall(__NR_##name, arg1, arg2, arg3); \
203 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
204 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
206 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
209 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
213 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
217 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
218 type5,arg5,type6,arg6) \
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
222 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
226 #define __NR_sys_uname __NR_uname
227 #define __NR_sys_getcwd1 __NR_getcwd
228 #define __NR_sys_getdents __NR_getdents
229 #define __NR_sys_getdents64 __NR_getdents64
230 #define __NR_sys_getpriority __NR_getpriority
231 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
232 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
233 #define __NR_sys_syslog __NR_syslog
234 #define __NR_sys_futex __NR_futex
235 #define __NR_sys_inotify_init __NR_inotify_init
236 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
237 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
239 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
240 #define __NR__llseek __NR_lseek
243 /* Newer kernel ports have llseek() instead of _llseek() */
244 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
245 #define TARGET_NR__llseek TARGET_NR_llseek
249 _syscall0(int, gettid
)
251 /* This is a replacement for the host gettid() and must return a host
253 static int gettid(void) {
258 /* For the 64-bit guest on 32-bit host case we must emulate
259 * getdents using getdents64, because otherwise the host
260 * might hand us back more dirent records than we can fit
261 * into the guest buffer after structure format conversion.
262 * Otherwise we emulate getdents with getdents if the host has it.
264 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
265 #define EMULATE_GETDENTS_WITH_GETDENTS
268 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
269 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
271 #if (defined(TARGET_NR_getdents) && \
272 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
273 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
274 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
276 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
277 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
278 loff_t
*, res
, uint
, wh
);
280 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
281 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
283 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
284 #ifdef __NR_exit_group
285 _syscall1(int,exit_group
,int,error_code
)
287 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
288 _syscall1(int,set_tid_address
,int *,tidptr
)
290 #if defined(TARGET_NR_futex) && defined(__NR_futex)
291 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
292 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
294 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
295 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
296 unsigned long *, user_mask_ptr
);
297 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
298 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
299 unsigned long *, user_mask_ptr
);
300 #define __NR_sys_getcpu __NR_getcpu
301 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
302 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
304 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
305 struct __user_cap_data_struct
*, data
);
306 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
307 struct __user_cap_data_struct
*, data
);
308 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
309 _syscall2(int, ioprio_get
, int, which
, int, who
)
311 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
312 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
314 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
315 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
318 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
319 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
320 unsigned long, idx1
, unsigned long, idx2
)
323 static bitmask_transtbl fcntl_flags_tbl
[] = {
324 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
325 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
326 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
327 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
328 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
329 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
330 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
331 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
332 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
333 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
334 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
335 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
336 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
337 #if defined(O_DIRECT)
338 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
340 #if defined(O_NOATIME)
341 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
343 #if defined(O_CLOEXEC)
344 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
347 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
349 #if defined(O_TMPFILE)
350 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
352 /* Don't terminate the list prematurely on 64-bit host+guest. */
353 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
354 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/*
 * getcwd() wrapper using the kernel return convention expected by the
 * getcwd syscall emulation: on success, return the length of the path
 * string including its trailing NUL; on failure, return -1 (getcwd()
 * itself has already set errno, which the caller converts).
 *
 * @buf:  destination buffer for the absolute pathname
 * @size: capacity of @buf in bytes
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
368 #ifdef TARGET_NR_utimensat
369 #if defined(__NR_utimensat)
370 #define __NR_sys_utimensat __NR_utimensat
371 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
372 const struct timespec
*,tsp
,int,flags
)
374 static int sys_utimensat(int dirfd
, const char *pathname
,
375 const struct timespec times
[2], int flags
)
381 #endif /* TARGET_NR_utimensat */
383 #ifdef TARGET_NR_renameat2
384 #if defined(__NR_renameat2)
385 #define __NR_sys_renameat2 __NR_renameat2
386 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
387 const char *, new, unsigned int, flags
)
389 static int sys_renameat2(int oldfd
, const char *old
,
390 int newfd
, const char *new, int flags
)
393 return renameat(oldfd
, old
, newfd
, new);
399 #endif /* TARGET_NR_renameat2 */
401 #ifdef CONFIG_INOTIFY
402 #include <sys/inotify.h>
404 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin host wrapper: create a new inotify instance and return its fd,
 * or -1 with errno set, exactly as inotify_init() does. */
static int sys_inotify_init(void)
{
    int fd = inotify_init();

    return fd;
}
410 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin host wrapper: add a watch for @pathname on the inotify instance
 * @fd with event mask @mask; returns the watch descriptor or -1/errno. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    int wd;

    wd = inotify_add_watch(fd, pathname, mask);
    return wd;
}
416 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin host wrapper: remove watch descriptor @wd from inotify instance
 * @fd; returns 0 on success or -1 with errno set. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    int ret;

    ret = inotify_rm_watch(fd, wd);
    return ret;
}
422 #ifdef CONFIG_INOTIFY1
423 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin host wrapper: create an inotify instance with @flags
 * (IN_NONBLOCK / IN_CLOEXEC); returns the fd or -1 with errno set. */
static int sys_inotify_init1(int flags)
{
    int fd = inotify_init1(flags);

    return fd;
}
431 /* Userspace can usually survive runtime without inotify */
432 #undef TARGET_NR_inotify_init
433 #undef TARGET_NR_inotify_init1
434 #undef TARGET_NR_inotify_add_watch
435 #undef TARGET_NR_inotify_rm_watch
436 #endif /* CONFIG_INOTIFY */
438 #if defined(TARGET_NR_prlimit64)
439 #ifndef __NR_prlimit64
440 # define __NR_prlimit64 -1
442 #define __NR_sys_prlimit64 __NR_prlimit64
443 /* The glibc rlimit structure may not be that used by the underlying syscall */
444 struct host_rlimit64
{
448 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
449 const struct host_rlimit64
*, new_limit
,
450 struct host_rlimit64
*, old_limit
)
454 #if defined(TARGET_NR_timer_create)
455 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
456 static timer_t g_posix_timers
[32] = { 0, } ;
458 static inline int next_free_host_timer(void)
461 /* FIXME: Does finding the next free slot require a lock? */
462 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
463 if (g_posix_timers
[k
] == 0) {
464 g_posix_timers
[k
] = (timer_t
) 1;
472 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
474 static inline int regpairs_aligned(void *cpu_env
, int num
)
476 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
478 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
479 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
480 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
481 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
482 * of registers which translates to the same as ARM/MIPS, because we start with
484 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
485 #elif defined(TARGET_SH4)
486 /* SH4 doesn't align register pairs, except for p{read,write}64 */
487 static inline int regpairs_aligned(void *cpu_env
, int num
)
490 case TARGET_NR_pread64
:
491 case TARGET_NR_pwrite64
:
498 #elif defined(TARGET_XTENSA)
499 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
501 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
504 #define ERRNO_TABLE_SIZE 1200
506 /* target_to_host_errno_table[] is initialized from
507 * host_to_target_errno_table[] in syscall_init(). */
508 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
512 * This list is the union of errno values overridden in asm-<arch>/errno.h
513 * minus the errnos that are not actually generic to all archs.
515 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
516 [EAGAIN
] = TARGET_EAGAIN
,
517 [EIDRM
] = TARGET_EIDRM
,
518 [ECHRNG
] = TARGET_ECHRNG
,
519 [EL2NSYNC
] = TARGET_EL2NSYNC
,
520 [EL3HLT
] = TARGET_EL3HLT
,
521 [EL3RST
] = TARGET_EL3RST
,
522 [ELNRNG
] = TARGET_ELNRNG
,
523 [EUNATCH
] = TARGET_EUNATCH
,
524 [ENOCSI
] = TARGET_ENOCSI
,
525 [EL2HLT
] = TARGET_EL2HLT
,
526 [EDEADLK
] = TARGET_EDEADLK
,
527 [ENOLCK
] = TARGET_ENOLCK
,
528 [EBADE
] = TARGET_EBADE
,
529 [EBADR
] = TARGET_EBADR
,
530 [EXFULL
] = TARGET_EXFULL
,
531 [ENOANO
] = TARGET_ENOANO
,
532 [EBADRQC
] = TARGET_EBADRQC
,
533 [EBADSLT
] = TARGET_EBADSLT
,
534 [EBFONT
] = TARGET_EBFONT
,
535 [ENOSTR
] = TARGET_ENOSTR
,
536 [ENODATA
] = TARGET_ENODATA
,
537 [ETIME
] = TARGET_ETIME
,
538 [ENOSR
] = TARGET_ENOSR
,
539 [ENONET
] = TARGET_ENONET
,
540 [ENOPKG
] = TARGET_ENOPKG
,
541 [EREMOTE
] = TARGET_EREMOTE
,
542 [ENOLINK
] = TARGET_ENOLINK
,
543 [EADV
] = TARGET_EADV
,
544 [ESRMNT
] = TARGET_ESRMNT
,
545 [ECOMM
] = TARGET_ECOMM
,
546 [EPROTO
] = TARGET_EPROTO
,
547 [EDOTDOT
] = TARGET_EDOTDOT
,
548 [EMULTIHOP
] = TARGET_EMULTIHOP
,
549 [EBADMSG
] = TARGET_EBADMSG
,
550 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
551 [EOVERFLOW
] = TARGET_EOVERFLOW
,
552 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
553 [EBADFD
] = TARGET_EBADFD
,
554 [EREMCHG
] = TARGET_EREMCHG
,
555 [ELIBACC
] = TARGET_ELIBACC
,
556 [ELIBBAD
] = TARGET_ELIBBAD
,
557 [ELIBSCN
] = TARGET_ELIBSCN
,
558 [ELIBMAX
] = TARGET_ELIBMAX
,
559 [ELIBEXEC
] = TARGET_ELIBEXEC
,
560 [EILSEQ
] = TARGET_EILSEQ
,
561 [ENOSYS
] = TARGET_ENOSYS
,
562 [ELOOP
] = TARGET_ELOOP
,
563 [ERESTART
] = TARGET_ERESTART
,
564 [ESTRPIPE
] = TARGET_ESTRPIPE
,
565 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
566 [EUSERS
] = TARGET_EUSERS
,
567 [ENOTSOCK
] = TARGET_ENOTSOCK
,
568 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
569 [EMSGSIZE
] = TARGET_EMSGSIZE
,
570 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
571 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
572 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
573 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
574 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
575 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
576 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
577 [EADDRINUSE
] = TARGET_EADDRINUSE
,
578 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
579 [ENETDOWN
] = TARGET_ENETDOWN
,
580 [ENETUNREACH
] = TARGET_ENETUNREACH
,
581 [ENETRESET
] = TARGET_ENETRESET
,
582 [ECONNABORTED
] = TARGET_ECONNABORTED
,
583 [ECONNRESET
] = TARGET_ECONNRESET
,
584 [ENOBUFS
] = TARGET_ENOBUFS
,
585 [EISCONN
] = TARGET_EISCONN
,
586 [ENOTCONN
] = TARGET_ENOTCONN
,
587 [EUCLEAN
] = TARGET_EUCLEAN
,
588 [ENOTNAM
] = TARGET_ENOTNAM
,
589 [ENAVAIL
] = TARGET_ENAVAIL
,
590 [EISNAM
] = TARGET_EISNAM
,
591 [EREMOTEIO
] = TARGET_EREMOTEIO
,
592 [EDQUOT
] = TARGET_EDQUOT
,
593 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
594 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
595 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
596 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
597 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
598 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
599 [EALREADY
] = TARGET_EALREADY
,
600 [EINPROGRESS
] = TARGET_EINPROGRESS
,
601 [ESTALE
] = TARGET_ESTALE
,
602 [ECANCELED
] = TARGET_ECANCELED
,
603 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
604 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
606 [ENOKEY
] = TARGET_ENOKEY
,
609 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
612 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
615 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
618 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
620 #ifdef ENOTRECOVERABLE
621 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
624 [ENOMSG
] = TARGET_ENOMSG
,
627 [ERFKILL
] = TARGET_ERFKILL
,
630 [EHWPOISON
] = TARGET_EHWPOISON
,
634 static inline int host_to_target_errno(int err
)
636 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
637 host_to_target_errno_table
[err
]) {
638 return host_to_target_errno_table
[err
];
643 static inline int target_to_host_errno(int err
)
645 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
646 target_to_host_errno_table
[err
]) {
647 return target_to_host_errno_table
[err
];
652 static inline abi_long
get_errno(abi_long ret
)
655 return -host_to_target_errno(errno
);
660 const char *target_strerror(int err
)
662 if (err
== TARGET_ERESTARTSYS
) {
663 return "To be restarted";
665 if (err
== TARGET_QEMU_ESIGRETURN
) {
666 return "Successful exit from sigreturn";
669 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
672 return strerror(target_to_host_errno(err
));
675 #define safe_syscall0(type, name) \
676 static type safe_##name(void) \
678 return safe_syscall(__NR_##name); \
681 #define safe_syscall1(type, name, type1, arg1) \
682 static type safe_##name(type1 arg1) \
684 return safe_syscall(__NR_##name, arg1); \
687 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
688 static type safe_##name(type1 arg1, type2 arg2) \
690 return safe_syscall(__NR_##name, arg1, arg2); \
693 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
694 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
696 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
699 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
701 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
703 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
706 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
707 type4, arg4, type5, arg5) \
708 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
714 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
715 type4, arg4, type5, arg5, type6, arg6) \
716 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
717 type5 arg5, type6 arg6) \
719 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
722 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
723 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
724 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
725 int, flags
, mode_t
, mode
)
726 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
727 struct rusage
*, rusage
)
728 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
729 int, options
, struct rusage
*, rusage
)
730 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
731 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
732 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
733 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
734 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
736 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
737 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
739 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
740 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
741 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
742 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
743 safe_syscall2(int, tkill
, int, tid
, int, sig
)
744 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
745 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
746 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
747 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
748 unsigned long, pos_l
, unsigned long, pos_h
)
749 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
750 unsigned long, pos_l
, unsigned long, pos_h
)
751 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
753 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
754 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
755 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
756 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
757 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
758 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
759 safe_syscall2(int, flock
, int, fd
, int, operation
)
760 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
761 const struct timespec
*, uts
, size_t, sigsetsize
)
762 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
764 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
765 struct timespec
*, rem
)
766 #ifdef TARGET_NR_clock_nanosleep
767 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
768 const struct timespec
*, req
, struct timespec
*, rem
)
771 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
773 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
774 long, msgtype
, int, flags
)
775 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
776 unsigned, nsops
, const struct timespec
*, timeout
)
778 /* This host kernel architecture uses a single ipc syscall; fake up
779 * wrappers for the sub-operations to hide this implementation detail.
780 * Annoyingly we can't include linux/ipc.h to get the constant definitions
781 * for the call parameter because some structs in there conflict with the
782 * sys/ipc.h ones. So we just define them here, and rely on them being
783 * the same for all host architectures.
785 #define Q_SEMTIMEDOP 4
788 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
790 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
791 void *, ptr
, long, fifth
)
792 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
794 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
796 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
798 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
800 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
801 const struct timespec
*timeout
)
803 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
807 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
808 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
809 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
810 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
811 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
813 /* We do ioctl like this rather than via safe_syscall3 to preserve the
814 * "third argument might be integer or pointer or not present" behaviour of
817 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
818 /* Similarly for fcntl. Note that callers must always:
819 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
820 * use the flock64 struct rather than unsuffixed flock
821 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
824 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
826 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
829 static inline int host_to_target_sock_type(int host_type
)
833 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
835 target_type
= TARGET_SOCK_DGRAM
;
838 target_type
= TARGET_SOCK_STREAM
;
841 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
845 #if defined(SOCK_CLOEXEC)
846 if (host_type
& SOCK_CLOEXEC
) {
847 target_type
|= TARGET_SOCK_CLOEXEC
;
851 #if defined(SOCK_NONBLOCK)
852 if (host_type
& SOCK_NONBLOCK
) {
853 target_type
|= TARGET_SOCK_NONBLOCK
;
860 static abi_ulong target_brk
;
861 static abi_ulong target_original_brk
;
862 static abi_ulong brk_page
;
864 void target_set_brk(abi_ulong new_brk
)
866 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
867 brk_page
= HOST_PAGE_ALIGN(target_brk
);
870 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
871 #define DEBUGF_BRK(message, args...)
873 /* do_brk() must return target values and target errnos. */
874 abi_long
do_brk(abi_ulong new_brk
)
876 abi_long mapped_addr
;
877 abi_ulong new_alloc_size
;
879 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
882 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
885 if (new_brk
< target_original_brk
) {
886 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
891 /* If the new brk is less than the highest page reserved to the
892 * target heap allocation, set it and we're almost done... */
893 if (new_brk
<= brk_page
) {
894 /* Heap contents are initialized to zero, as for anonymous
896 if (new_brk
> target_brk
) {
897 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
899 target_brk
= new_brk
;
900 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
904 /* We need to allocate more memory after the brk... Note that
905 * we don't use MAP_FIXED because that will map over the top of
906 * any existing mapping (like the one with the host libc or qemu
907 * itself); instead we treat "mapped but at wrong address" as
908 * a failure and unmap again.
910 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
911 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
912 PROT_READ
|PROT_WRITE
,
913 MAP_ANON
|MAP_PRIVATE
, 0, 0));
915 if (mapped_addr
== brk_page
) {
916 /* Heap contents are initialized to zero, as for anonymous
917 * mapped pages. Technically the new pages are already
918 * initialized to zero since they *are* anonymous mapped
919 * pages, however we have to take care with the contents that
920 * come from the remaining part of the previous page: it may
921 * contains garbage data due to a previous heap usage (grown
923 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
925 target_brk
= new_brk
;
926 brk_page
= HOST_PAGE_ALIGN(target_brk
);
927 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
930 } else if (mapped_addr
!= -1) {
931 /* Mapped but at wrong address, meaning there wasn't actually
932 * enough space for this brk.
934 target_munmap(mapped_addr
, new_alloc_size
);
936 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
939 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
942 #if defined(TARGET_ALPHA)
943 /* We (partially) emulate OSF/1 on Alpha, which requires we
944 return a proper errno, not an unchanged brk value. */
945 return -TARGET_ENOMEM
;
947 /* For everything else, return the previous break. */
951 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
952 abi_ulong target_fds_addr
,
956 abi_ulong b
, *target_fds
;
958 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
959 if (!(target_fds
= lock_user(VERIFY_READ
,
961 sizeof(abi_ulong
) * nw
,
963 return -TARGET_EFAULT
;
967 for (i
= 0; i
< nw
; i
++) {
968 /* grab the abi_ulong */
969 __get_user(b
, &target_fds
[i
]);
970 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
971 /* check the bit inside the abi_ulong */
978 unlock_user(target_fds
, target_fds_addr
, 0);
983 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
984 abi_ulong target_fds_addr
,
987 if (target_fds_addr
) {
988 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
989 return -TARGET_EFAULT
;
997 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1003 abi_ulong
*target_fds
;
1005 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1006 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1008 sizeof(abi_ulong
) * nw
,
1010 return -TARGET_EFAULT
;
1013 for (i
= 0; i
< nw
; i
++) {
1015 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1016 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1019 __put_user(v
, &target_fds
[i
]);
1022 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1027 #if defined(__alpha__)
1028 #define HOST_HZ 1024
1033 static inline abi_long
host_to_target_clock_t(long ticks
)
1035 #if HOST_HZ == TARGET_HZ
1038 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1042 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1043 const struct rusage
*rusage
)
1045 struct target_rusage
*target_rusage
;
1047 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1048 return -TARGET_EFAULT
;
1049 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1050 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1051 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1052 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1053 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1054 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1055 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1056 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1057 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1058 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1059 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1060 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1061 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1062 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1063 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1064 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1065 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1066 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1067 unlock_user_struct(target_rusage
, target_addr
, 1);
1072 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1074 abi_ulong target_rlim_swap
;
1077 target_rlim_swap
= tswapal(target_rlim
);
1078 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1079 return RLIM_INFINITY
;
1081 result
= target_rlim_swap
;
1082 if (target_rlim_swap
!= (rlim_t
)result
)
1083 return RLIM_INFINITY
;
1088 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1090 abi_ulong target_rlim_swap
;
1093 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1094 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1096 target_rlim_swap
= rlim
;
1097 result
= tswapal(target_rlim_swap
);
1102 static inline int target_to_host_resource(int code
)
1105 case TARGET_RLIMIT_AS
:
1107 case TARGET_RLIMIT_CORE
:
1109 case TARGET_RLIMIT_CPU
:
1111 case TARGET_RLIMIT_DATA
:
1113 case TARGET_RLIMIT_FSIZE
:
1114 return RLIMIT_FSIZE
;
1115 case TARGET_RLIMIT_LOCKS
:
1116 return RLIMIT_LOCKS
;
1117 case TARGET_RLIMIT_MEMLOCK
:
1118 return RLIMIT_MEMLOCK
;
1119 case TARGET_RLIMIT_MSGQUEUE
:
1120 return RLIMIT_MSGQUEUE
;
1121 case TARGET_RLIMIT_NICE
:
1123 case TARGET_RLIMIT_NOFILE
:
1124 return RLIMIT_NOFILE
;
1125 case TARGET_RLIMIT_NPROC
:
1126 return RLIMIT_NPROC
;
1127 case TARGET_RLIMIT_RSS
:
1129 case TARGET_RLIMIT_RTPRIO
:
1130 return RLIMIT_RTPRIO
;
1131 case TARGET_RLIMIT_SIGPENDING
:
1132 return RLIMIT_SIGPENDING
;
1133 case TARGET_RLIMIT_STACK
:
1134 return RLIMIT_STACK
;
1140 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1141 abi_ulong target_tv_addr
)
1143 struct target_timeval
*target_tv
;
1145 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1146 return -TARGET_EFAULT
;
1148 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1149 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1151 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1156 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1157 const struct timeval
*tv
)
1159 struct target_timeval
*target_tv
;
1161 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1162 return -TARGET_EFAULT
;
1164 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1165 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1167 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1172 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1173 abi_ulong target_tz_addr
)
1175 struct target_timezone
*target_tz
;
1177 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1178 return -TARGET_EFAULT
;
1181 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1182 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1184 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1189 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1192 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1193 abi_ulong target_mq_attr_addr
)
1195 struct target_mq_attr
*target_mq_attr
;
1197 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1198 target_mq_attr_addr
, 1))
1199 return -TARGET_EFAULT
;
1201 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1202 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1203 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1204 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1206 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1211 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1212 const struct mq_attr
*attr
)
1214 struct target_mq_attr
*target_mq_attr
;
1216 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1217 target_mq_attr_addr
, 0))
1218 return -TARGET_EFAULT
;
1220 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1221 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1222 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1223 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1225 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1231 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1232 /* do_select() must return target values and target errnos. */
1233 static abi_long
do_select(int n
,
1234 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1235 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1237 fd_set rfds
, wfds
, efds
;
1238 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1240 struct timespec ts
, *ts_ptr
;
1243 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1247 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1251 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1256 if (target_tv_addr
) {
1257 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1258 return -TARGET_EFAULT
;
1259 ts
.tv_sec
= tv
.tv_sec
;
1260 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1266 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1269 if (!is_error(ret
)) {
1270 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1271 return -TARGET_EFAULT
;
1272 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1273 return -TARGET_EFAULT
;
1274 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1275 return -TARGET_EFAULT
;
1277 if (target_tv_addr
) {
1278 tv
.tv_sec
= ts
.tv_sec
;
1279 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1280 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1281 return -TARGET_EFAULT
;
1289 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1290 static abi_long
do_old_select(abi_ulong arg1
)
1292 struct target_sel_arg_struct
*sel
;
1293 abi_ulong inp
, outp
, exp
, tvp
;
1296 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1297 return -TARGET_EFAULT
;
1300 nsel
= tswapal(sel
->n
);
1301 inp
= tswapal(sel
->inp
);
1302 outp
= tswapal(sel
->outp
);
1303 exp
= tswapal(sel
->exp
);
1304 tvp
= tswapal(sel
->tvp
);
1306 unlock_user_struct(sel
, arg1
, 0);
1308 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1313 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1316 return pipe2(host_pipe
, flags
);
1322 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1323 int flags
, int is_pipe2
)
1327 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1330 return get_errno(ret
);
1332 /* Several targets have special calling conventions for the original
1333 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1335 #if defined(TARGET_ALPHA)
1336 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1337 return host_pipe
[0];
1338 #elif defined(TARGET_MIPS)
1339 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1340 return host_pipe
[0];
1341 #elif defined(TARGET_SH4)
1342 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1343 return host_pipe
[0];
1344 #elif defined(TARGET_SPARC)
1345 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1346 return host_pipe
[0];
1350 if (put_user_s32(host_pipe
[0], pipedes
)
1351 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1352 return -TARGET_EFAULT
;
1353 return get_errno(ret
);
1356 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1357 abi_ulong target_addr
,
1360 struct target_ip_mreqn
*target_smreqn
;
1362 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1364 return -TARGET_EFAULT
;
1365 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1366 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1367 if (len
== sizeof(struct target_ip_mreqn
))
1368 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1369 unlock_user(target_smreqn
, target_addr
, 0);
1374 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1375 abi_ulong target_addr
,
1378 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1379 sa_family_t sa_family
;
1380 struct target_sockaddr
*target_saddr
;
1382 if (fd_trans_target_to_host_addr(fd
)) {
1383 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1386 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1388 return -TARGET_EFAULT
;
1390 sa_family
= tswap16(target_saddr
->sa_family
);
1392 /* Oops. The caller might send a incomplete sun_path; sun_path
1393 * must be terminated by \0 (see the manual page), but
1394 * unfortunately it is quite common to specify sockaddr_un
1395 * length as "strlen(x->sun_path)" while it should be
1396 * "strlen(...) + 1". We'll fix that here if needed.
1397 * Linux kernel has a similar feature.
1400 if (sa_family
== AF_UNIX
) {
1401 if (len
< unix_maxlen
&& len
> 0) {
1402 char *cp
= (char*)target_saddr
;
1404 if ( cp
[len
-1] && !cp
[len
] )
1407 if (len
> unix_maxlen
)
1411 memcpy(addr
, target_saddr
, len
);
1412 addr
->sa_family
= sa_family
;
1413 if (sa_family
== AF_NETLINK
) {
1414 struct sockaddr_nl
*nladdr
;
1416 nladdr
= (struct sockaddr_nl
*)addr
;
1417 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1418 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1419 } else if (sa_family
== AF_PACKET
) {
1420 struct target_sockaddr_ll
*lladdr
;
1422 lladdr
= (struct target_sockaddr_ll
*)addr
;
1423 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1424 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1426 unlock_user(target_saddr
, target_addr
, 0);
1431 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1432 struct sockaddr
*addr
,
1435 struct target_sockaddr
*target_saddr
;
1442 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1444 return -TARGET_EFAULT
;
1445 memcpy(target_saddr
, addr
, len
);
1446 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1447 sizeof(target_saddr
->sa_family
)) {
1448 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1450 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1451 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1452 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1453 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1454 } else if (addr
->sa_family
== AF_PACKET
) {
1455 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1456 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1457 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1458 } else if (addr
->sa_family
== AF_INET6
&&
1459 len
>= sizeof(struct target_sockaddr_in6
)) {
1460 struct target_sockaddr_in6
*target_in6
=
1461 (struct target_sockaddr_in6
*)target_saddr
;
1462 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1464 unlock_user(target_saddr
, target_addr
, len
);
1469 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1470 struct target_msghdr
*target_msgh
)
1472 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1473 abi_long msg_controllen
;
1474 abi_ulong target_cmsg_addr
;
1475 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1476 socklen_t space
= 0;
1478 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1479 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1481 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1482 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1483 target_cmsg_start
= target_cmsg
;
1485 return -TARGET_EFAULT
;
1487 while (cmsg
&& target_cmsg
) {
1488 void *data
= CMSG_DATA(cmsg
);
1489 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1491 int len
= tswapal(target_cmsg
->cmsg_len
)
1492 - sizeof(struct target_cmsghdr
);
1494 space
+= CMSG_SPACE(len
);
1495 if (space
> msgh
->msg_controllen
) {
1496 space
-= CMSG_SPACE(len
);
1497 /* This is a QEMU bug, since we allocated the payload
1498 * area ourselves (unlike overflow in host-to-target
1499 * conversion, which is just the guest giving us a buffer
1500 * that's too small). It can't happen for the payload types
1501 * we currently support; if it becomes an issue in future
1502 * we would need to improve our allocation strategy to
1503 * something more intelligent than "twice the size of the
1504 * target buffer we're reading from".
1506 gemu_log("Host cmsg overflow\n");
1510 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1511 cmsg
->cmsg_level
= SOL_SOCKET
;
1513 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1515 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1516 cmsg
->cmsg_len
= CMSG_LEN(len
);
1518 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1519 int *fd
= (int *)data
;
1520 int *target_fd
= (int *)target_data
;
1521 int i
, numfds
= len
/ sizeof(int);
1523 for (i
= 0; i
< numfds
; i
++) {
1524 __get_user(fd
[i
], target_fd
+ i
);
1526 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1527 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1528 struct ucred
*cred
= (struct ucred
*)data
;
1529 struct target_ucred
*target_cred
=
1530 (struct target_ucred
*)target_data
;
1532 __get_user(cred
->pid
, &target_cred
->pid
);
1533 __get_user(cred
->uid
, &target_cred
->uid
);
1534 __get_user(cred
->gid
, &target_cred
->gid
);
1536 gemu_log("Unsupported ancillary data: %d/%d\n",
1537 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1538 memcpy(data
, target_data
, len
);
1541 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1542 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1545 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1547 msgh
->msg_controllen
= space
;
1551 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1552 struct msghdr
*msgh
)
1554 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1555 abi_long msg_controllen
;
1556 abi_ulong target_cmsg_addr
;
1557 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1558 socklen_t space
= 0;
1560 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1561 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1563 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1564 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1565 target_cmsg_start
= target_cmsg
;
1567 return -TARGET_EFAULT
;
1569 while (cmsg
&& target_cmsg
) {
1570 void *data
= CMSG_DATA(cmsg
);
1571 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1573 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1574 int tgt_len
, tgt_space
;
1576 /* We never copy a half-header but may copy half-data;
1577 * this is Linux's behaviour in put_cmsg(). Note that
1578 * truncation here is a guest problem (which we report
1579 * to the guest via the CTRUNC bit), unlike truncation
1580 * in target_to_host_cmsg, which is a QEMU bug.
1582 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1583 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1587 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1588 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1590 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1592 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1594 /* Payload types which need a different size of payload on
1595 * the target must adjust tgt_len here.
1598 switch (cmsg
->cmsg_level
) {
1600 switch (cmsg
->cmsg_type
) {
1602 tgt_len
= sizeof(struct target_timeval
);
1612 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1613 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1614 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1617 /* We must now copy-and-convert len bytes of payload
1618 * into tgt_len bytes of destination space. Bear in mind
1619 * that in both source and destination we may be dealing
1620 * with a truncated value!
1622 switch (cmsg
->cmsg_level
) {
1624 switch (cmsg
->cmsg_type
) {
1627 int *fd
= (int *)data
;
1628 int *target_fd
= (int *)target_data
;
1629 int i
, numfds
= tgt_len
/ sizeof(int);
1631 for (i
= 0; i
< numfds
; i
++) {
1632 __put_user(fd
[i
], target_fd
+ i
);
1638 struct timeval
*tv
= (struct timeval
*)data
;
1639 struct target_timeval
*target_tv
=
1640 (struct target_timeval
*)target_data
;
1642 if (len
!= sizeof(struct timeval
) ||
1643 tgt_len
!= sizeof(struct target_timeval
)) {
1647 /* copy struct timeval to target */
1648 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1649 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1652 case SCM_CREDENTIALS
:
1654 struct ucred
*cred
= (struct ucred
*)data
;
1655 struct target_ucred
*target_cred
=
1656 (struct target_ucred
*)target_data
;
1658 __put_user(cred
->pid
, &target_cred
->pid
);
1659 __put_user(cred
->uid
, &target_cred
->uid
);
1660 __put_user(cred
->gid
, &target_cred
->gid
);
1669 switch (cmsg
->cmsg_type
) {
1672 uint32_t *v
= (uint32_t *)data
;
1673 uint32_t *t_int
= (uint32_t *)target_data
;
1675 if (len
!= sizeof(uint32_t) ||
1676 tgt_len
!= sizeof(uint32_t)) {
1679 __put_user(*v
, t_int
);
1685 struct sock_extended_err ee
;
1686 struct sockaddr_in offender
;
1688 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1689 struct errhdr_t
*target_errh
=
1690 (struct errhdr_t
*)target_data
;
1692 if (len
!= sizeof(struct errhdr_t
) ||
1693 tgt_len
!= sizeof(struct errhdr_t
)) {
1696 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1697 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1698 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1699 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1700 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1701 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1702 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1703 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1704 (void *) &errh
->offender
, sizeof(errh
->offender
));
1713 switch (cmsg
->cmsg_type
) {
1716 uint32_t *v
= (uint32_t *)data
;
1717 uint32_t *t_int
= (uint32_t *)target_data
;
1719 if (len
!= sizeof(uint32_t) ||
1720 tgt_len
!= sizeof(uint32_t)) {
1723 __put_user(*v
, t_int
);
1729 struct sock_extended_err ee
;
1730 struct sockaddr_in6 offender
;
1732 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1733 struct errhdr6_t
*target_errh
=
1734 (struct errhdr6_t
*)target_data
;
1736 if (len
!= sizeof(struct errhdr6_t
) ||
1737 tgt_len
!= sizeof(struct errhdr6_t
)) {
1740 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1741 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1742 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1743 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1744 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1745 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1746 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1747 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1748 (void *) &errh
->offender
, sizeof(errh
->offender
));
1758 gemu_log("Unsupported ancillary data: %d/%d\n",
1759 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1760 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1761 if (tgt_len
> len
) {
1762 memset(target_data
+ len
, 0, tgt_len
- len
);
1766 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1767 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1768 if (msg_controllen
< tgt_space
) {
1769 tgt_space
= msg_controllen
;
1771 msg_controllen
-= tgt_space
;
1773 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1774 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1777 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1779 target_msgh
->msg_controllen
= tswapal(space
);
1783 /* do_setsockopt() Must return target values and target errnos. */
1784 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1785 abi_ulong optval_addr
, socklen_t optlen
)
1789 struct ip_mreqn
*ip_mreq
;
1790 struct ip_mreq_source
*ip_mreq_source
;
1794 /* TCP options all take an 'int' value. */
1795 if (optlen
< sizeof(uint32_t))
1796 return -TARGET_EINVAL
;
1798 if (get_user_u32(val
, optval_addr
))
1799 return -TARGET_EFAULT
;
1800 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1807 case IP_ROUTER_ALERT
:
1811 case IP_MTU_DISCOVER
:
1818 case IP_MULTICAST_TTL
:
1819 case IP_MULTICAST_LOOP
:
1821 if (optlen
>= sizeof(uint32_t)) {
1822 if (get_user_u32(val
, optval_addr
))
1823 return -TARGET_EFAULT
;
1824 } else if (optlen
>= 1) {
1825 if (get_user_u8(val
, optval_addr
))
1826 return -TARGET_EFAULT
;
1828 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1830 case IP_ADD_MEMBERSHIP
:
1831 case IP_DROP_MEMBERSHIP
:
1832 if (optlen
< sizeof (struct target_ip_mreq
) ||
1833 optlen
> sizeof (struct target_ip_mreqn
))
1834 return -TARGET_EINVAL
;
1836 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1837 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1838 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1841 case IP_BLOCK_SOURCE
:
1842 case IP_UNBLOCK_SOURCE
:
1843 case IP_ADD_SOURCE_MEMBERSHIP
:
1844 case IP_DROP_SOURCE_MEMBERSHIP
:
1845 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1846 return -TARGET_EINVAL
;
1848 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1849 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1850 unlock_user (ip_mreq_source
, optval_addr
, 0);
1859 case IPV6_MTU_DISCOVER
:
1862 case IPV6_RECVPKTINFO
:
1863 case IPV6_UNICAST_HOPS
:
1864 case IPV6_MULTICAST_HOPS
:
1865 case IPV6_MULTICAST_LOOP
:
1867 case IPV6_RECVHOPLIMIT
:
1868 case IPV6_2292HOPLIMIT
:
1871 if (optlen
< sizeof(uint32_t)) {
1872 return -TARGET_EINVAL
;
1874 if (get_user_u32(val
, optval_addr
)) {
1875 return -TARGET_EFAULT
;
1877 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1878 &val
, sizeof(val
)));
1882 struct in6_pktinfo pki
;
1884 if (optlen
< sizeof(pki
)) {
1885 return -TARGET_EINVAL
;
1888 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1889 return -TARGET_EFAULT
;
1892 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1894 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1895 &pki
, sizeof(pki
)));
1906 struct icmp6_filter icmp6f
;
1908 if (optlen
> sizeof(icmp6f
)) {
1909 optlen
= sizeof(icmp6f
);
1912 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
1913 return -TARGET_EFAULT
;
1916 for (val
= 0; val
< 8; val
++) {
1917 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
1920 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1932 /* those take an u32 value */
1933 if (optlen
< sizeof(uint32_t)) {
1934 return -TARGET_EINVAL
;
1937 if (get_user_u32(val
, optval_addr
)) {
1938 return -TARGET_EFAULT
;
1940 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1941 &val
, sizeof(val
)));
1948 case TARGET_SOL_SOCKET
:
1950 case TARGET_SO_RCVTIMEO
:
1954 optname
= SO_RCVTIMEO
;
1957 if (optlen
!= sizeof(struct target_timeval
)) {
1958 return -TARGET_EINVAL
;
1961 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1962 return -TARGET_EFAULT
;
1965 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1969 case TARGET_SO_SNDTIMEO
:
1970 optname
= SO_SNDTIMEO
;
1972 case TARGET_SO_ATTACH_FILTER
:
1974 struct target_sock_fprog
*tfprog
;
1975 struct target_sock_filter
*tfilter
;
1976 struct sock_fprog fprog
;
1977 struct sock_filter
*filter
;
1980 if (optlen
!= sizeof(*tfprog
)) {
1981 return -TARGET_EINVAL
;
1983 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1984 return -TARGET_EFAULT
;
1986 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1987 tswapal(tfprog
->filter
), 0)) {
1988 unlock_user_struct(tfprog
, optval_addr
, 1);
1989 return -TARGET_EFAULT
;
1992 fprog
.len
= tswap16(tfprog
->len
);
1993 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1994 if (filter
== NULL
) {
1995 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1996 unlock_user_struct(tfprog
, optval_addr
, 1);
1997 return -TARGET_ENOMEM
;
1999 for (i
= 0; i
< fprog
.len
; i
++) {
2000 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2001 filter
[i
].jt
= tfilter
[i
].jt
;
2002 filter
[i
].jf
= tfilter
[i
].jf
;
2003 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2005 fprog
.filter
= filter
;
2007 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2008 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2011 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2012 unlock_user_struct(tfprog
, optval_addr
, 1);
2015 case TARGET_SO_BINDTODEVICE
:
2017 char *dev_ifname
, *addr_ifname
;
2019 if (optlen
> IFNAMSIZ
- 1) {
2020 optlen
= IFNAMSIZ
- 1;
2022 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2024 return -TARGET_EFAULT
;
2026 optname
= SO_BINDTODEVICE
;
2027 addr_ifname
= alloca(IFNAMSIZ
);
2028 memcpy(addr_ifname
, dev_ifname
, optlen
);
2029 addr_ifname
[optlen
] = 0;
2030 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2031 addr_ifname
, optlen
));
2032 unlock_user (dev_ifname
, optval_addr
, 0);
2035 /* Options with 'int' argument. */
2036 case TARGET_SO_DEBUG
:
2039 case TARGET_SO_REUSEADDR
:
2040 optname
= SO_REUSEADDR
;
2042 case TARGET_SO_TYPE
:
2045 case TARGET_SO_ERROR
:
2048 case TARGET_SO_DONTROUTE
:
2049 optname
= SO_DONTROUTE
;
2051 case TARGET_SO_BROADCAST
:
2052 optname
= SO_BROADCAST
;
2054 case TARGET_SO_SNDBUF
:
2055 optname
= SO_SNDBUF
;
2057 case TARGET_SO_SNDBUFFORCE
:
2058 optname
= SO_SNDBUFFORCE
;
2060 case TARGET_SO_RCVBUF
:
2061 optname
= SO_RCVBUF
;
2063 case TARGET_SO_RCVBUFFORCE
:
2064 optname
= SO_RCVBUFFORCE
;
2066 case TARGET_SO_KEEPALIVE
:
2067 optname
= SO_KEEPALIVE
;
2069 case TARGET_SO_OOBINLINE
:
2070 optname
= SO_OOBINLINE
;
2072 case TARGET_SO_NO_CHECK
:
2073 optname
= SO_NO_CHECK
;
2075 case TARGET_SO_PRIORITY
:
2076 optname
= SO_PRIORITY
;
2079 case TARGET_SO_BSDCOMPAT
:
2080 optname
= SO_BSDCOMPAT
;
2083 case TARGET_SO_PASSCRED
:
2084 optname
= SO_PASSCRED
;
2086 case TARGET_SO_PASSSEC
:
2087 optname
= SO_PASSSEC
;
2089 case TARGET_SO_TIMESTAMP
:
2090 optname
= SO_TIMESTAMP
;
2092 case TARGET_SO_RCVLOWAT
:
2093 optname
= SO_RCVLOWAT
;
2098 if (optlen
< sizeof(uint32_t))
2099 return -TARGET_EINVAL
;
2101 if (get_user_u32(val
, optval_addr
))
2102 return -TARGET_EFAULT
;
2103 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2107 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2108 ret
= -TARGET_ENOPROTOOPT
;
2113 /* do_getsockopt() Must return target values and target errnos. */
2114 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2115 abi_ulong optval_addr
, abi_ulong optlen
)
2122 case TARGET_SOL_SOCKET
:
2125 /* These don't just return a single integer */
2126 case TARGET_SO_LINGER
:
2127 case TARGET_SO_RCVTIMEO
:
2128 case TARGET_SO_SNDTIMEO
:
2129 case TARGET_SO_PEERNAME
:
2131 case TARGET_SO_PEERCRED
: {
2134 struct target_ucred
*tcr
;
2136 if (get_user_u32(len
, optlen
)) {
2137 return -TARGET_EFAULT
;
2140 return -TARGET_EINVAL
;
2144 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2152 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2153 return -TARGET_EFAULT
;
2155 __put_user(cr
.pid
, &tcr
->pid
);
2156 __put_user(cr
.uid
, &tcr
->uid
);
2157 __put_user(cr
.gid
, &tcr
->gid
);
2158 unlock_user_struct(tcr
, optval_addr
, 1);
2159 if (put_user_u32(len
, optlen
)) {
2160 return -TARGET_EFAULT
;
2164 /* Options with 'int' argument. */
2165 case TARGET_SO_DEBUG
:
2168 case TARGET_SO_REUSEADDR
:
2169 optname
= SO_REUSEADDR
;
2171 case TARGET_SO_TYPE
:
2174 case TARGET_SO_ERROR
:
2177 case TARGET_SO_DONTROUTE
:
2178 optname
= SO_DONTROUTE
;
2180 case TARGET_SO_BROADCAST
:
2181 optname
= SO_BROADCAST
;
2183 case TARGET_SO_SNDBUF
:
2184 optname
= SO_SNDBUF
;
2186 case TARGET_SO_RCVBUF
:
2187 optname
= SO_RCVBUF
;
2189 case TARGET_SO_KEEPALIVE
:
2190 optname
= SO_KEEPALIVE
;
2192 case TARGET_SO_OOBINLINE
:
2193 optname
= SO_OOBINLINE
;
2195 case TARGET_SO_NO_CHECK
:
2196 optname
= SO_NO_CHECK
;
2198 case TARGET_SO_PRIORITY
:
2199 optname
= SO_PRIORITY
;
2202 case TARGET_SO_BSDCOMPAT
:
2203 optname
= SO_BSDCOMPAT
;
2206 case TARGET_SO_PASSCRED
:
2207 optname
= SO_PASSCRED
;
2209 case TARGET_SO_TIMESTAMP
:
2210 optname
= SO_TIMESTAMP
;
2212 case TARGET_SO_RCVLOWAT
:
2213 optname
= SO_RCVLOWAT
;
2215 case TARGET_SO_ACCEPTCONN
:
2216 optname
= SO_ACCEPTCONN
;
2223 /* TCP options all take an 'int' value. */
2225 if (get_user_u32(len
, optlen
))
2226 return -TARGET_EFAULT
;
2228 return -TARGET_EINVAL
;
2230 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2233 if (optname
== SO_TYPE
) {
2234 val
= host_to_target_sock_type(val
);
2239 if (put_user_u32(val
, optval_addr
))
2240 return -TARGET_EFAULT
;
2242 if (put_user_u8(val
, optval_addr
))
2243 return -TARGET_EFAULT
;
2245 if (put_user_u32(len
, optlen
))
2246 return -TARGET_EFAULT
;
2253 case IP_ROUTER_ALERT
:
2257 case IP_MTU_DISCOVER
:
2263 case IP_MULTICAST_TTL
:
2264 case IP_MULTICAST_LOOP
:
2265 if (get_user_u32(len
, optlen
))
2266 return -TARGET_EFAULT
;
2268 return -TARGET_EINVAL
;
2270 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2273 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2275 if (put_user_u32(len
, optlen
)
2276 || put_user_u8(val
, optval_addr
))
2277 return -TARGET_EFAULT
;
2279 if (len
> sizeof(int))
2281 if (put_user_u32(len
, optlen
)
2282 || put_user_u32(val
, optval_addr
))
2283 return -TARGET_EFAULT
;
2287 ret
= -TARGET_ENOPROTOOPT
;
2293 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2295 ret
= -TARGET_EOPNOTSUPP
;
2301 /* Convert target low/high pair representing file offset into the host
2302 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2303 * as the kernel doesn't handle them either.
2305 static void target_to_host_low_high(abi_ulong tlow
,
2307 unsigned long *hlow
,
2308 unsigned long *hhigh
)
2310 uint64_t off
= tlow
|
2311 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2312 TARGET_LONG_BITS
/ 2;
2315 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
/* Lock a guest iovec array and the buffers it points at, producing a host
 * struct iovec array usable with host readv/writev/sendmsg.  On failure
 * returns NULL with errno set (caller converts with host_to_target_errno).
 * The returned vector must be released with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock only the entries that were successfully locked
     * (zero-length entries were never locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2406 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2407 abi_ulong count
, int copy
)
2409 struct target_iovec
*target_vec
;
2412 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2413 count
* sizeof(struct target_iovec
), 1);
2415 for (i
= 0; i
< count
; i
++) {
2416 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2417 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2421 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2423 unlock_user(target_vec
, target_addr
, 0);
/* Translate a guest socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host encoding, in place.
 * Returns 0 on success or -TARGET_EINVAL when the host cannot express a
 * requested flag at socket-creation time.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other types share the same numbering on host and target. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Without SOCK_NONBLOCK or O_NONBLOCK we cannot emulate it at
         * all; with O_NONBLOCK it is fixed up later (sock_flags_fixup). */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.
 * On hosts lacking SOCK_NONBLOCK, apply O_NONBLOCK via fcntl instead.
 * Returns the fd on success; closes the fd and returns -TARGET_EINVAL
 * if the fixup fails.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only a whitelisted subset of netlink protocols is emulated. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* Packet protocols are specified in network byte order. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            /* Register the per-protocol data translator for this fd. */
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Impossible: the whitelist above already filtered. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
2532 /* do_bind() Must return target values and target errnos. */
2533 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2539 if ((int)addrlen
< 0) {
2540 return -TARGET_EINVAL
;
2543 addr
= alloca(addrlen
+1);
2545 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2549 return get_errno(bind(sockfd
, addr
, addrlen
));
2552 /* do_connect() Must return target values and target errnos. */
2553 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2559 if ((int)addrlen
< 0) {
2560 return -TARGET_EINVAL
;
2563 addr
= alloca(addrlen
+1);
2565 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2569 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Core of sendmsg/recvmsg emulation: converts the (already locked) guest
 * msghdr to a host msghdr, performs the host call, and converts results
 * (address, control messages, lengths) back.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen + 1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsgs can be larger than target cmsgs; reserve double. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate the payload through the fd's data translator
             * using a private copy, then send the translated buffer. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len,
                                                           len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name,
                                                  msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* On success return the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
/* Lock the guest msghdr, dispatch to do_sendrecvmsg_locked(), then unlock
 * (copying back to the guest only on the receive path).
 */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest mmsghdr vector, recording per-message lengths.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL guest address: accept without reporting the peer address. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* Copy address and (possibly updated) length back to the guest. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* Copy address and (possibly updated) length back to the guest. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        /* Write both fds back to the guest's two-element array. */
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate the payload in a private copy so the guest's locked
         * buffer is not modified; the original pointer is kept in
         * copy_msg for the final unlock. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen + 1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Translate the received bytes in place for the guest. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Copy the received data back to guest memory on unlock. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
2952 #ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    /* Number of guest arguments for each socketcall sub-operation. */
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#define N_SHM_REGIONS	32

/* Tracks guest shmat() mappings so shmdt()/shmctl() can find them.
 * NOTE(review): field list reconstructed — confirm against full source.
 */
static struct shm_region {
    abi_ulong start;  /* guest address of the attached segment */
    abi_ulong size;   /* segment size in bytes */
    bool in_use;      /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;       /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;       /* padding: time fields are 64-bit on disk */
#endif
    abi_ulong sem_ctime;       /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;       /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Copy the ipc_perm embedded at the start of a guest semid64_ds into a
 * host ipc_perm.  target_addr points at the guest semid64_ds.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode/__seq field widths differ per target ABI. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy a host ipc_perm into the ipc_perm embedded at the start of a guest
 * semid64_ds at target_addr.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode/__seq field widths differ per target ABI. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3128 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3129 abi_ulong target_addr
)
3131 struct target_semid64_ds
*target_sd
;
3133 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3134 return -TARGET_EFAULT
;
3135 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3136 return -TARGET_EFAULT
;
3137 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3138 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3139 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3140 unlock_user_struct(target_sd
, target_addr
, 0);
3144 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3145 struct semid_ds
*host_sd
)
3147 struct target_semid64_ds
*target_sd
;
3149 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3150 return -TARGET_EFAULT
;
3151 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3152 return -TARGET_EFAULT
;
3153 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3154 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3155 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3156 unlock_user_struct(target_sd
, target_addr
, 1);
3160 struct target_seminfo
{
/* Copy a host seminfo into the guest target_seminfo at target_addr. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* glibc may not define union semun; provide it when undefined. */
#ifdef _SEM_SEMUN_UNDEFINED
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};
#endif

/* Guest view of union semun: all pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Allocate a host array for SETALL and fill it from the guest array at
 * target_addr.  The semaphore count is queried from the kernel with
 * IPC_STAT.  On success *host_array is owned by the caller (freed later
 * by host_to_target_semarray).
 */
static inline abi_long target_to_host_semarray(int semid,
                                               unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3243 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3244 unsigned short **host_array
)
3247 unsigned short *array
;
3249 struct semid_ds semid_ds
;
3252 semun
.buf
= &semid_ds
;
3254 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3256 return get_errno(ret
);
3258 nsems
= semid_ds
.sem_nsems
;
3260 array
= lock_user(VERIFY_WRITE
, target_addr
,
3261 nsems
*sizeof(unsigned short), 0);
3263 return -TARGET_EFAULT
;
3265 for(i
=0; i
<nsems
; i
++) {
3266 __put_user((*host_array
)[i
], &array
[i
]);
3268 g_free(*host_array
);
3269 unlock_user(array
, target_addr
, 1);
/* Emulate semctl(2): convert the guest's semun argument per command,
 * call the host semctl, and convert results back.  Must return target
 * values and target errnos.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
3344 struct target_sembuf
{
3345 unsigned short sem_num
;
/* Copy an array of nsops guest sembufs at target_addr into host_sembuf. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/* Emulate semop(2) via safe_semtimedop with a NULL timeout.
 * NOTE(review): 'nsops' sizes a VLA without an upper-bound check; a huge
 * guest value could overflow the stack — confirm whether a SEMOPM-style
 * limit is enforced by the caller.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest-layout mirror of struct msqid_ds (msgctl IPC_STAT/IPC_SET). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;       /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;       /* padding: time fields are 64-bit on disk */
#endif
    abi_ulong msg_rtime;       /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;       /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;    /* current bytes on queue */
    abi_ulong msg_qnum;        /* messages currently on queue */
    abi_ulong msg_qbytes;      /* max bytes allowed on queue */
    abi_ulong msg_lspid;       /* pid of last msgsnd */
    abi_ulong msg_lrpid;       /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/* Convert a guest msqid_ds at target_addr into a host msqid_ds.
 * NOTE(review): like the semid_ds converter, the ipc_perm failure path
 * returns without unlocking target_md — confirm and balance upstream.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
/* Convert a host msqid_ds into a guest msqid_ds at target_addr.
 * NOTE(review): the ipc_perm failure path returns without unlocking
 * target_md — confirm and balance upstream.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
3449 struct target_msginfo
{
3457 unsigned short int msgseg
;
/* Copy a host msginfo into the guest target_msginfo at target_addr. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2): convert the guest buffer per command, call the host
 * msgctl, and convert results back.  Must return target errnos.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
3510 struct target_msgbuf
{
/* Emulate msgsnd(2): copy the guest message into a host msgbuf (with the
 * mtype byteswapped) and send it.  Must return target errnos.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* One long for mtype plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
 * and byteswapped mtype back to the guest.  Must return target errnos.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* The payload starts right after the guest's mtype field. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Convert a guest shmid_ds at target_addr into a host shmid_ds.
 * NOTE(review): the ipc_perm failure path returns without unlocking
 * target_sd — confirm and balance upstream.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Convert a host shmid_ds into a guest shmid_ds at target_addr.
 * NOTE(review): the ipc_perm failure path returns without unlocking
 * target_sd — confirm and balance upstream.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3625 struct target_shminfo
{
/* Copy a host shminfo into the guest target_shminfo at target_addr. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
3648 struct target_shm_info
{
3653 abi_ulong swap_attempts
;
3654 abi_ulong swap_successes
;
3657 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3658 struct shm_info
*host_shm_info
)
3660 struct target_shm_info
*target_shm_info
;
3661 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3662 return -TARGET_EFAULT
;
3663 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3664 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3665 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3666 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3667 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3668 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3669 unlock_user_struct(target_shm_info
, target_addr
, 1);
3673 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3675 struct shmid_ds dsarg
;
3676 struct shminfo shminfo
;
3677 struct shm_info shm_info
;
3678 abi_long ret
= -TARGET_EINVAL
;
3686 if (target_to_host_shmid_ds(&dsarg
, buf
))
3687 return -TARGET_EFAULT
;
3688 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3689 if (host_to_target_shmid_ds(buf
, &dsarg
))
3690 return -TARGET_EFAULT
;
3693 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3694 if (host_to_target_shminfo(buf
, &shminfo
))
3695 return -TARGET_EFAULT
;
3698 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3699 if (host_to_target_shm_info(buf
, &shm_info
))
3700 return -TARGET_EFAULT
;
3705 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3712 #ifndef TARGET_FORCE_SHMLBA
3713 /* For most architectures, SHMLBA is the same as the page size;
3714 * some architectures have larger values, in which case they should
3715 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3716 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3717 * and defining its own value for SHMLBA.
3719 * The kernel also permits SHMLBA to be set by the architecture to a
3720 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3721 * this means that addresses are rounded to the large size if
3722 * SHM_RND is set but addresses not aligned to that size are not rejected
3723 * as long as they are at least page-aligned. Since the only architecture
3724 * which uses this is ia64 this code doesn't provide for that oddity.
3726 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
3728 return TARGET_PAGE_SIZE
;
3732 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
3733 int shmid
, abi_ulong shmaddr
, int shmflg
)
3737 struct shmid_ds shm_info
;
3741 /* find out the length of the shared memory segment */
3742 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3743 if (is_error(ret
)) {
3744 /* can't get length, bail out */
3748 shmlba
= target_shmlba(cpu_env
);
3750 if (shmaddr
& (shmlba
- 1)) {
3751 if (shmflg
& SHM_RND
) {
3752 shmaddr
&= ~(shmlba
- 1);
3754 return -TARGET_EINVAL
;
3757 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
3758 return -TARGET_EINVAL
;
3764 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3766 abi_ulong mmap_start
;
3768 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3770 if (mmap_start
== -1) {
3772 host_raddr
= (void *)-1;
3774 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3777 if (host_raddr
== (void *)-1) {
3779 return get_errno((long)host_raddr
);
3781 raddr
=h2g((unsigned long)host_raddr
);
3783 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3784 PAGE_VALID
| PAGE_READ
|
3785 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3787 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3788 if (!shm_regions
[i
].in_use
) {
3789 shm_regions
[i
].in_use
= true;
3790 shm_regions
[i
].start
= raddr
;
3791 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3801 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3808 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3809 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3810 shm_regions
[i
].in_use
= false;
3811 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3815 rv
= get_errno(shmdt(g2h(shmaddr
)));
3822 #ifdef TARGET_NR_ipc
3823 /* ??? This only works with linear mappings. */
3824 /* do_ipc() must return target values and target errnos. */
3825 static abi_long
do_ipc(CPUArchState
*cpu_env
,
3826 unsigned int call
, abi_long first
,
3827 abi_long second
, abi_long third
,
3828 abi_long ptr
, abi_long fifth
)
3833 version
= call
>> 16;
3838 ret
= do_semop(first
, ptr
, second
);
3842 ret
= get_errno(semget(first
, second
, third
));
3845 case IPCOP_semctl
: {
3846 /* The semun argument to semctl is passed by value, so dereference the
3849 get_user_ual(atptr
, ptr
);
3850 ret
= do_semctl(first
, second
, third
, atptr
);
3855 ret
= get_errno(msgget(first
, second
));
3859 ret
= do_msgsnd(first
, ptr
, second
, third
);
3863 ret
= do_msgctl(first
, second
, ptr
);
3870 struct target_ipc_kludge
{
3875 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3876 ret
= -TARGET_EFAULT
;
3880 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3882 unlock_user_struct(tmp
, ptr
, 0);
3886 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3895 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
3896 if (is_error(raddr
))
3897 return get_errno(raddr
);
3898 if (put_user_ual(raddr
, third
))
3899 return -TARGET_EFAULT
;
3903 ret
= -TARGET_EINVAL
;
3908 ret
= do_shmdt(ptr
);
3912 /* IPC_* flag values are the same on all linux platforms */
3913 ret
= get_errno(shmget(first
, second
, third
));
3916 /* IPC_* and SHM_* command values are the same on all linux platforms */
3918 ret
= do_shmctl(first
, second
, ptr
);
3921 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3922 ret
= -TARGET_ENOSYS
;
3929 /* kernel structure types definitions */
3931 #define STRUCT(name, ...) STRUCT_ ## name,
3932 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3934 #include "syscall_types.h"
3938 #undef STRUCT_SPECIAL
3940 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3941 #define STRUCT_SPECIAL(name)
3942 #include "syscall_types.h"
3944 #undef STRUCT_SPECIAL
3946 typedef struct IOCTLEntry IOCTLEntry
;
3948 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3949 int fd
, int cmd
, abi_long arg
);
3953 unsigned int host_cmd
;
3956 do_ioctl_fn
*do_ioctl
;
3957 const argtype arg_type
[5];
3960 #define IOC_R 0x0001
3961 #define IOC_W 0x0002
3962 #define IOC_RW (IOC_R | IOC_W)
3964 #define MAX_STRUCT_SIZE 4096
3966 #ifdef CONFIG_FIEMAP
3967 /* So fiemap access checks don't overflow on 32 bit systems.
3968 * This is very slightly smaller than the limit imposed by
3969 * the underlying kernel.
3971 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3972 / sizeof(struct fiemap_extent))
3974 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3975 int fd
, int cmd
, abi_long arg
)
3977 /* The parameter for this ioctl is a struct fiemap followed
3978 * by an array of struct fiemap_extent whose size is set
3979 * in fiemap->fm_extent_count. The array is filled in by the
3982 int target_size_in
, target_size_out
;
3984 const argtype
*arg_type
= ie
->arg_type
;
3985 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3988 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3992 assert(arg_type
[0] == TYPE_PTR
);
3993 assert(ie
->access
== IOC_RW
);
3995 target_size_in
= thunk_type_size(arg_type
, 0);
3996 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3998 return -TARGET_EFAULT
;
4000 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4001 unlock_user(argptr
, arg
, 0);
4002 fm
= (struct fiemap
*)buf_temp
;
4003 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4004 return -TARGET_EINVAL
;
4007 outbufsz
= sizeof (*fm
) +
4008 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4010 if (outbufsz
> MAX_STRUCT_SIZE
) {
4011 /* We can't fit all the extents into the fixed size buffer.
4012 * Allocate one that is large enough and use it instead.
4014 fm
= g_try_malloc(outbufsz
);
4016 return -TARGET_ENOMEM
;
4018 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4021 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4022 if (!is_error(ret
)) {
4023 target_size_out
= target_size_in
;
4024 /* An extent_count of 0 means we were only counting the extents
4025 * so there are no structs to copy
4027 if (fm
->fm_extent_count
!= 0) {
4028 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4030 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4032 ret
= -TARGET_EFAULT
;
4034 /* Convert the struct fiemap */
4035 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4036 if (fm
->fm_extent_count
!= 0) {
4037 p
= argptr
+ target_size_in
;
4038 /* ...and then all the struct fiemap_extents */
4039 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4040 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4045 unlock_user(argptr
, arg
, target_size_out
);
4055 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4056 int fd
, int cmd
, abi_long arg
)
4058 const argtype
*arg_type
= ie
->arg_type
;
4062 struct ifconf
*host_ifconf
;
4064 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4065 int target_ifreq_size
;
4070 abi_long target_ifc_buf
;
4074 assert(arg_type
[0] == TYPE_PTR
);
4075 assert(ie
->access
== IOC_RW
);
4078 target_size
= thunk_type_size(arg_type
, 0);
4080 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4082 return -TARGET_EFAULT
;
4083 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4084 unlock_user(argptr
, arg
, 0);
4086 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4087 target_ifc_len
= host_ifconf
->ifc_len
;
4088 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4090 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4091 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4092 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4094 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4095 if (outbufsz
> MAX_STRUCT_SIZE
) {
4096 /* We can't fit all the extents into the fixed size buffer.
4097 * Allocate one that is large enough and use it instead.
4099 host_ifconf
= malloc(outbufsz
);
4101 return -TARGET_ENOMEM
;
4103 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4106 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4108 host_ifconf
->ifc_len
= host_ifc_len
;
4109 host_ifconf
->ifc_buf
= host_ifc_buf
;
4111 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4112 if (!is_error(ret
)) {
4113 /* convert host ifc_len to target ifc_len */
4115 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4116 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4117 host_ifconf
->ifc_len
= target_ifc_len
;
4119 /* restore target ifc_buf */
4121 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4123 /* copy struct ifconf to target user */
4125 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4127 return -TARGET_EFAULT
;
4128 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4129 unlock_user(argptr
, arg
, target_size
);
4131 /* copy ifreq[] to target user */
4133 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4134 for (i
= 0; i
< nb_ifreq
; i
++) {
4135 thunk_convert(argptr
+ i
* target_ifreq_size
,
4136 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4137 ifreq_arg_type
, THUNK_TARGET
);
4139 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4149 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4150 int cmd
, abi_long arg
)
4153 struct dm_ioctl
*host_dm
;
4154 abi_long guest_data
;
4155 uint32_t guest_data_size
;
4157 const argtype
*arg_type
= ie
->arg_type
;
4159 void *big_buf
= NULL
;
4163 target_size
= thunk_type_size(arg_type
, 0);
4164 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4166 ret
= -TARGET_EFAULT
;
4169 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4170 unlock_user(argptr
, arg
, 0);
4172 /* buf_temp is too small, so fetch things into a bigger buffer */
4173 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4174 memcpy(big_buf
, buf_temp
, target_size
);
4178 guest_data
= arg
+ host_dm
->data_start
;
4179 if ((guest_data
- arg
) < 0) {
4180 ret
= -TARGET_EINVAL
;
4183 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4184 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4186 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4188 ret
= -TARGET_EFAULT
;
4192 switch (ie
->host_cmd
) {
4194 case DM_LIST_DEVICES
:
4197 case DM_DEV_SUSPEND
:
4200 case DM_TABLE_STATUS
:
4201 case DM_TABLE_CLEAR
:
4203 case DM_LIST_VERSIONS
:
4207 case DM_DEV_SET_GEOMETRY
:
4208 /* data contains only strings */
4209 memcpy(host_data
, argptr
, guest_data_size
);
4212 memcpy(host_data
, argptr
, guest_data_size
);
4213 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4217 void *gspec
= argptr
;
4218 void *cur_data
= host_data
;
4219 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4220 int spec_size
= thunk_type_size(arg_type
, 0);
4223 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4224 struct dm_target_spec
*spec
= cur_data
;
4228 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4229 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4231 spec
->next
= sizeof(*spec
) + slen
;
4232 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4234 cur_data
+= spec
->next
;
4239 ret
= -TARGET_EINVAL
;
4240 unlock_user(argptr
, guest_data
, 0);
4243 unlock_user(argptr
, guest_data
, 0);
4245 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4246 if (!is_error(ret
)) {
4247 guest_data
= arg
+ host_dm
->data_start
;
4248 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4249 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4250 switch (ie
->host_cmd
) {
4255 case DM_DEV_SUSPEND
:
4258 case DM_TABLE_CLEAR
:
4260 case DM_DEV_SET_GEOMETRY
:
4261 /* no return data */
4263 case DM_LIST_DEVICES
:
4265 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4266 uint32_t remaining_data
= guest_data_size
;
4267 void *cur_data
= argptr
;
4268 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4269 int nl_size
= 12; /* can't use thunk_size due to alignment */
4272 uint32_t next
= nl
->next
;
4274 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4276 if (remaining_data
< nl
->next
) {
4277 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4280 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4281 strcpy(cur_data
+ nl_size
, nl
->name
);
4282 cur_data
+= nl
->next
;
4283 remaining_data
-= nl
->next
;
4287 nl
= (void*)nl
+ next
;
4292 case DM_TABLE_STATUS
:
4294 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4295 void *cur_data
= argptr
;
4296 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4297 int spec_size
= thunk_type_size(arg_type
, 0);
4300 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4301 uint32_t next
= spec
->next
;
4302 int slen
= strlen((char*)&spec
[1]) + 1;
4303 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4304 if (guest_data_size
< spec
->next
) {
4305 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4308 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4309 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4310 cur_data
= argptr
+ spec
->next
;
4311 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4317 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4318 int count
= *(uint32_t*)hdata
;
4319 uint64_t *hdev
= hdata
+ 8;
4320 uint64_t *gdev
= argptr
+ 8;
4323 *(uint32_t*)argptr
= tswap32(count
);
4324 for (i
= 0; i
< count
; i
++) {
4325 *gdev
= tswap64(*hdev
);
4331 case DM_LIST_VERSIONS
:
4333 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4334 uint32_t remaining_data
= guest_data_size
;
4335 void *cur_data
= argptr
;
4336 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4337 int vers_size
= thunk_type_size(arg_type
, 0);
4340 uint32_t next
= vers
->next
;
4342 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4344 if (remaining_data
< vers
->next
) {
4345 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4348 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4349 strcpy(cur_data
+ vers_size
, vers
->name
);
4350 cur_data
+= vers
->next
;
4351 remaining_data
-= vers
->next
;
4355 vers
= (void*)vers
+ next
;
4360 unlock_user(argptr
, guest_data
, 0);
4361 ret
= -TARGET_EINVAL
;
4364 unlock_user(argptr
, guest_data
, guest_data_size
);
4366 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4368 ret
= -TARGET_EFAULT
;
4371 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4372 unlock_user(argptr
, arg
, target_size
);
4379 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4380 int cmd
, abi_long arg
)
4384 const argtype
*arg_type
= ie
->arg_type
;
4385 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4388 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4389 struct blkpg_partition host_part
;
4391 /* Read and convert blkpg */
4393 target_size
= thunk_type_size(arg_type
, 0);
4394 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4396 ret
= -TARGET_EFAULT
;
4399 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4400 unlock_user(argptr
, arg
, 0);
4402 switch (host_blkpg
->op
) {
4403 case BLKPG_ADD_PARTITION
:
4404 case BLKPG_DEL_PARTITION
:
4405 /* payload is struct blkpg_partition */
4408 /* Unknown opcode */
4409 ret
= -TARGET_EINVAL
;
4413 /* Read and convert blkpg->data */
4414 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4415 target_size
= thunk_type_size(part_arg_type
, 0);
4416 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4418 ret
= -TARGET_EFAULT
;
4421 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4422 unlock_user(argptr
, arg
, 0);
4424 /* Swizzle the data pointer to our local copy and call! */
4425 host_blkpg
->data
= &host_part
;
4426 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4432 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4433 int fd
, int cmd
, abi_long arg
)
4435 const argtype
*arg_type
= ie
->arg_type
;
4436 const StructEntry
*se
;
4437 const argtype
*field_types
;
4438 const int *dst_offsets
, *src_offsets
;
4441 abi_ulong
*target_rt_dev_ptr
;
4442 unsigned long *host_rt_dev_ptr
;
4446 assert(ie
->access
== IOC_W
);
4447 assert(*arg_type
== TYPE_PTR
);
4449 assert(*arg_type
== TYPE_STRUCT
);
4450 target_size
= thunk_type_size(arg_type
, 0);
4451 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4453 return -TARGET_EFAULT
;
4456 assert(*arg_type
== (int)STRUCT_rtentry
);
4457 se
= struct_entries
+ *arg_type
++;
4458 assert(se
->convert
[0] == NULL
);
4459 /* convert struct here to be able to catch rt_dev string */
4460 field_types
= se
->field_types
;
4461 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4462 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4463 for (i
= 0; i
< se
->nb_fields
; i
++) {
4464 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4465 assert(*field_types
== TYPE_PTRVOID
);
4466 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4467 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4468 if (*target_rt_dev_ptr
!= 0) {
4469 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4470 tswapal(*target_rt_dev_ptr
));
4471 if (!*host_rt_dev_ptr
) {
4472 unlock_user(argptr
, arg
, 0);
4473 return -TARGET_EFAULT
;
4476 *host_rt_dev_ptr
= 0;
4481 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4482 argptr
+ src_offsets
[i
],
4483 field_types
, THUNK_HOST
);
4485 unlock_user(argptr
, arg
, 0);
4487 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4488 if (*host_rt_dev_ptr
!= 0) {
4489 unlock_user((void *)*host_rt_dev_ptr
,
4490 *target_rt_dev_ptr
, 0);
4495 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4496 int fd
, int cmd
, abi_long arg
)
4498 int sig
= target_to_host_signal(arg
);
4499 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4503 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4504 int fd
, int cmd
, abi_long arg
)
4506 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
4507 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
4511 static IOCTLEntry ioctl_entries
[] = {
4512 #define IOCTL(cmd, access, ...) \
4513 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4514 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4515 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4516 #define IOCTL_IGNORE(cmd) \
4517 { TARGET_ ## cmd, 0, #cmd },
4522 /* ??? Implement proper locking for ioctls. */
4523 /* do_ioctl() Must return target values and target errnos. */
4524 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4526 const IOCTLEntry
*ie
;
4527 const argtype
*arg_type
;
4529 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4535 if (ie
->target_cmd
== 0) {
4536 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4537 return -TARGET_ENOSYS
;
4539 if (ie
->target_cmd
== cmd
)
4543 arg_type
= ie
->arg_type
;
4545 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4546 } else if (!ie
->host_cmd
) {
4547 /* Some architectures define BSD ioctls in their headers
4548 that are not implemented in Linux. */
4549 return -TARGET_ENOSYS
;
4552 switch(arg_type
[0]) {
4555 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4559 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4563 target_size
= thunk_type_size(arg_type
, 0);
4564 switch(ie
->access
) {
4566 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4567 if (!is_error(ret
)) {
4568 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4570 return -TARGET_EFAULT
;
4571 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4572 unlock_user(argptr
, arg
, target_size
);
4576 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4578 return -TARGET_EFAULT
;
4579 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4580 unlock_user(argptr
, arg
, 0);
4581 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4585 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4587 return -TARGET_EFAULT
;
4588 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4589 unlock_user(argptr
, arg
, 0);
4590 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4591 if (!is_error(ret
)) {
4592 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4594 return -TARGET_EFAULT
;
4595 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4596 unlock_user(argptr
, arg
, target_size
);
4602 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4603 (long)cmd
, arg_type
[0]);
4604 ret
= -TARGET_ENOSYS
;
4610 static const bitmask_transtbl iflag_tbl
[] = {
4611 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4612 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4613 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4614 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4615 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4616 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4617 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4618 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4619 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4620 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4621 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4622 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4623 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4624 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4628 static const bitmask_transtbl oflag_tbl
[] = {
4629 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4630 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4631 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4632 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4633 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4634 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4635 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4636 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4637 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4638 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4639 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4640 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4641 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4642 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4643 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4644 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4645 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4646 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4647 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4648 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4649 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4650 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4651 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4652 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4656 static const bitmask_transtbl cflag_tbl
[] = {
4657 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4658 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4659 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4660 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4661 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4662 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4663 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4664 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4665 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4666 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4667 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4668 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4669 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4670 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4671 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4672 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4673 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4674 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4675 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4676 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4677 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4678 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4679 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4680 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4681 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4682 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4683 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4684 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4685 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4686 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4687 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4691 static const bitmask_transtbl lflag_tbl
[] = {
4692 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4693 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4694 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4695 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4696 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4697 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4698 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4699 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4700 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4701 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4702 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4703 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4704 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4705 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4706 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4710 static void target_to_host_termios (void *dst
, const void *src
)
4712 struct host_termios
*host
= dst
;
4713 const struct target_termios
*target
= src
;
4716 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4718 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4720 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4722 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4723 host
->c_line
= target
->c_line
;
4725 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4726 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4727 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4728 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4729 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4730 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4731 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4732 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4733 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4734 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4735 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4736 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4737 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4738 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4739 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4740 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4741 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4742 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4745 static void host_to_target_termios (void *dst
, const void *src
)
4747 struct target_termios
*target
= dst
;
4748 const struct host_termios
*host
= src
;
4751 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4753 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4755 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4757 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4758 target
->c_line
= host
->c_line
;
4760 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4761 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4762 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4763 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4764 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4765 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4766 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4767 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4768 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4769 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4770 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4771 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4772 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4773 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4774 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4775 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4776 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4777 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4780 static const StructEntry struct_termios_def
= {
4781 .convert
= { host_to_target_termios
, target_to_host_termios
},
4782 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4783 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4786 static bitmask_transtbl mmap_flags_tbl
[] = {
4787 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4788 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4789 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4790 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
4791 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4792 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
4793 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4794 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
4795 MAP_DENYWRITE
, MAP_DENYWRITE
},
4796 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
4797 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4798 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4799 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
4800 MAP_NORESERVE
, MAP_NORESERVE
},
4801 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
4802 /* MAP_STACK had been ignored by the kernel for quite some time.
4803 Recognize it for the target insofar as we do not want to pass
4804 it through to the host. */
4805 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
4809 #if defined(TARGET_I386)
4811 /* NOTE: there is really one LDT for all the threads */
4812 static uint8_t *ldt_table
;
4814 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4821 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4822 if (size
> bytecount
)
4824 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4826 return -TARGET_EFAULT
;
4827 /* ??? Should this by byteswapped? */
4828 memcpy(p
, ldt_table
, size
);
4829 unlock_user(p
, ptr
, size
);
4833 /* XXX: add locking support */
4834 static abi_long
write_ldt(CPUX86State
*env
,
4835 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4837 struct target_modify_ldt_ldt_s ldt_info
;
4838 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4839 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4840 int seg_not_present
, useable
, lm
;
4841 uint32_t *lp
, entry_1
, entry_2
;
4843 if (bytecount
!= sizeof(ldt_info
))
4844 return -TARGET_EINVAL
;
4845 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4846 return -TARGET_EFAULT
;
4847 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4848 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4849 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4850 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4851 unlock_user_struct(target_ldt_info
, ptr
, 0);
4853 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4854 return -TARGET_EINVAL
;
4855 seg_32bit
= ldt_info
.flags
& 1;
4856 contents
= (ldt_info
.flags
>> 1) & 3;
4857 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4858 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4859 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4860 useable
= (ldt_info
.flags
>> 6) & 1;
4864 lm
= (ldt_info
.flags
>> 7) & 1;
4866 if (contents
== 3) {
4868 return -TARGET_EINVAL
;
4869 if (seg_not_present
== 0)
4870 return -TARGET_EINVAL
;
4872 /* allocate the LDT */
4874 env
->ldt
.base
= target_mmap(0,
4875 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4876 PROT_READ
|PROT_WRITE
,
4877 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4878 if (env
->ldt
.base
== -1)
4879 return -TARGET_ENOMEM
;
4880 memset(g2h(env
->ldt
.base
), 0,
4881 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4882 env
->ldt
.limit
= 0xffff;
4883 ldt_table
= g2h(env
->ldt
.base
);
4886 /* NOTE: same code as Linux kernel */
4887 /* Allow LDTs to be cleared by the user. */
4888 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4891 read_exec_only
== 1 &&
4893 limit_in_pages
== 0 &&
4894 seg_not_present
== 1 &&
4902 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4903 (ldt_info
.limit
& 0x0ffff);
4904 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4905 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4906 (ldt_info
.limit
& 0xf0000) |
4907 ((read_exec_only
^ 1) << 9) |
4909 ((seg_not_present
^ 1) << 15) |
4911 (limit_in_pages
<< 23) |
4915 entry_2
|= (useable
<< 20);
4917 /* Install the new entry ... */
4919 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4920 lp
[0] = tswap32(entry_1
);
4921 lp
[1] = tswap32(entry_2
);
4925 /* specific and weird i386 syscalls */
4926 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4927 unsigned long bytecount
)
4933 ret
= read_ldt(ptr
, bytecount
);
4936 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4939 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4942 ret
= -TARGET_ENOSYS
;
4948 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4949 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4951 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4952 struct target_modify_ldt_ldt_s ldt_info
;
4953 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4954 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4955 int seg_not_present
, useable
, lm
;
4956 uint32_t *lp
, entry_1
, entry_2
;
4959 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4960 if (!target_ldt_info
)
4961 return -TARGET_EFAULT
;
4962 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4963 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4964 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4965 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4966 if (ldt_info
.entry_number
== -1) {
4967 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4968 if (gdt_table
[i
] == 0) {
4969 ldt_info
.entry_number
= i
;
4970 target_ldt_info
->entry_number
= tswap32(i
);
4975 unlock_user_struct(target_ldt_info
, ptr
, 1);
4977 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4978 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4979 return -TARGET_EINVAL
;
4980 seg_32bit
= ldt_info
.flags
& 1;
4981 contents
= (ldt_info
.flags
>> 1) & 3;
4982 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4983 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4984 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4985 useable
= (ldt_info
.flags
>> 6) & 1;
4989 lm
= (ldt_info
.flags
>> 7) & 1;
4992 if (contents
== 3) {
4993 if (seg_not_present
== 0)
4994 return -TARGET_EINVAL
;
4997 /* NOTE: same code as Linux kernel */
4998 /* Allow LDTs to be cleared by the user. */
4999 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5000 if ((contents
== 0 &&
5001 read_exec_only
== 1 &&
5003 limit_in_pages
== 0 &&
5004 seg_not_present
== 1 &&
5012 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5013 (ldt_info
.limit
& 0x0ffff);
5014 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5015 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5016 (ldt_info
.limit
& 0xf0000) |
5017 ((read_exec_only
^ 1) << 9) |
5019 ((seg_not_present
^ 1) << 15) |
5021 (limit_in_pages
<< 23) |
5026 /* Install the new entry ... */
5028 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5029 lp
[0] = tswap32(entry_1
);
5030 lp
[1] = tswap32(entry_2
);
5034 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5036 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5037 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5038 uint32_t base_addr
, limit
, flags
;
5039 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5040 int seg_not_present
, useable
, lm
;
5041 uint32_t *lp
, entry_1
, entry_2
;
5043 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5044 if (!target_ldt_info
)
5045 return -TARGET_EFAULT
;
5046 idx
= tswap32(target_ldt_info
->entry_number
);
5047 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5048 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5049 unlock_user_struct(target_ldt_info
, ptr
, 1);
5050 return -TARGET_EINVAL
;
5052 lp
= (uint32_t *)(gdt_table
+ idx
);
5053 entry_1
= tswap32(lp
[0]);
5054 entry_2
= tswap32(lp
[1]);
5056 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5057 contents
= (entry_2
>> 10) & 3;
5058 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5059 seg_32bit
= (entry_2
>> 22) & 1;
5060 limit_in_pages
= (entry_2
>> 23) & 1;
5061 useable
= (entry_2
>> 20) & 1;
5065 lm
= (entry_2
>> 21) & 1;
5067 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5068 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5069 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5070 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5071 base_addr
= (entry_1
>> 16) |
5072 (entry_2
& 0xff000000) |
5073 ((entry_2
& 0xff) << 16);
5074 target_ldt_info
->base_addr
= tswapal(base_addr
);
5075 target_ldt_info
->limit
= tswap32(limit
);
5076 target_ldt_info
->flags
= tswap32(flags
);
5077 unlock_user_struct(target_ldt_info
, ptr
, 1);
5080 #endif /* TARGET_I386 && TARGET_ABI32 */
5082 #ifndef TARGET_ABI32
5083 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5090 case TARGET_ARCH_SET_GS
:
5091 case TARGET_ARCH_SET_FS
:
5092 if (code
== TARGET_ARCH_SET_GS
)
5096 cpu_x86_load_seg(env
, idx
, 0);
5097 env
->segs
[idx
].base
= addr
;
5099 case TARGET_ARCH_GET_GS
:
5100 case TARGET_ARCH_GET_FS
:
5101 if (code
== TARGET_ARCH_GET_GS
)
5105 val
= env
->segs
[idx
].base
;
5106 if (put_user(val
, addr
, abi_ulong
))
5107 ret
= -TARGET_EFAULT
;
5110 ret
= -TARGET_EINVAL
;
5117 #endif /* defined(TARGET_I386) */
5119 #define NEW_STACK_SIZE 0x40000
5122 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5125 pthread_mutex_t mutex
;
5126 pthread_cond_t cond
;
5129 abi_ulong child_tidptr
;
5130 abi_ulong parent_tidptr
;
5134 static void *clone_func(void *arg
)
5136 new_thread_info
*info
= arg
;
5141 rcu_register_thread();
5142 tcg_register_thread();
5144 cpu
= ENV_GET_CPU(env
);
5146 ts
= (TaskState
*)cpu
->opaque
;
5147 info
->tid
= gettid();
5149 if (info
->child_tidptr
)
5150 put_user_u32(info
->tid
, info
->child_tidptr
);
5151 if (info
->parent_tidptr
)
5152 put_user_u32(info
->tid
, info
->parent_tidptr
);
5153 /* Enable signals. */
5154 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5155 /* Signal to the parent that we're ready. */
5156 pthread_mutex_lock(&info
->mutex
);
5157 pthread_cond_broadcast(&info
->cond
);
5158 pthread_mutex_unlock(&info
->mutex
);
5159 /* Wait until the parent has finished initializing the tls state. */
5160 pthread_mutex_lock(&clone_lock
);
5161 pthread_mutex_unlock(&clone_lock
);
5167 /* do_fork() Must return host values and target errnos (unlike most
5168 do_*() functions). */
5169 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5170 abi_ulong parent_tidptr
, target_ulong newtls
,
5171 abi_ulong child_tidptr
)
5173 CPUState
*cpu
= ENV_GET_CPU(env
);
5177 CPUArchState
*new_env
;
5180 flags
&= ~CLONE_IGNORED_FLAGS
;
5182 /* Emulate vfork() with fork() */
5183 if (flags
& CLONE_VFORK
)
5184 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5186 if (flags
& CLONE_VM
) {
5187 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5188 new_thread_info info
;
5189 pthread_attr_t attr
;
5191 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5192 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5193 return -TARGET_EINVAL
;
5196 ts
= g_new0(TaskState
, 1);
5197 init_task_state(ts
);
5199 /* Grab a mutex so that thread setup appears atomic. */
5200 pthread_mutex_lock(&clone_lock
);
5202 /* we create a new CPU instance. */
5203 new_env
= cpu_copy(env
);
5204 /* Init regs that differ from the parent. */
5205 cpu_clone_regs(new_env
, newsp
);
5206 new_cpu
= ENV_GET_CPU(new_env
);
5207 new_cpu
->opaque
= ts
;
5208 ts
->bprm
= parent_ts
->bprm
;
5209 ts
->info
= parent_ts
->info
;
5210 ts
->signal_mask
= parent_ts
->signal_mask
;
5212 if (flags
& CLONE_CHILD_CLEARTID
) {
5213 ts
->child_tidptr
= child_tidptr
;
5216 if (flags
& CLONE_SETTLS
) {
5217 cpu_set_tls (new_env
, newtls
);
5220 memset(&info
, 0, sizeof(info
));
5221 pthread_mutex_init(&info
.mutex
, NULL
);
5222 pthread_mutex_lock(&info
.mutex
);
5223 pthread_cond_init(&info
.cond
, NULL
);
5225 if (flags
& CLONE_CHILD_SETTID
) {
5226 info
.child_tidptr
= child_tidptr
;
5228 if (flags
& CLONE_PARENT_SETTID
) {
5229 info
.parent_tidptr
= parent_tidptr
;
5232 ret
= pthread_attr_init(&attr
);
5233 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5234 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5235 /* It is not safe to deliver signals until the child has finished
5236 initializing, so temporarily block all signals. */
5237 sigfillset(&sigmask
);
5238 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5240 /* If this is our first additional thread, we need to ensure we
5241 * generate code for parallel execution and flush old translations.
5243 if (!parallel_cpus
) {
5244 parallel_cpus
= true;
5248 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5249 /* TODO: Free new CPU state if thread creation failed. */
5251 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5252 pthread_attr_destroy(&attr
);
5254 /* Wait for the child to initialize. */
5255 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5260 pthread_mutex_unlock(&info
.mutex
);
5261 pthread_cond_destroy(&info
.cond
);
5262 pthread_mutex_destroy(&info
.mutex
);
5263 pthread_mutex_unlock(&clone_lock
);
5265 /* if no CLONE_VM, we consider it is a fork */
5266 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5267 return -TARGET_EINVAL
;
5270 /* We can't support custom termination signals */
5271 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5272 return -TARGET_EINVAL
;
5275 if (block_signals()) {
5276 return -TARGET_ERESTARTSYS
;
5282 /* Child Process. */
5283 cpu_clone_regs(env
, newsp
);
5285 /* There is a race condition here. The parent process could
5286 theoretically read the TID in the child process before the child
5287 tid is set. This would require using either ptrace
5288 (not implemented) or having *_tidptr to point at a shared memory
5289 mapping. We can't repeat the spinlock hack used above because
5290 the child process gets its own copy of the lock. */
5291 if (flags
& CLONE_CHILD_SETTID
)
5292 put_user_u32(gettid(), child_tidptr
);
5293 if (flags
& CLONE_PARENT_SETTID
)
5294 put_user_u32(gettid(), parent_tidptr
);
5295 ts
= (TaskState
*)cpu
->opaque
;
5296 if (flags
& CLONE_SETTLS
)
5297 cpu_set_tls (env
, newtls
);
5298 if (flags
& CLONE_CHILD_CLEARTID
)
5299 ts
->child_tidptr
= child_tidptr
;
5307 /* warning : doesn't handle linux specific flags... */
5308 static int target_to_host_fcntl_cmd(int cmd
)
5313 case TARGET_F_DUPFD
:
5314 case TARGET_F_GETFD
:
5315 case TARGET_F_SETFD
:
5316 case TARGET_F_GETFL
:
5317 case TARGET_F_SETFL
:
5320 case TARGET_F_GETLK
:
5323 case TARGET_F_SETLK
:
5326 case TARGET_F_SETLKW
:
5329 case TARGET_F_GETOWN
:
5332 case TARGET_F_SETOWN
:
5335 case TARGET_F_GETSIG
:
5338 case TARGET_F_SETSIG
:
5341 #if TARGET_ABI_BITS == 32
5342 case TARGET_F_GETLK64
:
5345 case TARGET_F_SETLK64
:
5348 case TARGET_F_SETLKW64
:
5352 case TARGET_F_SETLEASE
:
5355 case TARGET_F_GETLEASE
:
5358 #ifdef F_DUPFD_CLOEXEC
5359 case TARGET_F_DUPFD_CLOEXEC
:
5360 ret
= F_DUPFD_CLOEXEC
;
5363 case TARGET_F_NOTIFY
:
5367 case TARGET_F_GETOWN_EX
:
5372 case TARGET_F_SETOWN_EX
:
5377 case TARGET_F_SETPIPE_SZ
:
5380 case TARGET_F_GETPIPE_SZ
:
5385 ret
= -TARGET_EINVAL
;
5389 #if defined(__powerpc64__)
5390 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5391 * is not supported by kernel. The glibc fcntl call actually adjusts
5392 * them to 5, 6 and 7 before making the syscall(). Since we make the
5393 * syscall directly, adjust to what is supported by the kernel.
5395 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
5396 ret
-= F_GETLK64
- 5;
5403 #define FLOCK_TRANSTBL \
5405 TRANSTBL_CONVERT(F_RDLCK); \
5406 TRANSTBL_CONVERT(F_WRLCK); \
5407 TRANSTBL_CONVERT(F_UNLCK); \
5408 TRANSTBL_CONVERT(F_EXLCK); \
5409 TRANSTBL_CONVERT(F_SHLCK); \
5412 static int target_to_host_flock(int type
)
5414 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5416 #undef TRANSTBL_CONVERT
5417 return -TARGET_EINVAL
;
5420 static int host_to_target_flock(int type
)
5422 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5424 #undef TRANSTBL_CONVERT
5425 /* if we don't know how to convert the value coming
5426 * from the host we copy to the target field as-is
5431 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5432 abi_ulong target_flock_addr
)
5434 struct target_flock
*target_fl
;
5437 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5438 return -TARGET_EFAULT
;
5441 __get_user(l_type
, &target_fl
->l_type
);
5442 l_type
= target_to_host_flock(l_type
);
5446 fl
->l_type
= l_type
;
5447 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5448 __get_user(fl
->l_start
, &target_fl
->l_start
);
5449 __get_user(fl
->l_len
, &target_fl
->l_len
);
5450 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5451 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5455 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5456 const struct flock64
*fl
)
5458 struct target_flock
*target_fl
;
5461 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5462 return -TARGET_EFAULT
;
5465 l_type
= host_to_target_flock(fl
->l_type
);
5466 __put_user(l_type
, &target_fl
->l_type
);
5467 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5468 __put_user(fl
->l_start
, &target_fl
->l_start
);
5469 __put_user(fl
->l_len
, &target_fl
->l_len
);
5470 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5471 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5475 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5476 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
5478 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5479 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
5480 abi_ulong target_flock_addr
)
5482 struct target_oabi_flock64
*target_fl
;
5485 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5486 return -TARGET_EFAULT
;
5489 __get_user(l_type
, &target_fl
->l_type
);
5490 l_type
= target_to_host_flock(l_type
);
5494 fl
->l_type
= l_type
;
5495 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5496 __get_user(fl
->l_start
, &target_fl
->l_start
);
5497 __get_user(fl
->l_len
, &target_fl
->l_len
);
5498 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5499 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5503 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
5504 const struct flock64
*fl
)
5506 struct target_oabi_flock64
*target_fl
;
5509 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5510 return -TARGET_EFAULT
;
5513 l_type
= host_to_target_flock(fl
->l_type
);
5514 __put_user(l_type
, &target_fl
->l_type
);
5515 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5516 __put_user(fl
->l_start
, &target_fl
->l_start
);
5517 __put_user(fl
->l_len
, &target_fl
->l_len
);
5518 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5519 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5524 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5525 abi_ulong target_flock_addr
)
5527 struct target_flock64
*target_fl
;
5530 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5531 return -TARGET_EFAULT
;
5534 __get_user(l_type
, &target_fl
->l_type
);
5535 l_type
= target_to_host_flock(l_type
);
5539 fl
->l_type
= l_type
;
5540 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5541 __get_user(fl
->l_start
, &target_fl
->l_start
);
5542 __get_user(fl
->l_len
, &target_fl
->l_len
);
5543 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5544 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5548 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5549 const struct flock64
*fl
)
5551 struct target_flock64
*target_fl
;
5554 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5555 return -TARGET_EFAULT
;
5558 l_type
= host_to_target_flock(fl
->l_type
);
5559 __put_user(l_type
, &target_fl
->l_type
);
5560 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5561 __put_user(fl
->l_start
, &target_fl
->l_start
);
5562 __put_user(fl
->l_len
, &target_fl
->l_len
);
5563 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5564 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5568 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5570 struct flock64 fl64
;
5572 struct f_owner_ex fox
;
5573 struct target_f_owner_ex
*target_fox
;
5576 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5578 if (host_cmd
== -TARGET_EINVAL
)
5582 case TARGET_F_GETLK
:
5583 ret
= copy_from_user_flock(&fl64
, arg
);
5587 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5589 ret
= copy_to_user_flock(arg
, &fl64
);
5593 case TARGET_F_SETLK
:
5594 case TARGET_F_SETLKW
:
5595 ret
= copy_from_user_flock(&fl64
, arg
);
5599 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5602 case TARGET_F_GETLK64
:
5603 ret
= copy_from_user_flock64(&fl64
, arg
);
5607 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5609 ret
= copy_to_user_flock64(arg
, &fl64
);
5612 case TARGET_F_SETLK64
:
5613 case TARGET_F_SETLKW64
:
5614 ret
= copy_from_user_flock64(&fl64
, arg
);
5618 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5621 case TARGET_F_GETFL
:
5622 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5624 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5628 case TARGET_F_SETFL
:
5629 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
5630 target_to_host_bitmask(arg
,
5635 case TARGET_F_GETOWN_EX
:
5636 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5638 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5639 return -TARGET_EFAULT
;
5640 target_fox
->type
= tswap32(fox
.type
);
5641 target_fox
->pid
= tswap32(fox
.pid
);
5642 unlock_user_struct(target_fox
, arg
, 1);
5648 case TARGET_F_SETOWN_EX
:
5649 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5650 return -TARGET_EFAULT
;
5651 fox
.type
= tswap32(target_fox
->type
);
5652 fox
.pid
= tswap32(target_fox
->pid
);
5653 unlock_user_struct(target_fox
, arg
, 0);
5654 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5658 case TARGET_F_SETOWN
:
5659 case TARGET_F_GETOWN
:
5660 case TARGET_F_SETSIG
:
5661 case TARGET_F_GETSIG
:
5662 case TARGET_F_SETLEASE
:
5663 case TARGET_F_GETLEASE
:
5664 case TARGET_F_SETPIPE_SZ
:
5665 case TARGET_F_GETPIPE_SZ
:
5666 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5670 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
5678 static inline int high2lowuid(int uid
)
5686 static inline int high2lowgid(int gid
)
5694 static inline int low2highuid(int uid
)
5696 if ((int16_t)uid
== -1)
5702 static inline int low2highgid(int gid
)
5704 if ((int16_t)gid
== -1)
5709 static inline int tswapid(int id
)
5714 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5716 #else /* !USE_UID16 */
5717 static inline int high2lowuid(int uid
)
5721 static inline int high2lowgid(int gid
)
5725 static inline int low2highuid(int uid
)
5729 static inline int low2highgid(int gid
)
5733 static inline int tswapid(int id
)
5738 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5740 #endif /* USE_UID16 */
5742 /* We must do direct syscalls for setting UID/GID, because we want to
5743 * implement the Linux system call semantics of "change only for this thread",
5744 * not the libc/POSIX semantics of "change for all threads in process".
5745 * (See http://ewontfix.com/17/ for more details.)
5746 * We use the 32-bit version of the syscalls if present; if it is not
5747 * then either the host architecture supports 32-bit UIDs natively with
5748 * the standard syscall, or the 16-bit UID is the best we can do.
5750 #ifdef __NR_setuid32
5751 #define __NR_sys_setuid __NR_setuid32
5753 #define __NR_sys_setuid __NR_setuid
5755 #ifdef __NR_setgid32
5756 #define __NR_sys_setgid __NR_setgid32
5758 #define __NR_sys_setgid __NR_setgid
5760 #ifdef __NR_setresuid32
5761 #define __NR_sys_setresuid __NR_setresuid32
5763 #define __NR_sys_setresuid __NR_setresuid
5765 #ifdef __NR_setresgid32
5766 #define __NR_sys_setresgid __NR_setresgid32
5768 #define __NR_sys_setresgid __NR_setresgid
5771 _syscall1(int, sys_setuid
, uid_t
, uid
)
5772 _syscall1(int, sys_setgid
, gid_t
, gid
)
5773 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
5774 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
5776 void syscall_init(void)
5779 const argtype
*arg_type
;
5783 thunk_init(STRUCT_MAX
);
5785 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5786 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5787 #include "syscall_types.h"
5789 #undef STRUCT_SPECIAL
5791 /* Build target_to_host_errno_table[] table from
5792 * host_to_target_errno_table[]. */
5793 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5794 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5797 /* we patch the ioctl size if necessary. We rely on the fact that
5798 no ioctl has all the bits at '1' in the size field */
5800 while (ie
->target_cmd
!= 0) {
5801 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5802 TARGET_IOC_SIZEMASK
) {
5803 arg_type
= ie
->arg_type
;
5804 if (arg_type
[0] != TYPE_PTR
) {
5805 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5810 size
= thunk_type_size(arg_type
, 0);
5811 ie
->target_cmd
= (ie
->target_cmd
&
5812 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5813 (size
<< TARGET_IOC_SIZESHIFT
);
5816 /* automatic consistency check if same arch */
5817 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5818 (defined(__x86_64__) && defined(TARGET_X86_64))
5819 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5820 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5821 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
5828 #if TARGET_ABI_BITS == 32
/* Combine two 32-bit register halves of a 64-bit file offset into one
 * 64-bit value, honouring the target's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
5837 #else /* TARGET_ABI_BITS == 32 */
5838 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
5842 #endif /* TARGET_ABI_BITS != 32 */
5844 #ifdef TARGET_NR_truncate64
5845 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
5850 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
5854 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
5858 #ifdef TARGET_NR_ftruncate64
5859 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
5864 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
5868 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
5872 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5873 abi_ulong target_addr
)
5875 struct target_timespec
*target_ts
;
5877 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5878 return -TARGET_EFAULT
;
5879 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5880 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5881 unlock_user_struct(target_ts
, target_addr
, 0);
5885 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5886 struct timespec
*host_ts
)
5888 struct target_timespec
*target_ts
;
5890 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5891 return -TARGET_EFAULT
;
5892 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
5893 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
5894 unlock_user_struct(target_ts
, target_addr
, 1);
5898 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5899 abi_ulong target_addr
)
5901 struct target_itimerspec
*target_itspec
;
5903 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5904 return -TARGET_EFAULT
;
5907 host_itspec
->it_interval
.tv_sec
=
5908 tswapal(target_itspec
->it_interval
.tv_sec
);
5909 host_itspec
->it_interval
.tv_nsec
=
5910 tswapal(target_itspec
->it_interval
.tv_nsec
);
5911 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5912 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5914 unlock_user_struct(target_itspec
, target_addr
, 1);
5918 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5919 struct itimerspec
*host_its
)
5921 struct target_itimerspec
*target_itspec
;
5923 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5924 return -TARGET_EFAULT
;
5927 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5928 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5930 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5931 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5933 unlock_user_struct(target_itspec
, target_addr
, 0);
5937 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
5938 abi_long target_addr
)
5940 struct target_timex
*target_tx
;
5942 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
5943 return -TARGET_EFAULT
;
5946 __get_user(host_tx
->modes
, &target_tx
->modes
);
5947 __get_user(host_tx
->offset
, &target_tx
->offset
);
5948 __get_user(host_tx
->freq
, &target_tx
->freq
);
5949 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
5950 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
5951 __get_user(host_tx
->status
, &target_tx
->status
);
5952 __get_user(host_tx
->constant
, &target_tx
->constant
);
5953 __get_user(host_tx
->precision
, &target_tx
->precision
);
5954 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
5955 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
5956 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
5957 __get_user(host_tx
->tick
, &target_tx
->tick
);
5958 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
5959 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
5960 __get_user(host_tx
->shift
, &target_tx
->shift
);
5961 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
5962 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
5963 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
5964 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
5965 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
5966 __get_user(host_tx
->tai
, &target_tx
->tai
);
5968 unlock_user_struct(target_tx
, target_addr
, 0);
5972 static inline abi_long
host_to_target_timex(abi_long target_addr
,
5973 struct timex
*host_tx
)
5975 struct target_timex
*target_tx
;
5977 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
5978 return -TARGET_EFAULT
;
5981 __put_user(host_tx
->modes
, &target_tx
->modes
);
5982 __put_user(host_tx
->offset
, &target_tx
->offset
);
5983 __put_user(host_tx
->freq
, &target_tx
->freq
);
5984 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
5985 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
5986 __put_user(host_tx
->status
, &target_tx
->status
);
5987 __put_user(host_tx
->constant
, &target_tx
->constant
);
5988 __put_user(host_tx
->precision
, &target_tx
->precision
);
5989 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
5990 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
5991 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
5992 __put_user(host_tx
->tick
, &target_tx
->tick
);
5993 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
5994 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
5995 __put_user(host_tx
->shift
, &target_tx
->shift
);
5996 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
5997 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
5998 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
5999 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6000 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6001 __put_user(host_tx
->tai
, &target_tx
->tai
);
6003 unlock_user_struct(target_tx
, target_addr
, 1);
/*
 * Convert a guest 'struct target_sigevent' at target_addr into the
 * host 'struct sigevent' pointed to by host_sevp.
 *
 * Returns 0 on success, or -TARGET_EFAULT if the guest struct cannot
 * be locked for reading.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    /* Signal numbers differ between guest and host ABIs; remap. */
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
6034 #if defined(TARGET_NR_mlockall)
6035 static inline int target_to_host_mlockall_arg(int arg
)
6039 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6040 result
|= MCL_CURRENT
;
6042 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6043 result
|= MCL_FUTURE
;
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host 'struct stat' out to the guest's 64-bit stat structure
 * at target_addr.  The exact guest layout depends on the target:
 * 32-bit ARM EABI uses target_eabi_stat64, other targets use either
 * target_stat64 or target_stat.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * locked for writing.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        /* ARM EABI has its own 64-bit stat layout. */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first: the guest struct may have padding/fields we
           never fill in. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs carry the inode twice; mirror it into __st_ino. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
/*
 * Emulate the futex(2) syscall.  Dispatches on the command bits of
 * 'op' and forwards to the host futex via safe_futex(), converting
 * guest-pointer and byte-order arguments as required per command.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* 'val' is compared against guest memory, so it must be
           byte-swapped to match. */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    /* Only CMP_REQUEUE compares val3
                                       against guest memory. */
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).  Reads the guest-supplied handle_bytes
 * field, performs the host syscall into a scratch host file_handle, and
 * copies the (opaque) result back to the guest, byte-swapping only the
 * two documented header fields.
 *
 * Returns the host result converted by get_errno(), or -TARGET_EFAULT
 * on any guest-memory access failure.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First word of the guest struct is handle_bytes (caller-supplied
       buffer size). */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Do the host call into a private buffer, then copy out. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2).  Copies the guest's file_handle into a
 * host-side duplicate (fixing up the byte order of the header fields)
 * and opens it with host-converted open flags.
 *
 * Returns the new fd (via get_errno()) or -TARGET_EFAULT on guest
 * memory access failure.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest struct. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate so we can byte-swap the header without touching the
       guest's copy; the payload is opaque and copied verbatim. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Emulate signalfd4(2) (and signalfd(2) with flags == 0).  Converts
 * the guest signal mask and flag bits to host encoding, performs the
 * host call, and registers the fd translator so that reads of the
 * resulting fd get their signalfd_siginfo converted back to guest
 * layout.
 *
 * Returns the new fd, -TARGET_EINVAL for unsupported flag bits, or
 * -TARGET_EFAULT if the mask cannot be read from guest memory.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    abi_long ret;
    target_sigset_t *target_mask;
    sigset_t host_mask;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; remap just those. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/*
 * Back the guest's /proc/self/cmdline with the argv recorded at exec
 * time: write each argument, including its trailing NUL, to fd.
 *
 * Returns 0 on success, -1 if a write fails or is short.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 keeps the NUL separator, matching real /proc layout. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Back the guest's /proc/self/maps: parse the host's own maps file and
 * emit only those ranges that are valid guest addresses, rewritten
 * into guest (h2g) address space.  The guest stack region is labelled
 * explicitly since its host path (if any) is meaningless to the guest.
 *
 * Returns 0 on success, -1 if the host maps file cannot be opened.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        /* path is optional: 10 fields for anonymous maps, 11 with path. */
        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clamp the end to the last valid guest address. */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
/*
 * Back the guest's /proc/self/stat.  Only the fields a guest usually
 * cares about are real: pid (0), comm (1), and start of stack (27).
 * Every other of the 44 space-separated fields is written as "0".
 *
 * Returns 0 on success, -1 on a failed or short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Back the guest's /proc/self/auxv: copy the auxiliary vector that was
 * saved on the guest stack at exec time out to fd, then rewind the fd
 * so the guest reads from the start.
 *
 * Always returns 0; if the auxv region cannot be locked, nothing is
 * written (best effort, matching the original behavior).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return nonzero if 'filename' refers to this process's own proc entry
 * named 'entry': matches "/proc/self/<entry>" and "/proc/<pid>/<entry>"
 * where <pid> is our own pid.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            /* Numeric directory: only matches if it is our own pid. */
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
6457 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Comparator for proc entries that are matched by their full path
 * (used when host and guest endianness differ): exact string match.
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/*
 * Back the guest's /proc/net/route when host and guest endianness
 * differ: copy the host file through, byte-swapping the address
 * columns (destination, gateway, mask) which the kernel prints in
 * host byte order.
 *
 * Returns 0 on success, -1 if the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        /* Only the three address fields need swapping for the guest. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
/*
 * openat(2) emulation with special handling for proc entries whose
 * host contents would be wrong for the guest:
 *  - "/proc/self/exe" (and /proc/<ourpid>/exe) opens the real guest
 *    executable rather than the qemu binary;
 *  - maps/stat/auxv/cmdline (and, cross-endian, /proc/net/route) are
 *    synthesized into an unlinked temporary file by a fill callback.
 * Everything else falls through to the host openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    /* Table of faked proc entries: name, content generator, and the
       matcher used to compare 'pathname' against the name. */
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the kernel gave us at exec time, if any. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir) {
            tmpdir = "/tmp";
        }
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
/* Guest-visible timer IDs are the table index tagged with a magic
   value in the top 16 bits, so stale/garbage IDs can be rejected. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* Strip the magic tag, leaving the table index. */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
/*
 * Read a guest CPU-affinity bitmask (an array of abi_ulong words at
 * target_addr, target_size bytes long) into a host 'unsigned long'
 * bitmask of host_size bytes, translating bit positions between the
 * two word sizes.  The host buffer must be at least as large as the
 * guest one; extra host bytes are zeroed.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest mask cannot be
 * locked for reading.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        /* Re-scatter each guest bit into the host-word layout. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_cpu_mask(): write a host CPU-affinity
 * bitmask out to a guest abi_ulong-array bitmask at target_addr,
 * translating bit positions between the two word sizes.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * locked for writing.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        /* Gather the host bits that fall into this guest word. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
6649 /* This is an internal helper for do_syscall so that it is easier
6650 * to have a single return point, so that actions, such as logging
6651 * of syscall results, can be performed.
6652 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6654 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
6655 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6656 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6659 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6661 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6662 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6663 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6666 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6667 || defined(TARGET_NR_fstatfs)
6673 case TARGET_NR_exit
:
6674 /* In old applications this may be used to implement _exit(2).
6675 However in threaded applictions it is used for thread termination,
6676 and _exit_group is used for application termination.
6677 Do thread termination if we have more then one thread. */
6679 if (block_signals()) {
6680 return -TARGET_ERESTARTSYS
;
6685 if (CPU_NEXT(first_cpu
)) {
6688 /* Remove the CPU from the list. */
6689 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
6694 if (ts
->child_tidptr
) {
6695 put_user_u32(0, ts
->child_tidptr
);
6696 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6700 object_unref(OBJECT(cpu
));
6702 rcu_unregister_thread();
6707 preexit_cleanup(cpu_env
, arg1
);
6709 return 0; /* avoid warning */
6710 case TARGET_NR_read
:
6714 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6715 return -TARGET_EFAULT
;
6716 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6718 fd_trans_host_to_target_data(arg1
)) {
6719 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6721 unlock_user(p
, arg2
, ret
);
6724 case TARGET_NR_write
:
6725 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6726 return -TARGET_EFAULT
;
6727 if (fd_trans_target_to_host_data(arg1
)) {
6728 void *copy
= g_malloc(arg3
);
6729 memcpy(copy
, p
, arg3
);
6730 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
6732 ret
= get_errno(safe_write(arg1
, copy
, ret
));
6736 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6738 unlock_user(p
, arg2
, 0);
6741 #ifdef TARGET_NR_open
6742 case TARGET_NR_open
:
6743 if (!(p
= lock_user_string(arg1
)))
6744 return -TARGET_EFAULT
;
6745 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6746 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6748 fd_trans_unregister(ret
);
6749 unlock_user(p
, arg1
, 0);
6752 case TARGET_NR_openat
:
6753 if (!(p
= lock_user_string(arg2
)))
6754 return -TARGET_EFAULT
;
6755 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6756 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6758 fd_trans_unregister(ret
);
6759 unlock_user(p
, arg2
, 0);
6761 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6762 case TARGET_NR_name_to_handle_at
:
6763 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6766 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6767 case TARGET_NR_open_by_handle_at
:
6768 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6769 fd_trans_unregister(ret
);
6772 case TARGET_NR_close
:
6773 fd_trans_unregister(arg1
);
6774 return get_errno(close(arg1
));
6777 return do_brk(arg1
);
6778 #ifdef TARGET_NR_fork
6779 case TARGET_NR_fork
:
6780 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
6782 #ifdef TARGET_NR_waitpid
6783 case TARGET_NR_waitpid
:
6786 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6787 if (!is_error(ret
) && arg2
&& ret
6788 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6789 return -TARGET_EFAULT
;
6793 #ifdef TARGET_NR_waitid
6794 case TARGET_NR_waitid
:
6798 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6799 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6800 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6801 return -TARGET_EFAULT
;
6802 host_to_target_siginfo(p
, &info
);
6803 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6808 #ifdef TARGET_NR_creat /* not on alpha */
6809 case TARGET_NR_creat
:
6810 if (!(p
= lock_user_string(arg1
)))
6811 return -TARGET_EFAULT
;
6812 ret
= get_errno(creat(p
, arg2
));
6813 fd_trans_unregister(ret
);
6814 unlock_user(p
, arg1
, 0);
6817 #ifdef TARGET_NR_link
6818 case TARGET_NR_link
:
6821 p
= lock_user_string(arg1
);
6822 p2
= lock_user_string(arg2
);
6824 ret
= -TARGET_EFAULT
;
6826 ret
= get_errno(link(p
, p2
));
6827 unlock_user(p2
, arg2
, 0);
6828 unlock_user(p
, arg1
, 0);
6832 #if defined(TARGET_NR_linkat)
6833 case TARGET_NR_linkat
:
6837 return -TARGET_EFAULT
;
6838 p
= lock_user_string(arg2
);
6839 p2
= lock_user_string(arg4
);
6841 ret
= -TARGET_EFAULT
;
6843 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6844 unlock_user(p
, arg2
, 0);
6845 unlock_user(p2
, arg4
, 0);
6849 #ifdef TARGET_NR_unlink
6850 case TARGET_NR_unlink
:
6851 if (!(p
= lock_user_string(arg1
)))
6852 return -TARGET_EFAULT
;
6853 ret
= get_errno(unlink(p
));
6854 unlock_user(p
, arg1
, 0);
6857 #if defined(TARGET_NR_unlinkat)
6858 case TARGET_NR_unlinkat
:
6859 if (!(p
= lock_user_string(arg2
)))
6860 return -TARGET_EFAULT
;
6861 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6862 unlock_user(p
, arg2
, 0);
6865 case TARGET_NR_execve
:
6867 char **argp
, **envp
;
6870 abi_ulong guest_argp
;
6871 abi_ulong guest_envp
;
6878 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6879 if (get_user_ual(addr
, gp
))
6880 return -TARGET_EFAULT
;
6887 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6888 if (get_user_ual(addr
, gp
))
6889 return -TARGET_EFAULT
;
6895 argp
= g_new0(char *, argc
+ 1);
6896 envp
= g_new0(char *, envc
+ 1);
6898 for (gp
= guest_argp
, q
= argp
; gp
;
6899 gp
+= sizeof(abi_ulong
), q
++) {
6900 if (get_user_ual(addr
, gp
))
6904 if (!(*q
= lock_user_string(addr
)))
6906 total_size
+= strlen(*q
) + 1;
6910 for (gp
= guest_envp
, q
= envp
; gp
;
6911 gp
+= sizeof(abi_ulong
), q
++) {
6912 if (get_user_ual(addr
, gp
))
6916 if (!(*q
= lock_user_string(addr
)))
6918 total_size
+= strlen(*q
) + 1;
6922 if (!(p
= lock_user_string(arg1
)))
6924 /* Although execve() is not an interruptible syscall it is
6925 * a special case where we must use the safe_syscall wrapper:
6926 * if we allow a signal to happen before we make the host
6927 * syscall then we will 'lose' it, because at the point of
6928 * execve the process leaves QEMU's control. So we use the
6929 * safe syscall wrapper to ensure that we either take the
6930 * signal as a guest signal, or else it does not happen
6931 * before the execve completes and makes it the other
6932 * program's problem.
6934 ret
= get_errno(safe_execve(p
, argp
, envp
));
6935 unlock_user(p
, arg1
, 0);
6940 ret
= -TARGET_EFAULT
;
6943 for (gp
= guest_argp
, q
= argp
; *q
;
6944 gp
+= sizeof(abi_ulong
), q
++) {
6945 if (get_user_ual(addr
, gp
)
6948 unlock_user(*q
, addr
, 0);
6950 for (gp
= guest_envp
, q
= envp
; *q
;
6951 gp
+= sizeof(abi_ulong
), q
++) {
6952 if (get_user_ual(addr
, gp
)
6955 unlock_user(*q
, addr
, 0);
6962 case TARGET_NR_chdir
:
6963 if (!(p
= lock_user_string(arg1
)))
6964 return -TARGET_EFAULT
;
6965 ret
= get_errno(chdir(p
));
6966 unlock_user(p
, arg1
, 0);
6968 #ifdef TARGET_NR_time
6969 case TARGET_NR_time
:
6972 ret
= get_errno(time(&host_time
));
6975 && put_user_sal(host_time
, arg1
))
6976 return -TARGET_EFAULT
;
6980 #ifdef TARGET_NR_mknod
6981 case TARGET_NR_mknod
:
6982 if (!(p
= lock_user_string(arg1
)))
6983 return -TARGET_EFAULT
;
6984 ret
= get_errno(mknod(p
, arg2
, arg3
));
6985 unlock_user(p
, arg1
, 0);
6988 #if defined(TARGET_NR_mknodat)
6989 case TARGET_NR_mknodat
:
6990 if (!(p
= lock_user_string(arg2
)))
6991 return -TARGET_EFAULT
;
6992 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6993 unlock_user(p
, arg2
, 0);
6996 #ifdef TARGET_NR_chmod
6997 case TARGET_NR_chmod
:
6998 if (!(p
= lock_user_string(arg1
)))
6999 return -TARGET_EFAULT
;
7000 ret
= get_errno(chmod(p
, arg2
));
7001 unlock_user(p
, arg1
, 0);
7004 #ifdef TARGET_NR_lseek
7005 case TARGET_NR_lseek
:
7006 return get_errno(lseek(arg1
, arg2
, arg3
));
7008 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7009 /* Alpha specific */
7010 case TARGET_NR_getxpid
:
7011 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7012 return get_errno(getpid());
7014 #ifdef TARGET_NR_getpid
7015 case TARGET_NR_getpid
:
7016 return get_errno(getpid());
7018 case TARGET_NR_mount
:
7020 /* need to look at the data field */
7024 p
= lock_user_string(arg1
);
7026 return -TARGET_EFAULT
;
7032 p2
= lock_user_string(arg2
);
7035 unlock_user(p
, arg1
, 0);
7037 return -TARGET_EFAULT
;
7041 p3
= lock_user_string(arg3
);
7044 unlock_user(p
, arg1
, 0);
7046 unlock_user(p2
, arg2
, 0);
7047 return -TARGET_EFAULT
;
7053 /* FIXME - arg5 should be locked, but it isn't clear how to
7054 * do that since it's not guaranteed to be a NULL-terminated
7058 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7060 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7062 ret
= get_errno(ret
);
7065 unlock_user(p
, arg1
, 0);
7067 unlock_user(p2
, arg2
, 0);
7069 unlock_user(p3
, arg3
, 0);
7073 #ifdef TARGET_NR_umount
7074 case TARGET_NR_umount
:
7075 if (!(p
= lock_user_string(arg1
)))
7076 return -TARGET_EFAULT
;
7077 ret
= get_errno(umount(p
));
7078 unlock_user(p
, arg1
, 0);
7081 #ifdef TARGET_NR_stime /* not on alpha */
7082 case TARGET_NR_stime
:
7085 if (get_user_sal(host_time
, arg1
))
7086 return -TARGET_EFAULT
;
7087 return get_errno(stime(&host_time
));
7090 #ifdef TARGET_NR_alarm /* not on alpha */
7091 case TARGET_NR_alarm
:
7094 #ifdef TARGET_NR_pause /* not on alpha */
7095 case TARGET_NR_pause
:
7096 if (!block_signals()) {
7097 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7099 return -TARGET_EINTR
;
7101 #ifdef TARGET_NR_utime
7102 case TARGET_NR_utime
:
7104 struct utimbuf tbuf
, *host_tbuf
;
7105 struct target_utimbuf
*target_tbuf
;
7107 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7108 return -TARGET_EFAULT
;
7109 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7110 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7111 unlock_user_struct(target_tbuf
, arg2
, 0);
7116 if (!(p
= lock_user_string(arg1
)))
7117 return -TARGET_EFAULT
;
7118 ret
= get_errno(utime(p
, host_tbuf
));
7119 unlock_user(p
, arg1
, 0);
7123 #ifdef TARGET_NR_utimes
7124 case TARGET_NR_utimes
:
7126 struct timeval
*tvp
, tv
[2];
7128 if (copy_from_user_timeval(&tv
[0], arg2
)
7129 || copy_from_user_timeval(&tv
[1],
7130 arg2
+ sizeof(struct target_timeval
)))
7131 return -TARGET_EFAULT
;
7136 if (!(p
= lock_user_string(arg1
)))
7137 return -TARGET_EFAULT
;
7138 ret
= get_errno(utimes(p
, tvp
));
7139 unlock_user(p
, arg1
, 0);
7143 #if defined(TARGET_NR_futimesat)
7144 case TARGET_NR_futimesat
:
7146 struct timeval
*tvp
, tv
[2];
7148 if (copy_from_user_timeval(&tv
[0], arg3
)
7149 || copy_from_user_timeval(&tv
[1],
7150 arg3
+ sizeof(struct target_timeval
)))
7151 return -TARGET_EFAULT
;
7156 if (!(p
= lock_user_string(arg2
))) {
7157 return -TARGET_EFAULT
;
7159 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7160 unlock_user(p
, arg2
, 0);
7164 #ifdef TARGET_NR_access
7165 case TARGET_NR_access
:
7166 if (!(p
= lock_user_string(arg1
))) {
7167 return -TARGET_EFAULT
;
7169 ret
= get_errno(access(path(p
), arg2
));
7170 unlock_user(p
, arg1
, 0);
7173 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7174 case TARGET_NR_faccessat
:
7175 if (!(p
= lock_user_string(arg2
))) {
7176 return -TARGET_EFAULT
;
7178 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7179 unlock_user(p
, arg2
, 0);
7182 #ifdef TARGET_NR_nice /* not on alpha */
7183 case TARGET_NR_nice
:
7184 return get_errno(nice(arg1
));
7186 case TARGET_NR_sync
:
7189 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7190 case TARGET_NR_syncfs
:
7191 return get_errno(syncfs(arg1
));
7193 case TARGET_NR_kill
:
7194 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7195 #ifdef TARGET_NR_rename
7196 case TARGET_NR_rename
:
7199 p
= lock_user_string(arg1
);
7200 p2
= lock_user_string(arg2
);
7202 ret
= -TARGET_EFAULT
;
7204 ret
= get_errno(rename(p
, p2
));
7205 unlock_user(p2
, arg2
, 0);
7206 unlock_user(p
, arg1
, 0);
7210 #if defined(TARGET_NR_renameat)
7211 case TARGET_NR_renameat
:
7214 p
= lock_user_string(arg2
);
7215 p2
= lock_user_string(arg4
);
7217 ret
= -TARGET_EFAULT
;
7219 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7220 unlock_user(p2
, arg4
, 0);
7221 unlock_user(p
, arg2
, 0);
7225 #if defined(TARGET_NR_renameat2)
7226 case TARGET_NR_renameat2
:
7229 p
= lock_user_string(arg2
);
7230 p2
= lock_user_string(arg4
);
7232 ret
= -TARGET_EFAULT
;
7234 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7236 unlock_user(p2
, arg4
, 0);
7237 unlock_user(p
, arg2
, 0);
7241 #ifdef TARGET_NR_mkdir
7242 case TARGET_NR_mkdir
:
7243 if (!(p
= lock_user_string(arg1
)))
7244 return -TARGET_EFAULT
;
7245 ret
= get_errno(mkdir(p
, arg2
));
7246 unlock_user(p
, arg1
, 0);
7249 #if defined(TARGET_NR_mkdirat)
7250 case TARGET_NR_mkdirat
:
7251 if (!(p
= lock_user_string(arg2
)))
7252 return -TARGET_EFAULT
;
7253 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7254 unlock_user(p
, arg2
, 0);
7257 #ifdef TARGET_NR_rmdir
7258 case TARGET_NR_rmdir
:
7259 if (!(p
= lock_user_string(arg1
)))
7260 return -TARGET_EFAULT
;
7261 ret
= get_errno(rmdir(p
));
7262 unlock_user(p
, arg1
, 0);
7266 ret
= get_errno(dup(arg1
));
7268 fd_trans_dup(arg1
, ret
);
7271 #ifdef TARGET_NR_pipe
7272 case TARGET_NR_pipe
:
7273 return do_pipe(cpu_env
, arg1
, 0, 0);
7275 #ifdef TARGET_NR_pipe2
7276 case TARGET_NR_pipe2
:
7277 return do_pipe(cpu_env
, arg1
,
7278 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7280 case TARGET_NR_times
:
7282 struct target_tms
*tmsp
;
7284 ret
= get_errno(times(&tms
));
7286 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7288 return -TARGET_EFAULT
;
7289 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7290 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7291 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7292 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7295 ret
= host_to_target_clock_t(ret
);
7298 case TARGET_NR_acct
:
7300 ret
= get_errno(acct(NULL
));
7302 if (!(p
= lock_user_string(arg1
))) {
7303 return -TARGET_EFAULT
;
7305 ret
= get_errno(acct(path(p
)));
7306 unlock_user(p
, arg1
, 0);
7309 #ifdef TARGET_NR_umount2
7310 case TARGET_NR_umount2
:
7311 if (!(p
= lock_user_string(arg1
)))
7312 return -TARGET_EFAULT
;
7313 ret
= get_errno(umount2(p
, arg2
));
7314 unlock_user(p
, arg1
, 0);
7317 case TARGET_NR_ioctl
:
7318 return do_ioctl(arg1
, arg2
, arg3
);
7319 #ifdef TARGET_NR_fcntl
7320 case TARGET_NR_fcntl
:
7321 return do_fcntl(arg1
, arg2
, arg3
);
7323 case TARGET_NR_setpgid
:
7324 return get_errno(setpgid(arg1
, arg2
));
7325 case TARGET_NR_umask
:
7326 return get_errno(umask(arg1
));
7327 case TARGET_NR_chroot
:
7328 if (!(p
= lock_user_string(arg1
)))
7329 return -TARGET_EFAULT
;
7330 ret
= get_errno(chroot(p
));
7331 unlock_user(p
, arg1
, 0);
7333 #ifdef TARGET_NR_dup2
7334 case TARGET_NR_dup2
:
7335 ret
= get_errno(dup2(arg1
, arg2
));
7337 fd_trans_dup(arg1
, arg2
);
7341 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7342 case TARGET_NR_dup3
:
7346 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
7349 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
7350 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
7352 fd_trans_dup(arg1
, arg2
);
7357 #ifdef TARGET_NR_getppid /* not on alpha */
7358 case TARGET_NR_getppid
:
7359 return get_errno(getppid());
7361 #ifdef TARGET_NR_getpgrp
7362 case TARGET_NR_getpgrp
:
7363 return get_errno(getpgrp());
7365 case TARGET_NR_setsid
:
7366 return get_errno(setsid());
7367 #ifdef TARGET_NR_sigaction
7368 case TARGET_NR_sigaction
:
7370 #if defined(TARGET_ALPHA)
7371 struct target_sigaction act
, oact
, *pact
= 0;
7372 struct target_old_sigaction
*old_act
;
7374 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7375 return -TARGET_EFAULT
;
7376 act
._sa_handler
= old_act
->_sa_handler
;
7377 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7378 act
.sa_flags
= old_act
->sa_flags
;
7379 act
.sa_restorer
= 0;
7380 unlock_user_struct(old_act
, arg2
, 0);
7383 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7384 if (!is_error(ret
) && arg3
) {
7385 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7386 return -TARGET_EFAULT
;
7387 old_act
->_sa_handler
= oact
._sa_handler
;
7388 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7389 old_act
->sa_flags
= oact
.sa_flags
;
7390 unlock_user_struct(old_act
, arg3
, 1);
7392 #elif defined(TARGET_MIPS)
7393 struct target_sigaction act
, oact
, *pact
, *old_act
;
7396 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7397 return -TARGET_EFAULT
;
7398 act
._sa_handler
= old_act
->_sa_handler
;
7399 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7400 act
.sa_flags
= old_act
->sa_flags
;
7401 unlock_user_struct(old_act
, arg2
, 0);
7407 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7409 if (!is_error(ret
) && arg3
) {
7410 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7411 return -TARGET_EFAULT
;
7412 old_act
->_sa_handler
= oact
._sa_handler
;
7413 old_act
->sa_flags
= oact
.sa_flags
;
7414 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7415 old_act
->sa_mask
.sig
[1] = 0;
7416 old_act
->sa_mask
.sig
[2] = 0;
7417 old_act
->sa_mask
.sig
[3] = 0;
7418 unlock_user_struct(old_act
, arg3
, 1);
7421 struct target_old_sigaction
*old_act
;
7422 struct target_sigaction act
, oact
, *pact
;
7424 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7425 return -TARGET_EFAULT
;
7426 act
._sa_handler
= old_act
->_sa_handler
;
7427 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7428 act
.sa_flags
= old_act
->sa_flags
;
7429 act
.sa_restorer
= old_act
->sa_restorer
;
7430 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7431 act
.ka_restorer
= 0;
7433 unlock_user_struct(old_act
, arg2
, 0);
7438 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7439 if (!is_error(ret
) && arg3
) {
7440 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7441 return -TARGET_EFAULT
;
7442 old_act
->_sa_handler
= oact
._sa_handler
;
7443 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7444 old_act
->sa_flags
= oact
.sa_flags
;
7445 old_act
->sa_restorer
= oact
.sa_restorer
;
7446 unlock_user_struct(old_act
, arg3
, 1);
7452 case TARGET_NR_rt_sigaction
:
7454 #if defined(TARGET_ALPHA)
7455 /* For Alpha and SPARC this is a 5 argument syscall, with
7456 * a 'restorer' parameter which must be copied into the
7457 * sa_restorer field of the sigaction struct.
7458 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7459 * and arg5 is the sigsetsize.
7460 * Alpha also has a separate rt_sigaction struct that it uses
7461 * here; SPARC uses the usual sigaction struct.
7463 struct target_rt_sigaction
*rt_act
;
7464 struct target_sigaction act
, oact
, *pact
= 0;
7466 if (arg4
!= sizeof(target_sigset_t
)) {
7467 return -TARGET_EINVAL
;
7470 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7471 return -TARGET_EFAULT
;
7472 act
._sa_handler
= rt_act
->_sa_handler
;
7473 act
.sa_mask
= rt_act
->sa_mask
;
7474 act
.sa_flags
= rt_act
->sa_flags
;
7475 act
.sa_restorer
= arg5
;
7476 unlock_user_struct(rt_act
, arg2
, 0);
7479 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7480 if (!is_error(ret
) && arg3
) {
7481 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7482 return -TARGET_EFAULT
;
7483 rt_act
->_sa_handler
= oact
._sa_handler
;
7484 rt_act
->sa_mask
= oact
.sa_mask
;
7485 rt_act
->sa_flags
= oact
.sa_flags
;
7486 unlock_user_struct(rt_act
, arg3
, 1);
7490 target_ulong restorer
= arg4
;
7491 target_ulong sigsetsize
= arg5
;
7493 target_ulong sigsetsize
= arg4
;
7495 struct target_sigaction
*act
;
7496 struct target_sigaction
*oact
;
7498 if (sigsetsize
!= sizeof(target_sigset_t
)) {
7499 return -TARGET_EINVAL
;
7502 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
7503 return -TARGET_EFAULT
;
7505 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7506 act
->ka_restorer
= restorer
;
7512 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7513 ret
= -TARGET_EFAULT
;
7514 goto rt_sigaction_fail
;
7518 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7521 unlock_user_struct(act
, arg2
, 0);
7523 unlock_user_struct(oact
, arg3
, 1);
7527 #ifdef TARGET_NR_sgetmask /* not on alpha */
7528 case TARGET_NR_sgetmask
:
7531 abi_ulong target_set
;
7532 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7534 host_to_target_old_sigset(&target_set
, &cur_set
);
7540 #ifdef TARGET_NR_ssetmask /* not on alpha */
7541 case TARGET_NR_ssetmask
:
7544 abi_ulong target_set
= arg1
;
7545 target_to_host_old_sigset(&set
, &target_set
);
7546 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7548 host_to_target_old_sigset(&target_set
, &oset
);
7554 #ifdef TARGET_NR_sigprocmask
7555 case TARGET_NR_sigprocmask
:
7557 #if defined(TARGET_ALPHA)
7558 sigset_t set
, oldset
;
7563 case TARGET_SIG_BLOCK
:
7566 case TARGET_SIG_UNBLOCK
:
7569 case TARGET_SIG_SETMASK
:
7573 return -TARGET_EINVAL
;
7576 target_to_host_old_sigset(&set
, &mask
);
7578 ret
= do_sigprocmask(how
, &set
, &oldset
);
7579 if (!is_error(ret
)) {
7580 host_to_target_old_sigset(&mask
, &oldset
);
7582 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7585 sigset_t set
, oldset
, *set_ptr
;
7590 case TARGET_SIG_BLOCK
:
7593 case TARGET_SIG_UNBLOCK
:
7596 case TARGET_SIG_SETMASK
:
7600 return -TARGET_EINVAL
;
7602 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7603 return -TARGET_EFAULT
;
7604 target_to_host_old_sigset(&set
, p
);
7605 unlock_user(p
, arg2
, 0);
7611 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7612 if (!is_error(ret
) && arg3
) {
7613 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7614 return -TARGET_EFAULT
;
7615 host_to_target_old_sigset(p
, &oldset
);
7616 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7622 case TARGET_NR_rt_sigprocmask
:
7625 sigset_t set
, oldset
, *set_ptr
;
7627 if (arg4
!= sizeof(target_sigset_t
)) {
7628 return -TARGET_EINVAL
;
7633 case TARGET_SIG_BLOCK
:
7636 case TARGET_SIG_UNBLOCK
:
7639 case TARGET_SIG_SETMASK
:
7643 return -TARGET_EINVAL
;
7645 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7646 return -TARGET_EFAULT
;
7647 target_to_host_sigset(&set
, p
);
7648 unlock_user(p
, arg2
, 0);
7654 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7655 if (!is_error(ret
) && arg3
) {
7656 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7657 return -TARGET_EFAULT
;
7658 host_to_target_sigset(p
, &oldset
);
7659 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7663 #ifdef TARGET_NR_sigpending
7664 case TARGET_NR_sigpending
:
7667 ret
= get_errno(sigpending(&set
));
7668 if (!is_error(ret
)) {
7669 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7670 return -TARGET_EFAULT
;
7671 host_to_target_old_sigset(p
, &set
);
7672 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7677 case TARGET_NR_rt_sigpending
:
7681 /* Yes, this check is >, not != like most. We follow the kernel's
7682 * logic and it does it like this because it implements
7683 * NR_sigpending through the same code path, and in that case
7684 * the old_sigset_t is smaller in size.
7686 if (arg2
> sizeof(target_sigset_t
)) {
7687 return -TARGET_EINVAL
;
7690 ret
= get_errno(sigpending(&set
));
7691 if (!is_error(ret
)) {
7692 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7693 return -TARGET_EFAULT
;
7694 host_to_target_sigset(p
, &set
);
7695 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7699 #ifdef TARGET_NR_sigsuspend
7700 case TARGET_NR_sigsuspend
:
7702 TaskState
*ts
= cpu
->opaque
;
7703 #if defined(TARGET_ALPHA)
7704 abi_ulong mask
= arg1
;
7705 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7707 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7708 return -TARGET_EFAULT
;
7709 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7710 unlock_user(p
, arg1
, 0);
7712 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7714 if (ret
!= -TARGET_ERESTARTSYS
) {
7715 ts
->in_sigsuspend
= 1;
7720 case TARGET_NR_rt_sigsuspend
:
7722 TaskState
*ts
= cpu
->opaque
;
7724 if (arg2
!= sizeof(target_sigset_t
)) {
7725 return -TARGET_EINVAL
;
7727 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7728 return -TARGET_EFAULT
;
7729 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7730 unlock_user(p
, arg1
, 0);
7731 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7733 if (ret
!= -TARGET_ERESTARTSYS
) {
7734 ts
->in_sigsuspend
= 1;
7738 case TARGET_NR_rt_sigtimedwait
:
7741 struct timespec uts
, *puts
;
7744 if (arg4
!= sizeof(target_sigset_t
)) {
7745 return -TARGET_EINVAL
;
7748 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7749 return -TARGET_EFAULT
;
7750 target_to_host_sigset(&set
, p
);
7751 unlock_user(p
, arg1
, 0);
7754 target_to_host_timespec(puts
, arg3
);
7758 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
7760 if (!is_error(ret
)) {
7762 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7765 return -TARGET_EFAULT
;
7767 host_to_target_siginfo(p
, &uinfo
);
7768 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7770 ret
= host_to_target_signal(ret
);
7774 case TARGET_NR_rt_sigqueueinfo
:
7778 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
7780 return -TARGET_EFAULT
;
7782 target_to_host_siginfo(&uinfo
, p
);
7783 unlock_user(p
, arg3
, 0);
7784 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7787 case TARGET_NR_rt_tgsigqueueinfo
:
7791 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
7793 return -TARGET_EFAULT
;
7795 target_to_host_siginfo(&uinfo
, p
);
7796 unlock_user(p
, arg4
, 0);
7797 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
7800 #ifdef TARGET_NR_sigreturn
7801 case TARGET_NR_sigreturn
:
7802 if (block_signals()) {
7803 return -TARGET_ERESTARTSYS
;
7805 return do_sigreturn(cpu_env
);
7807 case TARGET_NR_rt_sigreturn
:
7808 if (block_signals()) {
7809 return -TARGET_ERESTARTSYS
;
7811 return do_rt_sigreturn(cpu_env
);
7812 case TARGET_NR_sethostname
:
7813 if (!(p
= lock_user_string(arg1
)))
7814 return -TARGET_EFAULT
;
7815 ret
= get_errno(sethostname(p
, arg2
));
7816 unlock_user(p
, arg1
, 0);
7818 #ifdef TARGET_NR_setrlimit
7819 case TARGET_NR_setrlimit
:
7821 int resource
= target_to_host_resource(arg1
);
7822 struct target_rlimit
*target_rlim
;
7824 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7825 return -TARGET_EFAULT
;
7826 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7827 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7828 unlock_user_struct(target_rlim
, arg2
, 0);
7829 return get_errno(setrlimit(resource
, &rlim
));
7832 #ifdef TARGET_NR_getrlimit
7833 case TARGET_NR_getrlimit
:
7835 int resource
= target_to_host_resource(arg1
);
7836 struct target_rlimit
*target_rlim
;
7839 ret
= get_errno(getrlimit(resource
, &rlim
));
7840 if (!is_error(ret
)) {
7841 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7842 return -TARGET_EFAULT
;
7843 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7844 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7845 unlock_user_struct(target_rlim
, arg2
, 1);
7850 case TARGET_NR_getrusage
:
7852 struct rusage rusage
;
7853 ret
= get_errno(getrusage(arg1
, &rusage
));
7854 if (!is_error(ret
)) {
7855 ret
= host_to_target_rusage(arg2
, &rusage
);
7859 case TARGET_NR_gettimeofday
:
7862 ret
= get_errno(gettimeofday(&tv
, NULL
));
7863 if (!is_error(ret
)) {
7864 if (copy_to_user_timeval(arg1
, &tv
))
7865 return -TARGET_EFAULT
;
7869 case TARGET_NR_settimeofday
:
7871 struct timeval tv
, *ptv
= NULL
;
7872 struct timezone tz
, *ptz
= NULL
;
7875 if (copy_from_user_timeval(&tv
, arg1
)) {
7876 return -TARGET_EFAULT
;
7882 if (copy_from_user_timezone(&tz
, arg2
)) {
7883 return -TARGET_EFAULT
;
7888 return get_errno(settimeofday(ptv
, ptz
));
7890 #if defined(TARGET_NR_select)
7891 case TARGET_NR_select
:
7892 #if defined(TARGET_WANT_NI_OLD_SELECT)
7893 /* some architectures used to have old_select here
7894 * but now ENOSYS it.
7896 ret
= -TARGET_ENOSYS
;
7897 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
7898 ret
= do_old_select(arg1
);
7900 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7904 #ifdef TARGET_NR_pselect6
7905 case TARGET_NR_pselect6
:
7907 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7908 fd_set rfds
, wfds
, efds
;
7909 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7910 struct timespec ts
, *ts_ptr
;
7913 * The 6th arg is actually two args smashed together,
7914 * so we cannot use the C library.
7922 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7923 target_sigset_t
*target_sigset
;
7931 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7935 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7939 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7945 * This takes a timespec, and not a timeval, so we cannot
7946 * use the do_select() helper ...
7949 if (target_to_host_timespec(&ts
, ts_addr
)) {
7950 return -TARGET_EFAULT
;
7957 /* Extract the two packed args for the sigset */
7960 sig
.size
= SIGSET_T_SIZE
;
7962 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7964 return -TARGET_EFAULT
;
7966 arg_sigset
= tswapal(arg7
[0]);
7967 arg_sigsize
= tswapal(arg7
[1]);
7968 unlock_user(arg7
, arg6
, 0);
7972 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7973 /* Like the kernel, we enforce correct size sigsets */
7974 return -TARGET_EINVAL
;
7976 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7977 sizeof(*target_sigset
), 1);
7978 if (!target_sigset
) {
7979 return -TARGET_EFAULT
;
7981 target_to_host_sigset(&set
, target_sigset
);
7982 unlock_user(target_sigset
, arg_sigset
, 0);
7990 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7993 if (!is_error(ret
)) {
7994 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7995 return -TARGET_EFAULT
;
7996 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7997 return -TARGET_EFAULT
;
7998 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7999 return -TARGET_EFAULT
;
8001 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8002 return -TARGET_EFAULT
;
8007 #ifdef TARGET_NR_symlink
8008 case TARGET_NR_symlink
:
8011 p
= lock_user_string(arg1
);
8012 p2
= lock_user_string(arg2
);
8014 ret
= -TARGET_EFAULT
;
8016 ret
= get_errno(symlink(p
, p2
));
8017 unlock_user(p2
, arg2
, 0);
8018 unlock_user(p
, arg1
, 0);
8022 #if defined(TARGET_NR_symlinkat)
8023 case TARGET_NR_symlinkat
:
8026 p
= lock_user_string(arg1
);
8027 p2
= lock_user_string(arg3
);
8029 ret
= -TARGET_EFAULT
;
8031 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8032 unlock_user(p2
, arg3
, 0);
8033 unlock_user(p
, arg1
, 0);
8037 #ifdef TARGET_NR_readlink
8038 case TARGET_NR_readlink
:
8041 p
= lock_user_string(arg1
);
8042 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8044 ret
= -TARGET_EFAULT
;
8046 /* Short circuit this for the magic exe check. */
8047 ret
= -TARGET_EINVAL
;
8048 } else if (is_proc_myself((const char *)p
, "exe")) {
8049 char real
[PATH_MAX
], *temp
;
8050 temp
= realpath(exec_path
, real
);
8051 /* Return value is # of bytes that we wrote to the buffer. */
8053 ret
= get_errno(-1);
8055 /* Don't worry about sign mismatch as earlier mapping
8056 * logic would have thrown a bad address error. */
8057 ret
= MIN(strlen(real
), arg3
);
8058 /* We cannot NUL terminate the string. */
8059 memcpy(p2
, real
, ret
);
8062 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8064 unlock_user(p2
, arg2
, ret
);
8065 unlock_user(p
, arg1
, 0);
8069 #if defined(TARGET_NR_readlinkat)
8070 case TARGET_NR_readlinkat
:
8073 p
= lock_user_string(arg2
);
8074 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8076 ret
= -TARGET_EFAULT
;
8077 } else if (is_proc_myself((const char *)p
, "exe")) {
8078 char real
[PATH_MAX
], *temp
;
8079 temp
= realpath(exec_path
, real
);
8080 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8081 snprintf((char *)p2
, arg4
, "%s", real
);
8083 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8085 unlock_user(p2
, arg3
, ret
);
8086 unlock_user(p
, arg2
, 0);
8090 #ifdef TARGET_NR_swapon
8091 case TARGET_NR_swapon
:
8092 if (!(p
= lock_user_string(arg1
)))
8093 return -TARGET_EFAULT
;
8094 ret
= get_errno(swapon(p
, arg2
));
8095 unlock_user(p
, arg1
, 0);
8098 case TARGET_NR_reboot
:
8099 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8100 /* arg4 must be ignored in all other cases */
8101 p
= lock_user_string(arg4
);
8103 return -TARGET_EFAULT
;
8105 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8106 unlock_user(p
, arg4
, 0);
8108 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8111 #ifdef TARGET_NR_mmap
8112 case TARGET_NR_mmap
:
8113 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8114 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8115 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8116 || defined(TARGET_S390X)
8119 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8120 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8121 return -TARGET_EFAULT
;
8128 unlock_user(v
, arg1
, 0);
8129 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8130 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8134 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8135 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8141 #ifdef TARGET_NR_mmap2
8142 case TARGET_NR_mmap2
:
8144 #define MMAP_SHIFT 12
8146 ret
= target_mmap(arg1
, arg2
, arg3
,
8147 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8148 arg5
, arg6
<< MMAP_SHIFT
);
8149 return get_errno(ret
);
8151 case TARGET_NR_munmap
:
8152 return get_errno(target_munmap(arg1
, arg2
));
8153 case TARGET_NR_mprotect
:
8155 TaskState
*ts
= cpu
->opaque
;
8156 /* Special hack to detect libc making the stack executable. */
8157 if ((arg3
& PROT_GROWSDOWN
)
8158 && arg1
>= ts
->info
->stack_limit
8159 && arg1
<= ts
->info
->start_stack
) {
8160 arg3
&= ~PROT_GROWSDOWN
;
8161 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8162 arg1
= ts
->info
->stack_limit
;
8165 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8166 #ifdef TARGET_NR_mremap
8167 case TARGET_NR_mremap
:
8168 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8170 /* ??? msync/mlock/munlock are broken for softmmu. */
8171 #ifdef TARGET_NR_msync
8172 case TARGET_NR_msync
:
8173 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8175 #ifdef TARGET_NR_mlock
8176 case TARGET_NR_mlock
:
8177 return get_errno(mlock(g2h(arg1
), arg2
));
8179 #ifdef TARGET_NR_munlock
8180 case TARGET_NR_munlock
:
8181 return get_errno(munlock(g2h(arg1
), arg2
));
8183 #ifdef TARGET_NR_mlockall
8184 case TARGET_NR_mlockall
:
8185 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8187 #ifdef TARGET_NR_munlockall
8188 case TARGET_NR_munlockall
:
8189 return get_errno(munlockall());
8191 #ifdef TARGET_NR_truncate
8192 case TARGET_NR_truncate
:
8193 if (!(p
= lock_user_string(arg1
)))
8194 return -TARGET_EFAULT
;
8195 ret
= get_errno(truncate(p
, arg2
));
8196 unlock_user(p
, arg1
, 0);
8199 #ifdef TARGET_NR_ftruncate
8200 case TARGET_NR_ftruncate
:
8201 return get_errno(ftruncate(arg1
, arg2
));
8203 case TARGET_NR_fchmod
:
8204 return get_errno(fchmod(arg1
, arg2
));
8205 #if defined(TARGET_NR_fchmodat)
8206 case TARGET_NR_fchmodat
:
8207 if (!(p
= lock_user_string(arg2
)))
8208 return -TARGET_EFAULT
;
8209 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8210 unlock_user(p
, arg2
, 0);
8213 case TARGET_NR_getpriority
:
8214 /* Note that negative values are valid for getpriority, so we must
8215 differentiate based on errno settings. */
8217 ret
= getpriority(arg1
, arg2
);
8218 if (ret
== -1 && errno
!= 0) {
8219 return -host_to_target_errno(errno
);
8222 /* Return value is the unbiased priority. Signal no error. */
8223 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8225 /* Return value is a biased priority to avoid negative numbers. */
8229 case TARGET_NR_setpriority
:
8230 return get_errno(setpriority(arg1
, arg2
, arg3
));
8231 #ifdef TARGET_NR_statfs
8232 case TARGET_NR_statfs
:
8233 if (!(p
= lock_user_string(arg1
))) {
8234 return -TARGET_EFAULT
;
8236 ret
= get_errno(statfs(path(p
), &stfs
));
8237 unlock_user(p
, arg1
, 0);
8239 if (!is_error(ret
)) {
8240 struct target_statfs
*target_stfs
;
8242 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8243 return -TARGET_EFAULT
;
8244 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8245 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8246 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8247 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8248 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8249 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8250 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8251 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8252 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8253 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8254 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8255 #ifdef _STATFS_F_FLAGS
8256 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8258 __put_user(0, &target_stfs
->f_flags
);
8260 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8261 unlock_user_struct(target_stfs
, arg2
, 1);
8265 #ifdef TARGET_NR_fstatfs
8266 case TARGET_NR_fstatfs
:
8267 ret
= get_errno(fstatfs(arg1
, &stfs
));
8268 goto convert_statfs
;
8270 #ifdef TARGET_NR_statfs64
8271 case TARGET_NR_statfs64
:
8272 if (!(p
= lock_user_string(arg1
))) {
8273 return -TARGET_EFAULT
;
8275 ret
= get_errno(statfs(path(p
), &stfs
));
8276 unlock_user(p
, arg1
, 0);
8278 if (!is_error(ret
)) {
8279 struct target_statfs64
*target_stfs
;
8281 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8282 return -TARGET_EFAULT
;
8283 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8284 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8285 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8286 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8287 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8288 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8289 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8290 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8291 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8292 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8293 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8294 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8295 unlock_user_struct(target_stfs
, arg3
, 1);
8298 case TARGET_NR_fstatfs64
:
8299 ret
= get_errno(fstatfs(arg1
, &stfs
));
8300 goto convert_statfs64
;
8302 #ifdef TARGET_NR_socketcall
8303 case TARGET_NR_socketcall
:
8304 return do_socketcall(arg1
, arg2
);
8306 #ifdef TARGET_NR_accept
8307 case TARGET_NR_accept
:
8308 return do_accept4(arg1
, arg2
, arg3
, 0);
8310 #ifdef TARGET_NR_accept4
8311 case TARGET_NR_accept4
:
8312 return do_accept4(arg1
, arg2
, arg3
, arg4
);
8314 #ifdef TARGET_NR_bind
8315 case TARGET_NR_bind
:
8316 return do_bind(arg1
, arg2
, arg3
);
8318 #ifdef TARGET_NR_connect
8319 case TARGET_NR_connect
:
8320 return do_connect(arg1
, arg2
, arg3
);
8322 #ifdef TARGET_NR_getpeername
8323 case TARGET_NR_getpeername
:
8324 return do_getpeername(arg1
, arg2
, arg3
);
8326 #ifdef TARGET_NR_getsockname
8327 case TARGET_NR_getsockname
:
8328 return do_getsockname(arg1
, arg2
, arg3
);
8330 #ifdef TARGET_NR_getsockopt
8331 case TARGET_NR_getsockopt
:
8332 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8334 #ifdef TARGET_NR_listen
8335 case TARGET_NR_listen
:
8336 return get_errno(listen(arg1
, arg2
));
8338 #ifdef TARGET_NR_recv
8339 case TARGET_NR_recv
:
8340 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8342 #ifdef TARGET_NR_recvfrom
8343 case TARGET_NR_recvfrom
:
8344 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8346 #ifdef TARGET_NR_recvmsg
8347 case TARGET_NR_recvmsg
:
8348 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8350 #ifdef TARGET_NR_send
8351 case TARGET_NR_send
:
8352 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8354 #ifdef TARGET_NR_sendmsg
8355 case TARGET_NR_sendmsg
:
8356 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8358 #ifdef TARGET_NR_sendmmsg
8359 case TARGET_NR_sendmmsg
:
8360 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8361 case TARGET_NR_recvmmsg
:
8362 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8364 #ifdef TARGET_NR_sendto
8365 case TARGET_NR_sendto
:
8366 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8368 #ifdef TARGET_NR_shutdown
8369 case TARGET_NR_shutdown
:
8370 return get_errno(shutdown(arg1
, arg2
));
8372 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8373 case TARGET_NR_getrandom
:
8374 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8376 return -TARGET_EFAULT
;
8378 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8379 unlock_user(p
, arg1
, ret
);
8382 #ifdef TARGET_NR_socket
8383 case TARGET_NR_socket
:
8384 return do_socket(arg1
, arg2
, arg3
);
8386 #ifdef TARGET_NR_socketpair
8387 case TARGET_NR_socketpair
:
8388 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
8390 #ifdef TARGET_NR_setsockopt
8391 case TARGET_NR_setsockopt
:
8392 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8394 #if defined(TARGET_NR_syslog)
8395 case TARGET_NR_syslog
:
8400 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
8401 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
8402 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
8403 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
8404 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
8405 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
8406 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
8407 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
8408 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
8409 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
8410 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
8411 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
8414 return -TARGET_EINVAL
;
8419 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8421 return -TARGET_EFAULT
;
8423 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8424 unlock_user(p
, arg2
, arg3
);
8428 return -TARGET_EINVAL
;
8433 case TARGET_NR_setitimer
:
8435 struct itimerval value
, ovalue
, *pvalue
;
8439 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8440 || copy_from_user_timeval(&pvalue
->it_value
,
8441 arg2
+ sizeof(struct target_timeval
)))
8442 return -TARGET_EFAULT
;
8446 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8447 if (!is_error(ret
) && arg3
) {
8448 if (copy_to_user_timeval(arg3
,
8449 &ovalue
.it_interval
)
8450 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8452 return -TARGET_EFAULT
;
8456 case TARGET_NR_getitimer
:
8458 struct itimerval value
;
8460 ret
= get_errno(getitimer(arg1
, &value
));
8461 if (!is_error(ret
) && arg2
) {
8462 if (copy_to_user_timeval(arg2
,
8464 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8466 return -TARGET_EFAULT
;
8470 #ifdef TARGET_NR_stat
8471 case TARGET_NR_stat
:
8472 if (!(p
= lock_user_string(arg1
))) {
8473 return -TARGET_EFAULT
;
8475 ret
= get_errno(stat(path(p
), &st
));
8476 unlock_user(p
, arg1
, 0);
8479 #ifdef TARGET_NR_lstat
8480 case TARGET_NR_lstat
:
8481 if (!(p
= lock_user_string(arg1
))) {
8482 return -TARGET_EFAULT
;
8484 ret
= get_errno(lstat(path(p
), &st
));
8485 unlock_user(p
, arg1
, 0);
8488 #ifdef TARGET_NR_fstat
8489 case TARGET_NR_fstat
:
8491 ret
= get_errno(fstat(arg1
, &st
));
8492 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8495 if (!is_error(ret
)) {
8496 struct target_stat
*target_st
;
8498 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8499 return -TARGET_EFAULT
;
8500 memset(target_st
, 0, sizeof(*target_st
));
8501 __put_user(st
.st_dev
, &target_st
->st_dev
);
8502 __put_user(st
.st_ino
, &target_st
->st_ino
);
8503 __put_user(st
.st_mode
, &target_st
->st_mode
);
8504 __put_user(st
.st_uid
, &target_st
->st_uid
);
8505 __put_user(st
.st_gid
, &target_st
->st_gid
);
8506 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8507 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8508 __put_user(st
.st_size
, &target_st
->st_size
);
8509 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8510 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8511 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8512 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8513 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8514 unlock_user_struct(target_st
, arg2
, 1);
8519 case TARGET_NR_vhangup
:
8520 return get_errno(vhangup());
8521 #ifdef TARGET_NR_syscall
8522 case TARGET_NR_syscall
:
8523 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8524 arg6
, arg7
, arg8
, 0);
8526 case TARGET_NR_wait4
:
8529 abi_long status_ptr
= arg2
;
8530 struct rusage rusage
, *rusage_ptr
;
8531 abi_ulong target_rusage
= arg4
;
8532 abi_long rusage_err
;
8534 rusage_ptr
= &rusage
;
8537 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8538 if (!is_error(ret
)) {
8539 if (status_ptr
&& ret
) {
8540 status
= host_to_target_waitstatus(status
);
8541 if (put_user_s32(status
, status_ptr
))
8542 return -TARGET_EFAULT
;
8544 if (target_rusage
) {
8545 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8553 #ifdef TARGET_NR_swapoff
8554 case TARGET_NR_swapoff
:
8555 if (!(p
= lock_user_string(arg1
)))
8556 return -TARGET_EFAULT
;
8557 ret
= get_errno(swapoff(p
));
8558 unlock_user(p
, arg1
, 0);
8561 case TARGET_NR_sysinfo
:
8563 struct target_sysinfo
*target_value
;
8564 struct sysinfo value
;
8565 ret
= get_errno(sysinfo(&value
));
8566 if (!is_error(ret
) && arg1
)
8568 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8569 return -TARGET_EFAULT
;
8570 __put_user(value
.uptime
, &target_value
->uptime
);
8571 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8572 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8573 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8574 __put_user(value
.totalram
, &target_value
->totalram
);
8575 __put_user(value
.freeram
, &target_value
->freeram
);
8576 __put_user(value
.sharedram
, &target_value
->sharedram
);
8577 __put_user(value
.bufferram
, &target_value
->bufferram
);
8578 __put_user(value
.totalswap
, &target_value
->totalswap
);
8579 __put_user(value
.freeswap
, &target_value
->freeswap
);
8580 __put_user(value
.procs
, &target_value
->procs
);
8581 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8582 __put_user(value
.freehigh
, &target_value
->freehigh
);
8583 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8584 unlock_user_struct(target_value
, arg1
, 1);
8588 #ifdef TARGET_NR_ipc
8590 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8592 #ifdef TARGET_NR_semget
8593 case TARGET_NR_semget
:
8594 return get_errno(semget(arg1
, arg2
, arg3
));
8596 #ifdef TARGET_NR_semop
8597 case TARGET_NR_semop
:
8598 return do_semop(arg1
, arg2
, arg3
);
8600 #ifdef TARGET_NR_semctl
8601 case TARGET_NR_semctl
:
8602 return do_semctl(arg1
, arg2
, arg3
, arg4
);
8604 #ifdef TARGET_NR_msgctl
8605 case TARGET_NR_msgctl
:
8606 return do_msgctl(arg1
, arg2
, arg3
);
8608 #ifdef TARGET_NR_msgget
8609 case TARGET_NR_msgget
:
8610 return get_errno(msgget(arg1
, arg2
));
8612 #ifdef TARGET_NR_msgrcv
8613 case TARGET_NR_msgrcv
:
8614 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8616 #ifdef TARGET_NR_msgsnd
8617 case TARGET_NR_msgsnd
:
8618 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8620 #ifdef TARGET_NR_shmget
8621 case TARGET_NR_shmget
:
8622 return get_errno(shmget(arg1
, arg2
, arg3
));
8624 #ifdef TARGET_NR_shmctl
8625 case TARGET_NR_shmctl
:
8626 return do_shmctl(arg1
, arg2
, arg3
);
8628 #ifdef TARGET_NR_shmat
8629 case TARGET_NR_shmat
:
8630 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
8632 #ifdef TARGET_NR_shmdt
8633 case TARGET_NR_shmdt
:
8634 return do_shmdt(arg1
);
8636 case TARGET_NR_fsync
:
8637 return get_errno(fsync(arg1
));
8638 case TARGET_NR_clone
:
8639 /* Linux manages to have three different orderings for its
8640 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8641 * match the kernel's CONFIG_CLONE_* settings.
8642 * Microblaze is further special in that it uses a sixth
8643 * implicit argument to clone for the TLS pointer.
8645 #if defined(TARGET_MICROBLAZE)
8646 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8647 #elif defined(TARGET_CLONE_BACKWARDS)
8648 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8649 #elif defined(TARGET_CLONE_BACKWARDS2)
8650 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8652 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8655 #ifdef __NR_exit_group
8656 /* new thread calls */
8657 case TARGET_NR_exit_group
:
8658 preexit_cleanup(cpu_env
, arg1
);
8659 return get_errno(exit_group(arg1
));
8661 case TARGET_NR_setdomainname
:
8662 if (!(p
= lock_user_string(arg1
)))
8663 return -TARGET_EFAULT
;
8664 ret
= get_errno(setdomainname(p
, arg2
));
8665 unlock_user(p
, arg1
, 0);
8667 case TARGET_NR_uname
:
8668 /* no need to transcode because we use the linux syscall */
8670 struct new_utsname
* buf
;
8672 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8673 return -TARGET_EFAULT
;
8674 ret
= get_errno(sys_uname(buf
));
8675 if (!is_error(ret
)) {
8676 /* Overwrite the native machine name with whatever is being
8678 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
8679 sizeof(buf
->machine
));
8680 /* Allow the user to override the reported release. */
8681 if (qemu_uname_release
&& *qemu_uname_release
) {
8682 g_strlcpy(buf
->release
, qemu_uname_release
,
8683 sizeof(buf
->release
));
8686 unlock_user_struct(buf
, arg1
, 1);
8690 case TARGET_NR_modify_ldt
:
8691 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8692 #if !defined(TARGET_X86_64)
8693 case TARGET_NR_vm86
:
8694 return do_vm86(cpu_env
, arg1
, arg2
);
8697 case TARGET_NR_adjtimex
:
8699 struct timex host_buf
;
8701 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
8702 return -TARGET_EFAULT
;
8704 ret
= get_errno(adjtimex(&host_buf
));
8705 if (!is_error(ret
)) {
8706 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
8707 return -TARGET_EFAULT
;
8712 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8713 case TARGET_NR_clock_adjtime
:
8715 struct timex htx
, *phtx
= &htx
;
8717 if (target_to_host_timex(phtx
, arg2
) != 0) {
8718 return -TARGET_EFAULT
;
8720 ret
= get_errno(clock_adjtime(arg1
, phtx
));
8721 if (!is_error(ret
) && phtx
) {
8722 if (host_to_target_timex(arg2
, phtx
) != 0) {
8723 return -TARGET_EFAULT
;
8729 case TARGET_NR_getpgid
:
8730 return get_errno(getpgid(arg1
));
8731 case TARGET_NR_fchdir
:
8732 return get_errno(fchdir(arg1
));
8733 case TARGET_NR_personality
:
8734 return get_errno(personality(arg1
));
8735 #ifdef TARGET_NR__llseek /* Not on alpha */
8736 case TARGET_NR__llseek
:
8739 #if !defined(__NR_llseek)
8740 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
8742 ret
= get_errno(res
);
8747 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8749 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8750 return -TARGET_EFAULT
;
8755 #ifdef TARGET_NR_getdents
8756 case TARGET_NR_getdents
:
8757 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8758 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8760 struct target_dirent
*target_dirp
;
8761 struct linux_dirent
*dirp
;
8762 abi_long count
= arg3
;
8764 dirp
= g_try_malloc(count
);
8766 return -TARGET_ENOMEM
;
8769 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8770 if (!is_error(ret
)) {
8771 struct linux_dirent
*de
;
8772 struct target_dirent
*tde
;
8774 int reclen
, treclen
;
8775 int count1
, tnamelen
;
8779 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8780 return -TARGET_EFAULT
;
8783 reclen
= de
->d_reclen
;
8784 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8785 assert(tnamelen
>= 0);
8786 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8787 assert(count1
+ treclen
<= count
);
8788 tde
->d_reclen
= tswap16(treclen
);
8789 tde
->d_ino
= tswapal(de
->d_ino
);
8790 tde
->d_off
= tswapal(de
->d_off
);
8791 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8792 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8794 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8798 unlock_user(target_dirp
, arg2
, ret
);
8804 struct linux_dirent
*dirp
;
8805 abi_long count
= arg3
;
8807 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8808 return -TARGET_EFAULT
;
8809 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8810 if (!is_error(ret
)) {
8811 struct linux_dirent
*de
;
8816 reclen
= de
->d_reclen
;
8819 de
->d_reclen
= tswap16(reclen
);
8820 tswapls(&de
->d_ino
);
8821 tswapls(&de
->d_off
);
8822 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8826 unlock_user(dirp
, arg2
, ret
);
8830 /* Implement getdents in terms of getdents64 */
8832 struct linux_dirent64
*dirp
;
8833 abi_long count
= arg3
;
8835 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8837 return -TARGET_EFAULT
;
8839 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8840 if (!is_error(ret
)) {
8841 /* Convert the dirent64 structs to target dirent. We do this
8842 * in-place, since we can guarantee that a target_dirent is no
8843 * larger than a dirent64; however this means we have to be
8844 * careful to read everything before writing in the new format.
8846 struct linux_dirent64
*de
;
8847 struct target_dirent
*tde
;
8852 tde
= (struct target_dirent
*)dirp
;
8854 int namelen
, treclen
;
8855 int reclen
= de
->d_reclen
;
8856 uint64_t ino
= de
->d_ino
;
8857 int64_t off
= de
->d_off
;
8858 uint8_t type
= de
->d_type
;
8860 namelen
= strlen(de
->d_name
);
8861 treclen
= offsetof(struct target_dirent
, d_name
)
8863 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8865 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8866 tde
->d_ino
= tswapal(ino
);
8867 tde
->d_off
= tswapal(off
);
8868 tde
->d_reclen
= tswap16(treclen
);
8869 /* The target_dirent type is in what was formerly a padding
8870 * byte at the end of the structure:
8872 *(((char *)tde
) + treclen
- 1) = type
;
8874 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8875 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8881 unlock_user(dirp
, arg2
, ret
);
8885 #endif /* TARGET_NR_getdents */
8886 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8887 case TARGET_NR_getdents64
:
8889 struct linux_dirent64
*dirp
;
8890 abi_long count
= arg3
;
8891 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8892 return -TARGET_EFAULT
;
8893 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8894 if (!is_error(ret
)) {
8895 struct linux_dirent64
*de
;
8900 reclen
= de
->d_reclen
;
8903 de
->d_reclen
= tswap16(reclen
);
8904 tswap64s((uint64_t *)&de
->d_ino
);
8905 tswap64s((uint64_t *)&de
->d_off
);
8906 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8910 unlock_user(dirp
, arg2
, ret
);
8913 #endif /* TARGET_NR_getdents64 */
8914 #if defined(TARGET_NR__newselect)
8915 case TARGET_NR__newselect
:
8916 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8918 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8919 # ifdef TARGET_NR_poll
8920 case TARGET_NR_poll
:
8922 # ifdef TARGET_NR_ppoll
8923 case TARGET_NR_ppoll
:
8926 struct target_pollfd
*target_pfd
;
8927 unsigned int nfds
= arg2
;
8934 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
8935 return -TARGET_EINVAL
;
8938 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8939 sizeof(struct target_pollfd
) * nfds
, 1);
8941 return -TARGET_EFAULT
;
8944 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8945 for (i
= 0; i
< nfds
; i
++) {
8946 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8947 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8952 # ifdef TARGET_NR_ppoll
8953 case TARGET_NR_ppoll
:
8955 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8956 target_sigset_t
*target_set
;
8957 sigset_t _set
, *set
= &_set
;
8960 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8961 unlock_user(target_pfd
, arg1
, 0);
8962 return -TARGET_EFAULT
;
8969 if (arg5
!= sizeof(target_sigset_t
)) {
8970 unlock_user(target_pfd
, arg1
, 0);
8971 return -TARGET_EINVAL
;
8974 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8976 unlock_user(target_pfd
, arg1
, 0);
8977 return -TARGET_EFAULT
;
8979 target_to_host_sigset(set
, target_set
);
8984 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
8985 set
, SIGSET_T_SIZE
));
8987 if (!is_error(ret
) && arg3
) {
8988 host_to_target_timespec(arg3
, timeout_ts
);
8991 unlock_user(target_set
, arg4
, 0);
8996 # ifdef TARGET_NR_poll
8997 case TARGET_NR_poll
:
8999 struct timespec ts
, *pts
;
9002 /* Convert ms to secs, ns */
9003 ts
.tv_sec
= arg3
/ 1000;
9004 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9007 /* -ve poll() timeout means "infinite" */
9010 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9015 g_assert_not_reached();
9018 if (!is_error(ret
)) {
9019 for(i
= 0; i
< nfds
; i
++) {
9020 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9023 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9027 case TARGET_NR_flock
:
9028 /* NOTE: the flock constant seems to be the same for every
9030 return get_errno(safe_flock(arg1
, arg2
));
9031 case TARGET_NR_readv
:
9033 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9035 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9036 unlock_iovec(vec
, arg2
, arg3
, 1);
9038 ret
= -host_to_target_errno(errno
);
9042 case TARGET_NR_writev
:
9044 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9046 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9047 unlock_iovec(vec
, arg2
, arg3
, 0);
9049 ret
= -host_to_target_errno(errno
);
9053 #if defined(TARGET_NR_preadv)
9054 case TARGET_NR_preadv
:
9056 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9058 unsigned long low
, high
;
9060 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9061 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9062 unlock_iovec(vec
, arg2
, arg3
, 1);
9064 ret
= -host_to_target_errno(errno
);
9069 #if defined(TARGET_NR_pwritev)
9070 case TARGET_NR_pwritev
:
9072 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9074 unsigned long low
, high
;
9076 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9077 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9078 unlock_iovec(vec
, arg2
, arg3
, 0);
9080 ret
= -host_to_target_errno(errno
);
9085 case TARGET_NR_getsid
:
9086 return get_errno(getsid(arg1
));
9087 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9088 case TARGET_NR_fdatasync
:
9089 return get_errno(fdatasync(arg1
));
9091 #ifdef TARGET_NR__sysctl
9092 case TARGET_NR__sysctl
:
9093 /* We don't implement this, but ENOTDIR is always a safe
9095 return -TARGET_ENOTDIR
;
9097 case TARGET_NR_sched_getaffinity
:
9099 unsigned int mask_size
;
9100 unsigned long *mask
;
9103 * sched_getaffinity needs multiples of ulong, so need to take
9104 * care of mismatches between target ulong and host ulong sizes.
9106 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9107 return -TARGET_EINVAL
;
9109 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9111 mask
= alloca(mask_size
);
9112 memset(mask
, 0, mask_size
);
9113 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9115 if (!is_error(ret
)) {
9117 /* More data returned than the caller's buffer will fit.
9118 * This only happens if sizeof(abi_long) < sizeof(long)
9119 * and the caller passed us a buffer holding an odd number
9120 * of abi_longs. If the host kernel is actually using the
9121 * extra 4 bytes then fail EINVAL; otherwise we can just
9122 * ignore them and only copy the interesting part.
9124 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9125 if (numcpus
> arg2
* 8) {
9126 return -TARGET_EINVAL
;
9131 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9132 return -TARGET_EFAULT
;
9137 case TARGET_NR_sched_setaffinity
:
9139 unsigned int mask_size
;
9140 unsigned long *mask
;
9143 * sched_setaffinity needs multiples of ulong, so need to take
9144 * care of mismatches between target ulong and host ulong sizes.
9146 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9147 return -TARGET_EINVAL
;
9149 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9150 mask
= alloca(mask_size
);
9152 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9157 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9159 case TARGET_NR_getcpu
:
9162 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9163 arg2
? &node
: NULL
,
9165 if (is_error(ret
)) {
9168 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9169 return -TARGET_EFAULT
;
9171 if (arg2
&& put_user_u32(node
, arg2
)) {
9172 return -TARGET_EFAULT
;
9176 case TARGET_NR_sched_setparam
:
9178 struct sched_param
*target_schp
;
9179 struct sched_param schp
;
9182 return -TARGET_EINVAL
;
9184 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9185 return -TARGET_EFAULT
;
9186 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9187 unlock_user_struct(target_schp
, arg2
, 0);
9188 return get_errno(sched_setparam(arg1
, &schp
));
9190 case TARGET_NR_sched_getparam
:
9192 struct sched_param
*target_schp
;
9193 struct sched_param schp
;
9196 return -TARGET_EINVAL
;
9198 ret
= get_errno(sched_getparam(arg1
, &schp
));
9199 if (!is_error(ret
)) {
9200 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9201 return -TARGET_EFAULT
;
9202 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9203 unlock_user_struct(target_schp
, arg2
, 1);
9207 case TARGET_NR_sched_setscheduler
:
9209 struct sched_param
*target_schp
;
9210 struct sched_param schp
;
9212 return -TARGET_EINVAL
;
9214 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9215 return -TARGET_EFAULT
;
9216 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9217 unlock_user_struct(target_schp
, arg3
, 0);
9218 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9220 case TARGET_NR_sched_getscheduler
:
9221 return get_errno(sched_getscheduler(arg1
));
9222 case TARGET_NR_sched_yield
:
9223 return get_errno(sched_yield());
9224 case TARGET_NR_sched_get_priority_max
:
9225 return get_errno(sched_get_priority_max(arg1
));
9226 case TARGET_NR_sched_get_priority_min
:
9227 return get_errno(sched_get_priority_min(arg1
));
9228 case TARGET_NR_sched_rr_get_interval
:
9231 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9232 if (!is_error(ret
)) {
9233 ret
= host_to_target_timespec(arg2
, &ts
);
9237 case TARGET_NR_nanosleep
:
9239 struct timespec req
, rem
;
9240 target_to_host_timespec(&req
, arg1
);
9241 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9242 if (is_error(ret
) && arg2
) {
9243 host_to_target_timespec(arg2
, &rem
);
9247 case TARGET_NR_prctl
:
9249 case PR_GET_PDEATHSIG
:
9252 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9253 if (!is_error(ret
) && arg2
9254 && put_user_ual(deathsig
, arg2
)) {
9255 return -TARGET_EFAULT
;
9262 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9264 return -TARGET_EFAULT
;
9266 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9268 unlock_user(name
, arg2
, 16);
9273 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9275 return -TARGET_EFAULT
;
9277 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9279 unlock_user(name
, arg2
, 0);
9283 #ifdef TARGET_AARCH64
9284 case TARGET_PR_SVE_SET_VL
:
9286 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9287 * PR_SVE_VL_INHERIT. Note the kernel definition
9288 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9289 * even though the current architectural maximum is VQ=16.
9291 ret
= -TARGET_EINVAL
;
9292 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)
9293 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
9294 CPUARMState
*env
= cpu_env
;
9295 ARMCPU
*cpu
= arm_env_get_cpu(env
);
9296 uint32_t vq
, old_vq
;
9298 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
9299 vq
= MAX(arg2
/ 16, 1);
9300 vq
= MIN(vq
, cpu
->sve_max_vq
);
9303 aarch64_sve_narrow_vq(env
, vq
);
9305 env
->vfp
.zcr_el
[1] = vq
- 1;
9309 case TARGET_PR_SVE_GET_VL
:
9310 ret
= -TARGET_EINVAL
;
9311 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)) {
9312 CPUARMState
*env
= cpu_env
;
9313 ret
= ((env
->vfp
.zcr_el
[1] & 0xf) + 1) * 16;
9316 #endif /* AARCH64 */
9317 case PR_GET_SECCOMP
:
9318 case PR_SET_SECCOMP
:
9319 /* Disable seccomp to prevent the target disabling syscalls we
9321 return -TARGET_EINVAL
;
9323 /* Most prctl options have no pointer arguments */
9324 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9327 #ifdef TARGET_NR_arch_prctl
9328 case TARGET_NR_arch_prctl
:
9329 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9330 return do_arch_prctl(cpu_env
, arg1
, arg2
);
9335 #ifdef TARGET_NR_pread64
9336 case TARGET_NR_pread64
:
9337 if (regpairs_aligned(cpu_env
, num
)) {
9341 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9342 return -TARGET_EFAULT
;
9343 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9344 unlock_user(p
, arg2
, ret
);
9346 case TARGET_NR_pwrite64
:
9347 if (regpairs_aligned(cpu_env
, num
)) {
9351 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9352 return -TARGET_EFAULT
;
9353 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9354 unlock_user(p
, arg2
, 0);
9357 case TARGET_NR_getcwd
:
9358 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9359 return -TARGET_EFAULT
;
9360 ret
= get_errno(sys_getcwd1(p
, arg2
));
9361 unlock_user(p
, arg1
, ret
);
9363 case TARGET_NR_capget
:
9364 case TARGET_NR_capset
:
9366 struct target_user_cap_header
*target_header
;
9367 struct target_user_cap_data
*target_data
= NULL
;
9368 struct __user_cap_header_struct header
;
9369 struct __user_cap_data_struct data
[2];
9370 struct __user_cap_data_struct
*dataptr
= NULL
;
9371 int i
, target_datalen
;
9374 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9375 return -TARGET_EFAULT
;
9377 header
.version
= tswap32(target_header
->version
);
9378 header
.pid
= tswap32(target_header
->pid
);
9380 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9381 /* Version 2 and up takes pointer to two user_data structs */
9385 target_datalen
= sizeof(*target_data
) * data_items
;
9388 if (num
== TARGET_NR_capget
) {
9389 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9391 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9394 unlock_user_struct(target_header
, arg1
, 0);
9395 return -TARGET_EFAULT
;
9398 if (num
== TARGET_NR_capset
) {
9399 for (i
= 0; i
< data_items
; i
++) {
9400 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9401 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9402 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9409 if (num
== TARGET_NR_capget
) {
9410 ret
= get_errno(capget(&header
, dataptr
));
9412 ret
= get_errno(capset(&header
, dataptr
));
9415 /* The kernel always updates version for both capget and capset */
9416 target_header
->version
= tswap32(header
.version
);
9417 unlock_user_struct(target_header
, arg1
, 1);
9420 if (num
== TARGET_NR_capget
) {
9421 for (i
= 0; i
< data_items
; i
++) {
9422 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9423 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9424 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9426 unlock_user(target_data
, arg2
, target_datalen
);
9428 unlock_user(target_data
, arg2
, 0);
9433 case TARGET_NR_sigaltstack
:
9434 return do_sigaltstack(arg1
, arg2
,
9435 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9437 #ifdef CONFIG_SENDFILE
9438 #ifdef TARGET_NR_sendfile
9439 case TARGET_NR_sendfile
:
9444 ret
= get_user_sal(off
, arg3
);
9445 if (is_error(ret
)) {
9450 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9451 if (!is_error(ret
) && arg3
) {
9452 abi_long ret2
= put_user_sal(off
, arg3
);
9453 if (is_error(ret2
)) {
9460 #ifdef TARGET_NR_sendfile64
9461 case TARGET_NR_sendfile64
:
9466 ret
= get_user_s64(off
, arg3
);
9467 if (is_error(ret
)) {
9472 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9473 if (!is_error(ret
) && arg3
) {
9474 abi_long ret2
= put_user_s64(off
, arg3
);
9475 if (is_error(ret2
)) {
9483 #ifdef TARGET_NR_vfork
9484 case TARGET_NR_vfork
:
9485 return get_errno(do_fork(cpu_env
,
9486 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
9489 #ifdef TARGET_NR_ugetrlimit
9490 case TARGET_NR_ugetrlimit
:
9493 int resource
= target_to_host_resource(arg1
);
9494 ret
= get_errno(getrlimit(resource
, &rlim
));
9495 if (!is_error(ret
)) {
9496 struct target_rlimit
*target_rlim
;
9497 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9498 return -TARGET_EFAULT
;
9499 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9500 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9501 unlock_user_struct(target_rlim
, arg2
, 1);
9506 #ifdef TARGET_NR_truncate64
9507 case TARGET_NR_truncate64
:
9508 if (!(p
= lock_user_string(arg1
)))
9509 return -TARGET_EFAULT
;
9510 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9511 unlock_user(p
, arg1
, 0);
9514 #ifdef TARGET_NR_ftruncate64
9515 case TARGET_NR_ftruncate64
:
9516 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9518 #ifdef TARGET_NR_stat64
9519 case TARGET_NR_stat64
:
9520 if (!(p
= lock_user_string(arg1
))) {
9521 return -TARGET_EFAULT
;
9523 ret
= get_errno(stat(path(p
), &st
));
9524 unlock_user(p
, arg1
, 0);
9526 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9529 #ifdef TARGET_NR_lstat64
9530 case TARGET_NR_lstat64
:
9531 if (!(p
= lock_user_string(arg1
))) {
9532 return -TARGET_EFAULT
;
9534 ret
= get_errno(lstat(path(p
), &st
));
9535 unlock_user(p
, arg1
, 0);
9537 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9540 #ifdef TARGET_NR_fstat64
9541 case TARGET_NR_fstat64
:
9542 ret
= get_errno(fstat(arg1
, &st
));
9544 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9547 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9548 #ifdef TARGET_NR_fstatat64
9549 case TARGET_NR_fstatat64
:
9551 #ifdef TARGET_NR_newfstatat
9552 case TARGET_NR_newfstatat
:
9554 if (!(p
= lock_user_string(arg2
))) {
9555 return -TARGET_EFAULT
;
9557 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9558 unlock_user(p
, arg2
, 0);
9560 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9563 #ifdef TARGET_NR_lchown
9564 case TARGET_NR_lchown
:
9565 if (!(p
= lock_user_string(arg1
)))
9566 return -TARGET_EFAULT
;
9567 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9568 unlock_user(p
, arg1
, 0);
9571 #ifdef TARGET_NR_getuid
9572 case TARGET_NR_getuid
:
9573 return get_errno(high2lowuid(getuid()));
9575 #ifdef TARGET_NR_getgid
9576 case TARGET_NR_getgid
:
9577 return get_errno(high2lowgid(getgid()));
9579 #ifdef TARGET_NR_geteuid
9580 case TARGET_NR_geteuid
:
9581 return get_errno(high2lowuid(geteuid()));
9583 #ifdef TARGET_NR_getegid
9584 case TARGET_NR_getegid
:
9585 return get_errno(high2lowgid(getegid()));
9587 case TARGET_NR_setreuid
:
9588 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9589 case TARGET_NR_setregid
:
9590 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9591 case TARGET_NR_getgroups
:
9593 int gidsetsize
= arg1
;
9594 target_id
*target_grouplist
;
9598 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9599 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9600 if (gidsetsize
== 0)
9602 if (!is_error(ret
)) {
9603 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9604 if (!target_grouplist
)
9605 return -TARGET_EFAULT
;
9606 for(i
= 0;i
< ret
; i
++)
9607 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9608 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9612 case TARGET_NR_setgroups
:
9614 int gidsetsize
= arg1
;
9615 target_id
*target_grouplist
;
9616 gid_t
*grouplist
= NULL
;
9619 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9620 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9621 if (!target_grouplist
) {
9622 return -TARGET_EFAULT
;
9624 for (i
= 0; i
< gidsetsize
; i
++) {
9625 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9627 unlock_user(target_grouplist
, arg2
, 0);
9629 return get_errno(setgroups(gidsetsize
, grouplist
));
9631 case TARGET_NR_fchown
:
9632 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9633 #if defined(TARGET_NR_fchownat)
9634 case TARGET_NR_fchownat
:
9635 if (!(p
= lock_user_string(arg2
)))
9636 return -TARGET_EFAULT
;
9637 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9638 low2highgid(arg4
), arg5
));
9639 unlock_user(p
, arg2
, 0);
9642 #ifdef TARGET_NR_setresuid
9643 case TARGET_NR_setresuid
:
9644 return get_errno(sys_setresuid(low2highuid(arg1
),
9646 low2highuid(arg3
)));
9648 #ifdef TARGET_NR_getresuid
9649 case TARGET_NR_getresuid
:
9651 uid_t ruid
, euid
, suid
;
9652 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9653 if (!is_error(ret
)) {
9654 if (put_user_id(high2lowuid(ruid
), arg1
)
9655 || put_user_id(high2lowuid(euid
), arg2
)
9656 || put_user_id(high2lowuid(suid
), arg3
))
9657 return -TARGET_EFAULT
;
9662 #ifdef TARGET_NR_getresgid
9663 case TARGET_NR_setresgid
:
9664 return get_errno(sys_setresgid(low2highgid(arg1
),
9666 low2highgid(arg3
)));
9668 #ifdef TARGET_NR_getresgid
9669 case TARGET_NR_getresgid
:
9671 gid_t rgid
, egid
, sgid
;
9672 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9673 if (!is_error(ret
)) {
9674 if (put_user_id(high2lowgid(rgid
), arg1
)
9675 || put_user_id(high2lowgid(egid
), arg2
)
9676 || put_user_id(high2lowgid(sgid
), arg3
))
9677 return -TARGET_EFAULT
;
9682 #ifdef TARGET_NR_chown
9683 case TARGET_NR_chown
:
9684 if (!(p
= lock_user_string(arg1
)))
9685 return -TARGET_EFAULT
;
9686 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9687 unlock_user(p
, arg1
, 0);
9690 case TARGET_NR_setuid
:
9691 return get_errno(sys_setuid(low2highuid(arg1
)));
9692 case TARGET_NR_setgid
:
9693 return get_errno(sys_setgid(low2highgid(arg1
)));
9694 case TARGET_NR_setfsuid
:
9695 return get_errno(setfsuid(arg1
));
9696 case TARGET_NR_setfsgid
:
9697 return get_errno(setfsgid(arg1
));
9699 #ifdef TARGET_NR_lchown32
9700 case TARGET_NR_lchown32
:
9701 if (!(p
= lock_user_string(arg1
)))
9702 return -TARGET_EFAULT
;
9703 ret
= get_errno(lchown(p
, arg2
, arg3
));
9704 unlock_user(p
, arg1
, 0);
9707 #ifdef TARGET_NR_getuid32
9708 case TARGET_NR_getuid32
:
9709 return get_errno(getuid());
9712 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9713 /* Alpha specific */
9714 case TARGET_NR_getxuid
:
9718 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
9720 return get_errno(getuid());
9722 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9723 /* Alpha specific */
9724 case TARGET_NR_getxgid
:
9728 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
9730 return get_errno(getgid());
9732 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9733 /* Alpha specific */
9734 case TARGET_NR_osf_getsysinfo
:
9735 ret
= -TARGET_EOPNOTSUPP
;
9737 case TARGET_GSI_IEEE_FP_CONTROL
:
9739 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
9741 /* Copied from linux ieee_fpcr_to_swcr. */
9742 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
9743 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
9744 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
9745 | SWCR_TRAP_ENABLE_DZE
9746 | SWCR_TRAP_ENABLE_OVF
);
9747 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
9748 | SWCR_TRAP_ENABLE_INE
);
9749 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
9750 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
9752 if (put_user_u64 (swcr
, arg2
))
9753 return -TARGET_EFAULT
;
9758 /* case GSI_IEEE_STATE_AT_SIGNAL:
9759 -- Not implemented in linux kernel.
9761 -- Retrieves current unaligned access state; not much used.
9763 -- Retrieves implver information; surely not used.
9765 -- Grabs a copy of the HWRPB; surely not used.
9770 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9771 /* Alpha specific */
9772 case TARGET_NR_osf_setsysinfo
:
9773 ret
= -TARGET_EOPNOTSUPP
;
9775 case TARGET_SSI_IEEE_FP_CONTROL
:
9777 uint64_t swcr
, fpcr
, orig_fpcr
;
9779 if (get_user_u64 (swcr
, arg2
)) {
9780 return -TARGET_EFAULT
;
9782 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9783 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
9785 /* Copied from linux ieee_swcr_to_fpcr. */
9786 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
9787 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
9788 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
9789 | SWCR_TRAP_ENABLE_DZE
9790 | SWCR_TRAP_ENABLE_OVF
)) << 48;
9791 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
9792 | SWCR_TRAP_ENABLE_INE
)) << 57;
9793 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
9794 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
9796 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9801 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9803 uint64_t exc
, fpcr
, orig_fpcr
;
9806 if (get_user_u64(exc
, arg2
)) {
9807 return -TARGET_EFAULT
;
9810 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9812 /* We only add to the exception status here. */
9813 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9815 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9818 /* Old exceptions are not signaled. */
9819 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9821 /* If any exceptions set by this call,
9822 and are unmasked, send a signal. */
9824 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9825 si_code
= TARGET_FPE_FLTRES
;
9827 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9828 si_code
= TARGET_FPE_FLTUND
;
9830 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9831 si_code
= TARGET_FPE_FLTOVF
;
9833 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9834 si_code
= TARGET_FPE_FLTDIV
;
9836 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9837 si_code
= TARGET_FPE_FLTINV
;
9840 target_siginfo_t info
;
9841 info
.si_signo
= SIGFPE
;
9843 info
.si_code
= si_code
;
9844 info
._sifields
._sigfault
._addr
9845 = ((CPUArchState
*)cpu_env
)->pc
;
9846 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
9847 QEMU_SI_FAULT
, &info
);
9852 /* case SSI_NVPAIRS:
9853 -- Used with SSIN_UACPROC to enable unaligned accesses.
9854 case SSI_IEEE_STATE_AT_SIGNAL:
9855 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9856 -- Not implemented in linux kernel
9861 #ifdef TARGET_NR_osf_sigprocmask
9862 /* Alpha specific. */
9863 case TARGET_NR_osf_sigprocmask
:
9867 sigset_t set
, oldset
;
9870 case TARGET_SIG_BLOCK
:
9873 case TARGET_SIG_UNBLOCK
:
9876 case TARGET_SIG_SETMASK
:
9880 return -TARGET_EINVAL
;
9883 target_to_host_old_sigset(&set
, &mask
);
9884 ret
= do_sigprocmask(how
, &set
, &oldset
);
9886 host_to_target_old_sigset(&mask
, &oldset
);
9893 #ifdef TARGET_NR_getgid32
9894 case TARGET_NR_getgid32
:
9895 return get_errno(getgid());
9897 #ifdef TARGET_NR_geteuid32
9898 case TARGET_NR_geteuid32
:
9899 return get_errno(geteuid());
9901 #ifdef TARGET_NR_getegid32
9902 case TARGET_NR_getegid32
:
9903 return get_errno(getegid());
9905 #ifdef TARGET_NR_setreuid32
9906 case TARGET_NR_setreuid32
:
9907 return get_errno(setreuid(arg1
, arg2
));
9909 #ifdef TARGET_NR_setregid32
9910 case TARGET_NR_setregid32
:
9911 return get_errno(setregid(arg1
, arg2
));
9913 #ifdef TARGET_NR_getgroups32
9914 case TARGET_NR_getgroups32
:
9916 int gidsetsize
= arg1
;
9917 uint32_t *target_grouplist
;
9921 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9922 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9923 if (gidsetsize
== 0)
9925 if (!is_error(ret
)) {
9926 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9927 if (!target_grouplist
) {
9928 return -TARGET_EFAULT
;
9930 for(i
= 0;i
< ret
; i
++)
9931 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9932 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9937 #ifdef TARGET_NR_setgroups32
9938 case TARGET_NR_setgroups32
:
9940 int gidsetsize
= arg1
;
9941 uint32_t *target_grouplist
;
9945 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9946 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9947 if (!target_grouplist
) {
9948 return -TARGET_EFAULT
;
9950 for(i
= 0;i
< gidsetsize
; i
++)
9951 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9952 unlock_user(target_grouplist
, arg2
, 0);
9953 return get_errno(setgroups(gidsetsize
, grouplist
));
9956 #ifdef TARGET_NR_fchown32
9957 case TARGET_NR_fchown32
:
9958 return get_errno(fchown(arg1
, arg2
, arg3
));
9960 #ifdef TARGET_NR_setresuid32
9961 case TARGET_NR_setresuid32
:
9962 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
9964 #ifdef TARGET_NR_getresuid32
9965 case TARGET_NR_getresuid32
:
9967 uid_t ruid
, euid
, suid
;
9968 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9969 if (!is_error(ret
)) {
9970 if (put_user_u32(ruid
, arg1
)
9971 || put_user_u32(euid
, arg2
)
9972 || put_user_u32(suid
, arg3
))
9973 return -TARGET_EFAULT
;
9978 #ifdef TARGET_NR_setresgid32
9979 case TARGET_NR_setresgid32
:
9980 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
9982 #ifdef TARGET_NR_getresgid32
9983 case TARGET_NR_getresgid32
:
9985 gid_t rgid
, egid
, sgid
;
9986 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9987 if (!is_error(ret
)) {
9988 if (put_user_u32(rgid
, arg1
)
9989 || put_user_u32(egid
, arg2
)
9990 || put_user_u32(sgid
, arg3
))
9991 return -TARGET_EFAULT
;
9996 #ifdef TARGET_NR_chown32
9997 case TARGET_NR_chown32
:
9998 if (!(p
= lock_user_string(arg1
)))
9999 return -TARGET_EFAULT
;
10000 ret
= get_errno(chown(p
, arg2
, arg3
));
10001 unlock_user(p
, arg1
, 0);
10004 #ifdef TARGET_NR_setuid32
10005 case TARGET_NR_setuid32
:
10006 return get_errno(sys_setuid(arg1
));
10008 #ifdef TARGET_NR_setgid32
10009 case TARGET_NR_setgid32
:
10010 return get_errno(sys_setgid(arg1
));
10012 #ifdef TARGET_NR_setfsuid32
10013 case TARGET_NR_setfsuid32
:
10014 return get_errno(setfsuid(arg1
));
10016 #ifdef TARGET_NR_setfsgid32
10017 case TARGET_NR_setfsgid32
:
10018 return get_errno(setfsgid(arg1
));
10020 #ifdef TARGET_NR_mincore
10021 case TARGET_NR_mincore
:
10023 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10025 return -TARGET_ENOMEM
;
10027 p
= lock_user_string(arg3
);
10029 ret
= -TARGET_EFAULT
;
10031 ret
= get_errno(mincore(a
, arg2
, p
));
10032 unlock_user(p
, arg3
, ret
);
10034 unlock_user(a
, arg1
, 0);
10038 #ifdef TARGET_NR_arm_fadvise64_64
10039 case TARGET_NR_arm_fadvise64_64
:
10040 /* arm_fadvise64_64 looks like fadvise64_64 but
10041 * with different argument order: fd, advice, offset, len
10042 * rather than the usual fd, offset, len, advice.
10043 * Note that offset and len are both 64-bit so appear as
10044 * pairs of 32-bit registers.
10046 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10047 target_offset64(arg5
, arg6
), arg2
);
10048 return -host_to_target_errno(ret
);
10051 #if TARGET_ABI_BITS == 32
10053 #ifdef TARGET_NR_fadvise64_64
10054 case TARGET_NR_fadvise64_64
:
10055 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10056 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10064 /* 6 args: fd, offset (high, low), len (high, low), advice */
10065 if (regpairs_aligned(cpu_env
, num
)) {
10066 /* offset is in (3,4), len in (5,6) and advice in 7 */
10074 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10075 target_offset64(arg4
, arg5
), arg6
);
10076 return -host_to_target_errno(ret
);
10079 #ifdef TARGET_NR_fadvise64
10080 case TARGET_NR_fadvise64
:
10081 /* 5 args: fd, offset (high, low), len, advice */
10082 if (regpairs_aligned(cpu_env
, num
)) {
10083 /* offset is in (3,4), len in 5 and advice in 6 */
10089 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
10090 return -host_to_target_errno(ret
);
10093 #else /* not a 32-bit ABI */
10094 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10095 #ifdef TARGET_NR_fadvise64_64
10096 case TARGET_NR_fadvise64_64
:
10098 #ifdef TARGET_NR_fadvise64
10099 case TARGET_NR_fadvise64
:
10101 #ifdef TARGET_S390X
10103 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10104 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10105 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10106 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10110 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10112 #endif /* end of 64-bit ABI fadvise handling */
10114 #ifdef TARGET_NR_madvise
10115 case TARGET_NR_madvise
:
10116 /* A straight passthrough may not be safe because qemu sometimes
10117 turns private file-backed mappings into anonymous mappings.
10118 This will break MADV_DONTNEED.
10119 This is a hint, so ignoring and returning success is ok. */
10122 #if TARGET_ABI_BITS == 32
10123 case TARGET_NR_fcntl64
:
10127 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10128 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10131 if (!((CPUARMState
*)cpu_env
)->eabi
) {
10132 copyfrom
= copy_from_user_oabi_flock64
;
10133 copyto
= copy_to_user_oabi_flock64
;
10137 cmd
= target_to_host_fcntl_cmd(arg2
);
10138 if (cmd
== -TARGET_EINVAL
) {
10143 case TARGET_F_GETLK64
:
10144 ret
= copyfrom(&fl
, arg3
);
10148 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10150 ret
= copyto(arg3
, &fl
);
10154 case TARGET_F_SETLK64
:
10155 case TARGET_F_SETLKW64
:
10156 ret
= copyfrom(&fl
, arg3
);
10160 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10163 ret
= do_fcntl(arg1
, arg2
, arg3
);
10169 #ifdef TARGET_NR_cacheflush
10170 case TARGET_NR_cacheflush
:
10171 /* self-modifying code is handled automatically, so nothing needed */
10174 #ifdef TARGET_NR_getpagesize
10175 case TARGET_NR_getpagesize
:
10176 return TARGET_PAGE_SIZE
;
10178 case TARGET_NR_gettid
:
10179 return get_errno(gettid());
10180 #ifdef TARGET_NR_readahead
10181 case TARGET_NR_readahead
:
10182 #if TARGET_ABI_BITS == 32
10183 if (regpairs_aligned(cpu_env
, num
)) {
10188 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
10190 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10195 #ifdef TARGET_NR_setxattr
10196 case TARGET_NR_listxattr
:
10197 case TARGET_NR_llistxattr
:
10201 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10203 return -TARGET_EFAULT
;
10206 p
= lock_user_string(arg1
);
10208 if (num
== TARGET_NR_listxattr
) {
10209 ret
= get_errno(listxattr(p
, b
, arg3
));
10211 ret
= get_errno(llistxattr(p
, b
, arg3
));
10214 ret
= -TARGET_EFAULT
;
10216 unlock_user(p
, arg1
, 0);
10217 unlock_user(b
, arg2
, arg3
);
10220 case TARGET_NR_flistxattr
:
10224 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10226 return -TARGET_EFAULT
;
10229 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10230 unlock_user(b
, arg2
, arg3
);
10233 case TARGET_NR_setxattr
:
10234 case TARGET_NR_lsetxattr
:
10236 void *p
, *n
, *v
= 0;
10238 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10240 return -TARGET_EFAULT
;
10243 p
= lock_user_string(arg1
);
10244 n
= lock_user_string(arg2
);
10246 if (num
== TARGET_NR_setxattr
) {
10247 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10249 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10252 ret
= -TARGET_EFAULT
;
10254 unlock_user(p
, arg1
, 0);
10255 unlock_user(n
, arg2
, 0);
10256 unlock_user(v
, arg3
, 0);
10259 case TARGET_NR_fsetxattr
:
10263 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10265 return -TARGET_EFAULT
;
10268 n
= lock_user_string(arg2
);
10270 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10272 ret
= -TARGET_EFAULT
;
10274 unlock_user(n
, arg2
, 0);
10275 unlock_user(v
, arg3
, 0);
10278 case TARGET_NR_getxattr
:
10279 case TARGET_NR_lgetxattr
:
10281 void *p
, *n
, *v
= 0;
10283 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10285 return -TARGET_EFAULT
;
10288 p
= lock_user_string(arg1
);
10289 n
= lock_user_string(arg2
);
10291 if (num
== TARGET_NR_getxattr
) {
10292 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10294 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10297 ret
= -TARGET_EFAULT
;
10299 unlock_user(p
, arg1
, 0);
10300 unlock_user(n
, arg2
, 0);
10301 unlock_user(v
, arg3
, arg4
);
10304 case TARGET_NR_fgetxattr
:
10308 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10310 return -TARGET_EFAULT
;
10313 n
= lock_user_string(arg2
);
10315 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10317 ret
= -TARGET_EFAULT
;
10319 unlock_user(n
, arg2
, 0);
10320 unlock_user(v
, arg3
, arg4
);
10323 case TARGET_NR_removexattr
:
10324 case TARGET_NR_lremovexattr
:
10327 p
= lock_user_string(arg1
);
10328 n
= lock_user_string(arg2
);
10330 if (num
== TARGET_NR_removexattr
) {
10331 ret
= get_errno(removexattr(p
, n
));
10333 ret
= get_errno(lremovexattr(p
, n
));
10336 ret
= -TARGET_EFAULT
;
10338 unlock_user(p
, arg1
, 0);
10339 unlock_user(n
, arg2
, 0);
10342 case TARGET_NR_fremovexattr
:
10345 n
= lock_user_string(arg2
);
10347 ret
= get_errno(fremovexattr(arg1
, n
));
10349 ret
= -TARGET_EFAULT
;
10351 unlock_user(n
, arg2
, 0);
10355 #endif /* CONFIG_ATTR */
10356 #ifdef TARGET_NR_set_thread_area
10357 case TARGET_NR_set_thread_area
:
10358 #if defined(TARGET_MIPS)
10359 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10361 #elif defined(TARGET_CRIS)
10363 ret
= -TARGET_EINVAL
;
10365 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10369 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10370 return do_set_thread_area(cpu_env
, arg1
);
10371 #elif defined(TARGET_M68K)
10373 TaskState
*ts
= cpu
->opaque
;
10374 ts
->tp_value
= arg1
;
10378 return -TARGET_ENOSYS
;
10381 #ifdef TARGET_NR_get_thread_area
10382 case TARGET_NR_get_thread_area
:
10383 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10384 return do_get_thread_area(cpu_env
, arg1
);
10385 #elif defined(TARGET_M68K)
10387 TaskState
*ts
= cpu
->opaque
;
10388 return ts
->tp_value
;
10391 return -TARGET_ENOSYS
;
10394 #ifdef TARGET_NR_getdomainname
10395 case TARGET_NR_getdomainname
:
10396 return -TARGET_ENOSYS
;
10399 #ifdef TARGET_NR_clock_settime
10400 case TARGET_NR_clock_settime
:
10402 struct timespec ts
;
10404 ret
= target_to_host_timespec(&ts
, arg2
);
10405 if (!is_error(ret
)) {
10406 ret
= get_errno(clock_settime(arg1
, &ts
));
10411 #ifdef TARGET_NR_clock_gettime
10412 case TARGET_NR_clock_gettime
:
10414 struct timespec ts
;
10415 ret
= get_errno(clock_gettime(arg1
, &ts
));
10416 if (!is_error(ret
)) {
10417 ret
= host_to_target_timespec(arg2
, &ts
);
10422 #ifdef TARGET_NR_clock_getres
10423 case TARGET_NR_clock_getres
:
10425 struct timespec ts
;
10426 ret
= get_errno(clock_getres(arg1
, &ts
));
10427 if (!is_error(ret
)) {
10428 host_to_target_timespec(arg2
, &ts
);
10433 #ifdef TARGET_NR_clock_nanosleep
10434 case TARGET_NR_clock_nanosleep
:
10436 struct timespec ts
;
10437 target_to_host_timespec(&ts
, arg3
);
10438 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10439 &ts
, arg4
? &ts
: NULL
));
10441 host_to_target_timespec(arg4
, &ts
);
10443 #if defined(TARGET_PPC)
10444 /* clock_nanosleep is odd in that it returns positive errno values.
10445 * On PPC, CR0 bit 3 should be set in such a situation. */
10446 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10447 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10454 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10455 case TARGET_NR_set_tid_address
:
10456 return get_errno(set_tid_address((int *)g2h(arg1
)));
10459 case TARGET_NR_tkill
:
10460 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10462 case TARGET_NR_tgkill
:
10463 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10464 target_to_host_signal(arg3
)));
10466 #ifdef TARGET_NR_set_robust_list
10467 case TARGET_NR_set_robust_list
:
10468 case TARGET_NR_get_robust_list
:
10469 /* The ABI for supporting robust futexes has userspace pass
10470 * the kernel a pointer to a linked list which is updated by
10471 * userspace after the syscall; the list is walked by the kernel
10472 * when the thread exits. Since the linked list in QEMU guest
10473 * memory isn't a valid linked list for the host and we have
10474 * no way to reliably intercept the thread-death event, we can't
10475 * support these. Silently return ENOSYS so that guest userspace
10476 * falls back to a non-robust futex implementation (which should
10477 * be OK except in the corner case of the guest crashing while
10478 * holding a mutex that is shared with another process via
10481 return -TARGET_ENOSYS
;
10484 #if defined(TARGET_NR_utimensat)
10485 case TARGET_NR_utimensat
:
10487 struct timespec
*tsp
, ts
[2];
10491 target_to_host_timespec(ts
, arg3
);
10492 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10496 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10498 if (!(p
= lock_user_string(arg2
))) {
10499 return -TARGET_EFAULT
;
10501 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10502 unlock_user(p
, arg2
, 0);
10507 case TARGET_NR_futex
:
10508 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10510 case TARGET_NR_inotify_init
:
10511 ret
= get_errno(sys_inotify_init());
10513 fd_trans_register(ret
, &target_inotify_trans
);
10517 #ifdef CONFIG_INOTIFY1
10518 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10519 case TARGET_NR_inotify_init1
:
10520 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
10521 fcntl_flags_tbl
)));
10523 fd_trans_register(ret
, &target_inotify_trans
);
10528 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10529 case TARGET_NR_inotify_add_watch
:
10530 p
= lock_user_string(arg2
);
10531 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10532 unlock_user(p
, arg2
, 0);
10535 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10536 case TARGET_NR_inotify_rm_watch
:
10537 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10540 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10541 case TARGET_NR_mq_open
:
10543 struct mq_attr posix_mq_attr
;
10544 struct mq_attr
*pposix_mq_attr
;
10547 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
10548 pposix_mq_attr
= NULL
;
10550 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
10551 return -TARGET_EFAULT
;
10553 pposix_mq_attr
= &posix_mq_attr
;
10555 p
= lock_user_string(arg1
- 1);
10557 return -TARGET_EFAULT
;
10559 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
10560 unlock_user (p
, arg1
, 0);
10564 case TARGET_NR_mq_unlink
:
10565 p
= lock_user_string(arg1
- 1);
10567 return -TARGET_EFAULT
;
10569 ret
= get_errno(mq_unlink(p
));
10570 unlock_user (p
, arg1
, 0);
10573 case TARGET_NR_mq_timedsend
:
10575 struct timespec ts
;
10577 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10579 target_to_host_timespec(&ts
, arg5
);
10580 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10581 host_to_target_timespec(arg5
, &ts
);
10583 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
10585 unlock_user (p
, arg2
, arg3
);
10589 case TARGET_NR_mq_timedreceive
:
10591 struct timespec ts
;
10594 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10596 target_to_host_timespec(&ts
, arg5
);
10597 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10599 host_to_target_timespec(arg5
, &ts
);
10601 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10604 unlock_user (p
, arg2
, arg3
);
10606 put_user_u32(prio
, arg4
);
10610 /* Not implemented for now... */
10611 /* case TARGET_NR_mq_notify: */
10614 case TARGET_NR_mq_getsetattr
:
10616 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10619 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10620 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
10621 &posix_mq_attr_out
));
10622 } else if (arg3
!= 0) {
10623 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
10625 if (ret
== 0 && arg3
!= 0) {
10626 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10632 #ifdef CONFIG_SPLICE
10633 #ifdef TARGET_NR_tee
10634 case TARGET_NR_tee
:
10636 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10640 #ifdef TARGET_NR_splice
10641 case TARGET_NR_splice
:
10643 loff_t loff_in
, loff_out
;
10644 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10646 if (get_user_u64(loff_in
, arg2
)) {
10647 return -TARGET_EFAULT
;
10649 ploff_in
= &loff_in
;
10652 if (get_user_u64(loff_out
, arg4
)) {
10653 return -TARGET_EFAULT
;
10655 ploff_out
= &loff_out
;
10657 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10659 if (put_user_u64(loff_in
, arg2
)) {
10660 return -TARGET_EFAULT
;
10664 if (put_user_u64(loff_out
, arg4
)) {
10665 return -TARGET_EFAULT
;
10671 #ifdef TARGET_NR_vmsplice
10672 case TARGET_NR_vmsplice
:
10674 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10676 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10677 unlock_iovec(vec
, arg2
, arg3
, 0);
10679 ret
= -host_to_target_errno(errno
);
10684 #endif /* CONFIG_SPLICE */
10685 #ifdef CONFIG_EVENTFD
10686 #if defined(TARGET_NR_eventfd)
10687 case TARGET_NR_eventfd
:
10688 ret
= get_errno(eventfd(arg1
, 0));
10690 fd_trans_register(ret
, &target_eventfd_trans
);
10694 #if defined(TARGET_NR_eventfd2)
10695 case TARGET_NR_eventfd2
:
10697 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
10698 if (arg2
& TARGET_O_NONBLOCK
) {
10699 host_flags
|= O_NONBLOCK
;
10701 if (arg2
& TARGET_O_CLOEXEC
) {
10702 host_flags
|= O_CLOEXEC
;
10704 ret
= get_errno(eventfd(arg1
, host_flags
));
10706 fd_trans_register(ret
, &target_eventfd_trans
);
10711 #endif /* CONFIG_EVENTFD */
10712 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10713 case TARGET_NR_fallocate
:
10714 #if TARGET_ABI_BITS == 32
10715 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
10716 target_offset64(arg5
, arg6
)));
10718 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
10722 #if defined(CONFIG_SYNC_FILE_RANGE)
10723 #if defined(TARGET_NR_sync_file_range)
10724 case TARGET_NR_sync_file_range
:
10725 #if TARGET_ABI_BITS == 32
10726 #if defined(TARGET_MIPS)
10727 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10728 target_offset64(arg5
, arg6
), arg7
));
10730 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
10731 target_offset64(arg4
, arg5
), arg6
));
10732 #endif /* !TARGET_MIPS */
10734 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
10738 #if defined(TARGET_NR_sync_file_range2)
10739 case TARGET_NR_sync_file_range2
:
10740 /* This is like sync_file_range but the arguments are reordered */
10741 #if TARGET_ABI_BITS == 32
10742 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10743 target_offset64(arg5
, arg6
), arg2
));
10745 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
10750 #if defined(TARGET_NR_signalfd4)
10751 case TARGET_NR_signalfd4
:
10752 return do_signalfd4(arg1
, arg2
, arg4
);
10754 #if defined(TARGET_NR_signalfd)
10755 case TARGET_NR_signalfd
:
10756 return do_signalfd4(arg1
, arg2
, 0);
10758 #if defined(CONFIG_EPOLL)
10759 #if defined(TARGET_NR_epoll_create)
10760 case TARGET_NR_epoll_create
:
10761 return get_errno(epoll_create(arg1
));
10763 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10764 case TARGET_NR_epoll_create1
:
10765 return get_errno(epoll_create1(arg1
));
10767 #if defined(TARGET_NR_epoll_ctl)
10768 case TARGET_NR_epoll_ctl
:
10770 struct epoll_event ep
;
10771 struct epoll_event
*epp
= 0;
10773 struct target_epoll_event
*target_ep
;
10774 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
10775 return -TARGET_EFAULT
;
10777 ep
.events
= tswap32(target_ep
->events
);
10778 /* The epoll_data_t union is just opaque data to the kernel,
10779 * so we transfer all 64 bits across and need not worry what
10780 * actual data type it is.
10782 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10783 unlock_user_struct(target_ep
, arg4
, 0);
10786 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10790 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10791 #if defined(TARGET_NR_epoll_wait)
10792 case TARGET_NR_epoll_wait
:
10794 #if defined(TARGET_NR_epoll_pwait)
10795 case TARGET_NR_epoll_pwait
:
10798 struct target_epoll_event
*target_ep
;
10799 struct epoll_event
*ep
;
10801 int maxevents
= arg3
;
10802 int timeout
= arg4
;
10804 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
10805 return -TARGET_EINVAL
;
10808 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10809 maxevents
* sizeof(struct target_epoll_event
), 1);
10811 return -TARGET_EFAULT
;
10814 ep
= g_try_new(struct epoll_event
, maxevents
);
10816 unlock_user(target_ep
, arg2
, 0);
10817 return -TARGET_ENOMEM
;
10821 #if defined(TARGET_NR_epoll_pwait)
10822 case TARGET_NR_epoll_pwait
:
10824 target_sigset_t
*target_set
;
10825 sigset_t _set
, *set
= &_set
;
10828 if (arg6
!= sizeof(target_sigset_t
)) {
10829 ret
= -TARGET_EINVAL
;
10833 target_set
= lock_user(VERIFY_READ
, arg5
,
10834 sizeof(target_sigset_t
), 1);
10836 ret
= -TARGET_EFAULT
;
10839 target_to_host_sigset(set
, target_set
);
10840 unlock_user(target_set
, arg5
, 0);
10845 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
10846 set
, SIGSET_T_SIZE
));
10850 #if defined(TARGET_NR_epoll_wait)
10851 case TARGET_NR_epoll_wait
:
10852 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
10857 ret
= -TARGET_ENOSYS
;
10859 if (!is_error(ret
)) {
10861 for (i
= 0; i
< ret
; i
++) {
10862 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10863 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10865 unlock_user(target_ep
, arg2
,
10866 ret
* sizeof(struct target_epoll_event
));
10868 unlock_user(target_ep
, arg2
, 0);
10875 #ifdef TARGET_NR_prlimit64
10876 case TARGET_NR_prlimit64
:
10878 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10879 struct target_rlimit64
*target_rnew
, *target_rold
;
10880 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10881 int resource
= target_to_host_resource(arg2
);
10883 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10884 return -TARGET_EFAULT
;
10886 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10887 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10888 unlock_user_struct(target_rnew
, arg3
, 0);
10892 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10893 if (!is_error(ret
) && arg4
) {
10894 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10895 return -TARGET_EFAULT
;
10897 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10898 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10899 unlock_user_struct(target_rold
, arg4
, 1);
10904 #ifdef TARGET_NR_gethostname
10905 case TARGET_NR_gethostname
:
10907 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10909 ret
= get_errno(gethostname(name
, arg2
));
10910 unlock_user(name
, arg1
, arg2
);
10912 ret
= -TARGET_EFAULT
;
10917 #ifdef TARGET_NR_atomic_cmpxchg_32
10918 case TARGET_NR_atomic_cmpxchg_32
:
10920 /* should use start_exclusive from main.c */
10921 abi_ulong mem_value
;
10922 if (get_user_u32(mem_value
, arg6
)) {
10923 target_siginfo_t info
;
10924 info
.si_signo
= SIGSEGV
;
10926 info
.si_code
= TARGET_SEGV_MAPERR
;
10927 info
._sifields
._sigfault
._addr
= arg6
;
10928 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10929 QEMU_SI_FAULT
, &info
);
10933 if (mem_value
== arg2
)
10934 put_user_u32(arg1
, arg6
);
10938 #ifdef TARGET_NR_atomic_barrier
10939 case TARGET_NR_atomic_barrier
:
10940 /* Like the kernel implementation and the
10941 qemu arm barrier, no-op this? */
10945 #ifdef TARGET_NR_timer_create
10946 case TARGET_NR_timer_create
:
10948 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10950 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10953 int timer_index
= next_free_host_timer();
10955 if (timer_index
< 0) {
10956 ret
= -TARGET_EAGAIN
;
10958 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10961 phost_sevp
= &host_sevp
;
10962 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10968 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10972 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10973 return -TARGET_EFAULT
;
10981 #ifdef TARGET_NR_timer_settime
10982 case TARGET_NR_timer_settime
:
10984 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10985 * struct itimerspec * old_value */
10986 target_timer_t timerid
= get_timer_id(arg1
);
10990 } else if (arg3
== 0) {
10991 ret
= -TARGET_EINVAL
;
10993 timer_t htimer
= g_posix_timers
[timerid
];
10994 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10996 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
10997 return -TARGET_EFAULT
;
11000 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11001 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11002 return -TARGET_EFAULT
;
11009 #ifdef TARGET_NR_timer_gettime
11010 case TARGET_NR_timer_gettime
:
11012 /* args: timer_t timerid, struct itimerspec *curr_value */
11013 target_timer_t timerid
= get_timer_id(arg1
);
11017 } else if (!arg2
) {
11018 ret
= -TARGET_EFAULT
;
11020 timer_t htimer
= g_posix_timers
[timerid
];
11021 struct itimerspec hspec
;
11022 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11024 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11025 ret
= -TARGET_EFAULT
;
11032 #ifdef TARGET_NR_timer_getoverrun
11033 case TARGET_NR_timer_getoverrun
:
11035 /* args: timer_t timerid */
11036 target_timer_t timerid
= get_timer_id(arg1
);
11041 timer_t htimer
= g_posix_timers
[timerid
];
11042 ret
= get_errno(timer_getoverrun(htimer
));
11044 fd_trans_unregister(ret
);
11049 #ifdef TARGET_NR_timer_delete
11050 case TARGET_NR_timer_delete
:
11052 /* args: timer_t timerid */
11053 target_timer_t timerid
= get_timer_id(arg1
);
11058 timer_t htimer
= g_posix_timers
[timerid
];
11059 ret
= get_errno(timer_delete(htimer
));
11060 g_posix_timers
[timerid
] = 0;
11066 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11067 case TARGET_NR_timerfd_create
:
11068 return get_errno(timerfd_create(arg1
,
11069 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11072 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11073 case TARGET_NR_timerfd_gettime
:
11075 struct itimerspec its_curr
;
11077 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11079 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11080 return -TARGET_EFAULT
;
11086 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11087 case TARGET_NR_timerfd_settime
:
11089 struct itimerspec its_new
, its_old
, *p_new
;
11092 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11093 return -TARGET_EFAULT
;
11100 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11102 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11103 return -TARGET_EFAULT
;
11109 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11110 case TARGET_NR_ioprio_get
:
11111 return get_errno(ioprio_get(arg1
, arg2
));
11114 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11115 case TARGET_NR_ioprio_set
:
11116 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
11119 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11120 case TARGET_NR_setns
:
11121 return get_errno(setns(arg1
, arg2
));
11123 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11124 case TARGET_NR_unshare
:
11125 return get_errno(unshare(arg1
));
11127 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11128 case TARGET_NR_kcmp
:
11129 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
11131 #ifdef TARGET_NR_swapcontext
11132 case TARGET_NR_swapcontext
:
11133 /* PowerPC specific. */
11134 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
11138 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
11139 return -TARGET_ENOSYS
;
11144 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
11145 abi_long arg2
, abi_long arg3
, abi_long arg4
,
11146 abi_long arg5
, abi_long arg6
, abi_long arg7
,
11149 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
11152 #ifdef DEBUG_ERESTARTSYS
11153 /* Debug-only code for exercising the syscall-restart code paths
11154 * in the per-architecture cpu main loops: restart every syscall
11155 * the guest makes once before letting it through.
11161 return -TARGET_ERESTARTSYS
;
11166 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
11167 arg5
, arg6
, arg7
, arg8
);
11169 if (unlikely(do_strace
)) {
11170 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11171 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11172 arg5
, arg6
, arg7
, arg8
);
11173 print_syscall_ret(num
, ret
);
11175 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11176 arg5
, arg6
, arg7
, arg8
);
11179 trace_guest_user_syscall_ret(cpu
, num
, ret
);