4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
59 #include <sys/timerfd.h>
62 #include <sys/eventfd.h>
65 #include <sys/epoll.h>
68 #include "qemu/xattr.h"
70 #ifdef CONFIG_SENDFILE
71 #include <sys/sendfile.h>
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/cdrom.h>
84 #include <linux/hdreg.h>
85 #include <linux/soundcard.h>
87 #include <linux/mtio.h>
89 #if defined(CONFIG_FIEMAP)
90 #include <linux/fiemap.h>
93 #if defined(CONFIG_USBFS)
94 #include <linux/usbdevice_fs.h>
95 #include <linux/usb/ch9.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include <linux/if_alg.h>
106 #include "linux_loop.h"
110 #include "qemu/guest-random.h"
111 #include "qapi/error.h"
112 #include "fd-trans.h"
115 #define CLONE_IO 0x80000000 /* Clone io context */
118 /* We can't directly call the host clone syscall, because this will
119 * badly confuse libc (breaking mutexes, for example). So we must
120 * divide clone flags into:
121 * * flag combinations that look like pthread_create()
122 * * flag combinations that look like fork()
123 * * flags we can implement within QEMU itself
124 * * flags we can't support and will return an error for
126 /* For thread creation, all these flags must be present; for
127 * fork, none must be present.
129 #define CLONE_THREAD_FLAGS \
130 (CLONE_VM | CLONE_FS | CLONE_FILES | \
131 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 /* These flags are ignored:
134 * CLONE_DETACHED is now ignored by the kernel;
135 * CLONE_IO is just an optimisation hint to the I/O scheduler
137 #define CLONE_IGNORED_FLAGS \
138 (CLONE_DETACHED | CLONE_IO)
140 /* Flags for fork which we can implement within QEMU itself */
141 #define CLONE_OPTIONAL_FORK_FLAGS \
142 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
143 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 /* Flags for thread creation which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_THREAD_FLAGS \
147 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
148 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 #define CLONE_INVALID_FORK_FLAGS \
151 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 #define CLONE_INVALID_THREAD_FLAGS \
154 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
155 CLONE_IGNORED_FLAGS))
157 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
158 * have almost all been allocated. We cannot support any of
159 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
160 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
161 * The checks against the invalid thread masks above will catch these.
162 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
165 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
166 * once. This exercises the codepaths for restart.
168 //#define DEBUG_ERESTARTSYS
170 //#include <linux/msdos_fs.h>
171 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
172 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
182 #define _syscall0(type,name) \
183 static type name (void) \
185 return syscall(__NR_##name); \
188 #define _syscall1(type,name,type1,arg1) \
189 static type name (type1 arg1) \
191 return syscall(__NR_##name, arg1); \
194 #define _syscall2(type,name,type1,arg1,type2,arg2) \
195 static type name (type1 arg1,type2 arg2) \
197 return syscall(__NR_##name, arg1, arg2); \
200 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
201 static type name (type1 arg1,type2 arg2,type3 arg3) \
203 return syscall(__NR_##name, arg1, arg2, arg3); \
206 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
207 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
209 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
212 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
216 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
220 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
221 type5,arg5,type6,arg6) \
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
225 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
229 #define __NR_sys_uname __NR_uname
230 #define __NR_sys_getcwd1 __NR_getcwd
231 #define __NR_sys_getdents __NR_getdents
232 #define __NR_sys_getdents64 __NR_getdents64
233 #define __NR_sys_getpriority __NR_getpriority
234 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
235 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
236 #define __NR_sys_syslog __NR_syslog
237 #define __NR_sys_futex __NR_futex
238 #define __NR_sys_inotify_init __NR_inotify_init
239 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
240 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
243 #define __NR__llseek __NR_lseek
246 /* Newer kernel ports have llseek() instead of _llseek() */
247 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
248 #define TARGET_NR__llseek TARGET_NR_llseek
251 #define __NR_sys_gettid __NR_gettid
252 _syscall0(int, sys_gettid
)
254 /* For the 64-bit guest on 32-bit host case we must emulate
255 * getdents using getdents64, because otherwise the host
256 * might hand us back more dirent records than we can fit
257 * into the guest buffer after structure format conversion.
258 * Otherwise we emulate getdents with getdents if the host has it.
260 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
261 #define EMULATE_GETDENTS_WITH_GETDENTS
264 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
265 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
267 #if (defined(TARGET_NR_getdents) && \
268 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
269 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
270 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
272 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
273 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
274 loff_t
*, res
, uint
, wh
);
276 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
277 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
279 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
280 #ifdef __NR_exit_group
281 _syscall1(int,exit_group
,int,error_code
)
283 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
284 _syscall1(int,set_tid_address
,int *,tidptr
)
286 #if defined(TARGET_NR_futex) && defined(__NR_futex)
287 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
288 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
290 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
291 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
292 unsigned long *, user_mask_ptr
);
293 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
294 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
295 unsigned long *, user_mask_ptr
);
296 #define __NR_sys_getcpu __NR_getcpu
297 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
298 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
300 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
301 struct __user_cap_data_struct
*, data
);
302 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
303 struct __user_cap_data_struct
*, data
);
304 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
305 _syscall2(int, ioprio_get
, int, which
, int, who
)
307 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
308 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
310 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
311 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
314 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
315 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
316 unsigned long, idx1
, unsigned long, idx2
)
319 static bitmask_transtbl fcntl_flags_tbl
[] = {
320 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
321 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
322 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
323 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
324 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
325 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
326 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
327 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
328 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
329 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
330 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
331 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
332 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
333 #if defined(O_DIRECT)
334 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
336 #if defined(O_NOATIME)
337 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
339 #if defined(O_CLOEXEC)
340 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
343 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
345 #if defined(O_TMPFILE)
346 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
348 /* Don't terminate the list prematurely on 64-bit host+guest. */
349 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
350 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/*
 * Wrapper for host getcwd() following the kernel getcwd syscall
 * convention: on success it returns the string length *including* the
 * terminating NUL (hence strlen(buf)+1).  On failure getcwd() has
 * already set errno for the caller to pick up.
 * NOTE(review): the failure-return line is elided in this chunk --
 * presumably returns -1; confirm against the full source.
 */
355 static int sys_getcwd1(char *buf
, size_t size
)
357 if (getcwd(buf
, size
) == NULL
) {
358 /* getcwd() sets errno */
361 return strlen(buf
)+1;
364 #ifdef TARGET_NR_utimensat
365 #if defined(__NR_utimensat)
366 #define __NR_sys_utimensat __NR_utimensat
367 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
368 const struct timespec
*,tsp
,int,flags
)
370 static int sys_utimensat(int dirfd
, const char *pathname
,
371 const struct timespec times
[2], int flags
)
377 #endif /* TARGET_NR_utimensat */
379 #ifdef TARGET_NR_renameat2
380 #if defined(__NR_renameat2)
381 #define __NR_sys_renameat2 __NR_renameat2
382 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
383 const char *, new, unsigned int, flags
)
385 static int sys_renameat2(int oldfd
, const char *old
,
386 int newfd
, const char *new, int flags
)
389 return renameat(oldfd
, old
, newfd
, new);
395 #endif /* TARGET_NR_renameat2 */
397 #ifdef CONFIG_INOTIFY
398 #include <sys/inotify.h>
400 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper around the host libc inotify_init(). */
401 static int sys_inotify_init(void)
403 return (inotify_init());
406 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around host inotify_add_watch(2): adds a watch for
 * 'pathname' on inotify fd 'fd' with event mask 'mask'. */
407 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
409 return (inotify_add_watch(fd
, pathname
, mask
))
412 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around host inotify_rm_watch(2): removes watch 'wd'. */
413 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
415 return (inotify_rm_watch(fd
, wd
));
418 #ifdef CONFIG_INOTIFY1
419 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* inotify_init1() variant accepting IN_NONBLOCK/IN_CLOEXEC flags. */
420 static int sys_inotify_init1(int flags
)
422 return (inotify_init1(flags
));
427 /* Userspace can usually survive runtime without inotify */
428 #undef TARGET_NR_inotify_init
429 #undef TARGET_NR_inotify_init1
430 #undef TARGET_NR_inotify_add_watch
431 #undef TARGET_NR_inotify_rm_watch
432 #endif /* CONFIG_INOTIFY */
434 #if defined(TARGET_NR_prlimit64)
435 #ifndef __NR_prlimit64
436 # define __NR_prlimit64 -1
438 #define __NR_sys_prlimit64 __NR_prlimit64
439 /* The glibc rlimit structure may not be that used by the underlying syscall */
440 struct host_rlimit64
{
444 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
445 const struct host_rlimit64
*, new_limit
,
446 struct host_rlimit64
*, old_limit
)
450 #if defined(TARGET_NR_timer_create)
451 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
452 static timer_t g_posix_timers
[32] = { 0, } ;
/*
 * Find a free slot in g_posix_timers[] and claim it by storing a
 * non-zero placeholder; a slot value of 0 means "free".
 * NOTE(review): the return statements are elided in this chunk --
 * presumably returns the claimed index, or a negative value when all
 * 32 slots are in use; confirm against the full source.
 */
454 static inline int next_free_host_timer(void)
457 /* FIXME: Does finding the next free slot require a lock? */
458 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
459 if (g_posix_timers
[k
] == 0) {
460 g_posix_timers
[k
] = (timer_t
) 1;
468 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/*
 * regpairs_aligned(): per-target predicate telling the syscall layer
 * whether 64-bit syscall arguments must start on an even-numbered
 * guest register (i.e. are passed in an aligned register pair).
 */
470 static inline int regpairs_aligned(void *cpu_env
, int num
)
/* ARM: only EABI guests align pairs; OABI does not. */
472 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
474 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
475 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
476 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
477 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
478 * of registers which translates to the same as ARM/MIPS, because we start with
480 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
481 #elif defined(TARGET_SH4)
482 /* SH4 doesn't align register pairs, except for p{read,write}64 */
/* SH4 switches on the syscall number: only pread64/pwrite64 align. */
483 static inline int regpairs_aligned(void *cpu_env
, int num
)
486 case TARGET_NR_pread64
:
487 case TARGET_NR_pwrite64
:
494 #elif defined(TARGET_XTENSA)
495 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
/* Default: no register-pair alignment constraint. */
497 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
500 #define ERRNO_TABLE_SIZE 1200
502 /* target_to_host_errno_table[] is initialized from
503 * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
508 * This list is the union of errno values overridden in asm-<arch>/errno.h
509 * minus the errnos that are not actually generic to all archs.
511 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
512 [EAGAIN
] = TARGET_EAGAIN
,
513 [EIDRM
] = TARGET_EIDRM
,
514 [ECHRNG
] = TARGET_ECHRNG
,
515 [EL2NSYNC
] = TARGET_EL2NSYNC
,
516 [EL3HLT
] = TARGET_EL3HLT
,
517 [EL3RST
] = TARGET_EL3RST
,
518 [ELNRNG
] = TARGET_ELNRNG
,
519 [EUNATCH
] = TARGET_EUNATCH
,
520 [ENOCSI
] = TARGET_ENOCSI
,
521 [EL2HLT
] = TARGET_EL2HLT
,
522 [EDEADLK
] = TARGET_EDEADLK
,
523 [ENOLCK
] = TARGET_ENOLCK
,
524 [EBADE
] = TARGET_EBADE
,
525 [EBADR
] = TARGET_EBADR
,
526 [EXFULL
] = TARGET_EXFULL
,
527 [ENOANO
] = TARGET_ENOANO
,
528 [EBADRQC
] = TARGET_EBADRQC
,
529 [EBADSLT
] = TARGET_EBADSLT
,
530 [EBFONT
] = TARGET_EBFONT
,
531 [ENOSTR
] = TARGET_ENOSTR
,
532 [ENODATA
] = TARGET_ENODATA
,
533 [ETIME
] = TARGET_ETIME
,
534 [ENOSR
] = TARGET_ENOSR
,
535 [ENONET
] = TARGET_ENONET
,
536 [ENOPKG
] = TARGET_ENOPKG
,
537 [EREMOTE
] = TARGET_EREMOTE
,
538 [ENOLINK
] = TARGET_ENOLINK
,
539 [EADV
] = TARGET_EADV
,
540 [ESRMNT
] = TARGET_ESRMNT
,
541 [ECOMM
] = TARGET_ECOMM
,
542 [EPROTO
] = TARGET_EPROTO
,
543 [EDOTDOT
] = TARGET_EDOTDOT
,
544 [EMULTIHOP
] = TARGET_EMULTIHOP
,
545 [EBADMSG
] = TARGET_EBADMSG
,
546 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
547 [EOVERFLOW
] = TARGET_EOVERFLOW
,
548 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
549 [EBADFD
] = TARGET_EBADFD
,
550 [EREMCHG
] = TARGET_EREMCHG
,
551 [ELIBACC
] = TARGET_ELIBACC
,
552 [ELIBBAD
] = TARGET_ELIBBAD
,
553 [ELIBSCN
] = TARGET_ELIBSCN
,
554 [ELIBMAX
] = TARGET_ELIBMAX
,
555 [ELIBEXEC
] = TARGET_ELIBEXEC
,
556 [EILSEQ
] = TARGET_EILSEQ
,
557 [ENOSYS
] = TARGET_ENOSYS
,
558 [ELOOP
] = TARGET_ELOOP
,
559 [ERESTART
] = TARGET_ERESTART
,
560 [ESTRPIPE
] = TARGET_ESTRPIPE
,
561 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
562 [EUSERS
] = TARGET_EUSERS
,
563 [ENOTSOCK
] = TARGET_ENOTSOCK
,
564 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
565 [EMSGSIZE
] = TARGET_EMSGSIZE
,
566 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
567 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
568 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
569 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
570 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
571 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
572 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
573 [EADDRINUSE
] = TARGET_EADDRINUSE
,
574 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
575 [ENETDOWN
] = TARGET_ENETDOWN
,
576 [ENETUNREACH
] = TARGET_ENETUNREACH
,
577 [ENETRESET
] = TARGET_ENETRESET
,
578 [ECONNABORTED
] = TARGET_ECONNABORTED
,
579 [ECONNRESET
] = TARGET_ECONNRESET
,
580 [ENOBUFS
] = TARGET_ENOBUFS
,
581 [EISCONN
] = TARGET_EISCONN
,
582 [ENOTCONN
] = TARGET_ENOTCONN
,
583 [EUCLEAN
] = TARGET_EUCLEAN
,
584 [ENOTNAM
] = TARGET_ENOTNAM
,
585 [ENAVAIL
] = TARGET_ENAVAIL
,
586 [EISNAM
] = TARGET_EISNAM
,
587 [EREMOTEIO
] = TARGET_EREMOTEIO
,
588 [EDQUOT
] = TARGET_EDQUOT
,
589 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
590 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
591 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
592 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
593 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
594 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
595 [EALREADY
] = TARGET_EALREADY
,
596 [EINPROGRESS
] = TARGET_EINPROGRESS
,
597 [ESTALE
] = TARGET_ESTALE
,
598 [ECANCELED
] = TARGET_ECANCELED
,
599 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
600 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
602 [ENOKEY
] = TARGET_ENOKEY
,
605 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
608 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
611 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
614 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
616 #ifdef ENOTRECOVERABLE
617 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
620 [ENOMSG
] = TARGET_ENOMSG
,
623 [ERFKILL
] = TARGET_ERFKILL
,
626 [EHWPOISON
] = TARGET_EHWPOISON
,
/*
 * Translate a host errno to the target's numbering via the lookup
 * table.  NOTE(review): the fall-through path is elided in this chunk
 * -- values out of table range, or with no table entry, are presumably
 * returned unchanged; confirm against the full source.
 */
630 static inline int host_to_target_errno(int err
)
632 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
633 host_to_target_errno_table
[err
]) {
634 return host_to_target_errno_table
[err
];
/*
 * Inverse of host_to_target_errno(): map a target errno back to the
 * host's numbering.  NOTE(review): as above, the unchanged-value
 * fall-through path is elided in this chunk; confirm against the
 * full source.
 */
639 static inline int target_to_host_errno(int err
)
641 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
642 target_to_host_errno_table
[err
]) {
643 return target_to_host_errno_table
[err
];
/*
 * Convert a host syscall result into QEMU's internal convention:
 * failures are reported as the *negated* target errno; the success
 * path (returning 'ret' unchanged) is elided in this chunk.
 */
648 static inline abi_long
get_errno(abi_long ret
)
651 return -host_to_target_errno(errno
);
/*
 * Human-readable message for a *target* errno value (used by strace
 * logging).  QEMU-internal pseudo-errnos get fixed strings; other
 * values are mapped back to host numbering and fed to strerror().
 */
656 const char *target_strerror(int err
)
658 if (err
== TARGET_ERESTARTSYS
) {
659 return "To be restarted";
661 if (err
== TARGET_QEMU_ESIGRETURN
) {
662 return "Successful exit from sigreturn";
/* NOTE(review): the body of this out-of-range guard is elided in this
 * chunk -- presumably rejects the value (e.g. returns NULL); confirm. */
665 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
668 return strerror(target_to_host_errno(err
));
671 #define safe_syscall0(type, name) \
672 static type safe_##name(void) \
674 return safe_syscall(__NR_##name); \
677 #define safe_syscall1(type, name, type1, arg1) \
678 static type safe_##name(type1 arg1) \
680 return safe_syscall(__NR_##name, arg1); \
683 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
684 static type safe_##name(type1 arg1, type2 arg2) \
686 return safe_syscall(__NR_##name, arg1, arg2); \
689 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
690 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
692 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
695 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
697 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
699 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
702 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
703 type4, arg4, type5, arg5) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
707 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
710 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
711 type4, arg4, type5, arg5, type6, arg6) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713 type5 arg5, type6 arg6) \
715 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
718 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
719 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
720 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
721 int, flags
, mode_t
, mode
)
722 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
723 struct rusage
*, rusage
)
724 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
725 int, options
, struct rusage
*, rusage
)
726 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
727 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
728 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
729 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
730 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
732 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
733 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
735 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
736 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
737 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
738 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
739 safe_syscall2(int, tkill
, int, tid
, int, sig
)
740 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
741 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
742 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
743 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
744 unsigned long, pos_l
, unsigned long, pos_h
)
745 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
746 unsigned long, pos_l
, unsigned long, pos_h
)
747 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
749 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
750 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
751 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
752 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
753 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
754 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
755 safe_syscall2(int, flock
, int, fd
, int, operation
)
756 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
757 const struct timespec
*, uts
, size_t, sigsetsize
)
758 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
760 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
761 struct timespec
*, rem
)
762 #ifdef TARGET_NR_clock_nanosleep
763 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
764 const struct timespec
*, req
, struct timespec
*, rem
)
767 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
768 void *, ptr
, long, fifth
)
771 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
775 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
776 long, msgtype
, int, flags
)
778 #ifdef __NR_semtimedop
779 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
780 unsigned, nsops
, const struct timespec
*, timeout
)
782 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
783 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
784 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
785 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
786 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
788 /* We do ioctl like this rather than via safe_syscall3 to preserve the
789 * "third argument might be integer or pointer or not present" behaviour of
792 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
793 /* Similarly for fcntl. Note that callers must always:
794 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
795 * use the flock64 struct rather than unsuffixed flock
796 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
799 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
801 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/*
 * Translate a host socket type value -- the low-nibble SOCK_STREAM /
 * SOCK_DGRAM type plus the SOCK_CLOEXEC / SOCK_NONBLOCK flag bits --
 * into the target's encoding.  Unknown base types pass through
 * unchanged (masked to the type nibble).
 */
804 static inline int host_to_target_sock_type(int host_type
)
808 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
810 target_type
= TARGET_SOCK_DGRAM
;
813 target_type
= TARGET_SOCK_STREAM
;
816 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
820 #if defined(SOCK_CLOEXEC)
821 if (host_type
& SOCK_CLOEXEC
) {
822 target_type
|= TARGET_SOCK_CLOEXEC
;
826 #if defined(SOCK_NONBLOCK)
827 if (host_type
& SOCK_NONBLOCK
) {
828 target_type
|= TARGET_SOCK_NONBLOCK
;
/* Guest heap state used by do_brk():
 *   target_brk          - current guest program break
 *   target_original_brk - initial break; do_brk() refuses to go below it
 *   brk_page            - first host-page boundary above the break
 */
835 static abi_ulong target_brk
;
836 static abi_ulong target_original_brk
;
837 static abi_ulong brk_page
;
/* Record the guest's initial program break (host-page aligned). */
839 void target_set_brk(abi_ulong new_brk
)
841 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
842 brk_page
= HOST_PAGE_ALIGN(target_brk
);
845 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
846 #define DEBUGF_BRK(message, args...)
848 /* do_brk() must return target values and target errnos. */
/*
 * Emulate brk(2) for the guest: zero-fill growth within the already
 * mapped heap page, or target_mmap() new anonymous pages when the new
 * break crosses brk_page.  Shrinking below the original break is
 * refused.  NOTE(review): several lines (braces, the !new_brk query
 * path, and most returns) are elided in this extraction.
 */
849 abi_long
do_brk(abi_ulong new_brk
)
851 abi_long mapped_addr
;
852 abi_ulong new_alloc_size
;
854 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
857 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
/* Refuse to shrink the heap below its original starting point. */
860 if (new_brk
< target_original_brk
) {
861 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
866 /* If the new brk is less than the highest page reserved to the
867 * target heap allocation, set it and we're almost done... */
868 if (new_brk
<= brk_page
) {
869 /* Heap contents are initialized to zero, as for anonymous
871 if (new_brk
> target_brk
) {
872 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
874 target_brk
= new_brk
;
875 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
879 /* We need to allocate more memory after the brk... Note that
880 * we don't use MAP_FIXED because that will map over the top of
881 * any existing mapping (like the one with the host libc or qemu
882 * itself); instead we treat "mapped but at wrong address" as
883 * a failure and unmap again.
885 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
886 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
887 PROT_READ
|PROT_WRITE
,
888 MAP_ANON
|MAP_PRIVATE
, 0, 0));
890 if (mapped_addr
== brk_page
) {
891 /* Heap contents are initialized to zero, as for anonymous
892 * mapped pages. Technically the new pages are already
893 * initialized to zero since they *are* anonymous mapped
894 * pages, however we have to take care with the contents that
895 * come from the remaining part of the previous page: it may
896 * contains garbage data due to a previous heap usage (grown
898 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
900 target_brk
= new_brk
;
901 brk_page
= HOST_PAGE_ALIGN(target_brk
);
902 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
905 } else if (mapped_addr
!= -1) {
906 /* Mapped but at wrong address, meaning there wasn't actually
907 * enough space for this brk.
909 target_munmap(mapped_addr
, new_alloc_size
);
911 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
914 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
917 #if defined(TARGET_ALPHA)
918 /* We (partially) emulate OSF/1 on Alpha, which requires we
919 return a proper errno, not an unchanged brk value. */
920 return -TARGET_ENOMEM
;
922 /* For everything else, return the previous break. */
/*
 * Unpack a target fd_set -- an array of 'nw' abi_ulong bit-words in
 * guest memory -- into a host fd_set, reading each word with
 * __get_user() (which byte-swaps) and testing it bit by bit.
 * Returns -TARGET_EFAULT if the guest buffer cannot be locked.
 */
926 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
927 abi_ulong target_fds_addr
,
931 abi_ulong b
, *target_fds
;
933 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
934 if (!(target_fds
= lock_user(VERIFY_READ
,
936 sizeof(abi_ulong
) * nw
,
938 return -TARGET_EFAULT
;
942 for (i
= 0; i
< nw
; i
++) {
943 /* grab the abi_ulong */
944 __get_user(b
, &target_fds
[i
]);
945 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
946 /* check the bit inside the abi_ulong */
953 unlock_user(target_fds
, target_fds_addr
, 0);
/*
 * Like copy_from_user_fdset(), but treats a zero (NULL) guest address
 * as "no set": *fds_ptr lets the caller distinguish a NULL fd_set
 * argument from a populated one (as select() requires).
 */
958 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
959 abi_ulong target_fds_addr
,
962 if (target_fds_addr
) {
963 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
964 return -TARGET_EFAULT
;
/*
 * Pack a host fd_set back into the target's bit-array representation
 * in guest memory: build each abi_ulong word from FD_ISSET() bits and
 * store it with __put_user() (which byte-swaps).  Returns
 * -TARGET_EFAULT if the guest buffer cannot be locked for writing.
 */
972 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
978 abi_ulong
*target_fds
;
980 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
981 if (!(target_fds
= lock_user(VERIFY_WRITE
,
983 sizeof(abi_ulong
) * nw
,
985 return -TARGET_EFAULT
;
988 for (i
= 0; i
< nw
; i
++) {
990 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
991 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
994 __put_user(v
, &target_fds
[i
]);
997 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1002 #if defined(__alpha__)
1003 #define HOST_HZ 1024
/*
 * Rescale a clock-tick count from the host's HZ to the target's HZ,
 * using 64-bit intermediate arithmetic to avoid overflow.
 * NOTE(review): the HOST_HZ == TARGET_HZ fast-path body (returning
 * 'ticks' unchanged) is elided in this chunk; confirm.
 */
1008 static inline abi_long
host_to_target_clock_t(long ticks
)
1010 #if HOST_HZ == TARGET_HZ
1013 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1017 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1018 const struct rusage
*rusage
)
1020 struct target_rusage
*target_rusage
;
1022 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1023 return -TARGET_EFAULT
;
1024 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1025 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1026 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1027 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1028 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1029 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1030 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1031 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1032 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1033 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1034 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1035 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1036 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1037 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1038 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1039 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1040 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1041 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1042 unlock_user_struct(target_rusage
, target_addr
, 1);
1047 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1049 abi_ulong target_rlim_swap
;
1052 target_rlim_swap
= tswapal(target_rlim
);
1053 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1054 return RLIM_INFINITY
;
1056 result
= target_rlim_swap
;
1057 if (target_rlim_swap
!= (rlim_t
)result
)
1058 return RLIM_INFINITY
;
1063 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1065 abi_ulong target_rlim_swap
;
1068 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1069 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1071 target_rlim_swap
= rlim
;
1072 result
= tswapal(target_rlim_swap
);
1077 static inline int target_to_host_resource(int code
)
1080 case TARGET_RLIMIT_AS
:
1082 case TARGET_RLIMIT_CORE
:
1084 case TARGET_RLIMIT_CPU
:
1086 case TARGET_RLIMIT_DATA
:
1088 case TARGET_RLIMIT_FSIZE
:
1089 return RLIMIT_FSIZE
;
1090 case TARGET_RLIMIT_LOCKS
:
1091 return RLIMIT_LOCKS
;
1092 case TARGET_RLIMIT_MEMLOCK
:
1093 return RLIMIT_MEMLOCK
;
1094 case TARGET_RLIMIT_MSGQUEUE
:
1095 return RLIMIT_MSGQUEUE
;
1096 case TARGET_RLIMIT_NICE
:
1098 case TARGET_RLIMIT_NOFILE
:
1099 return RLIMIT_NOFILE
;
1100 case TARGET_RLIMIT_NPROC
:
1101 return RLIMIT_NPROC
;
1102 case TARGET_RLIMIT_RSS
:
1104 case TARGET_RLIMIT_RTPRIO
:
1105 return RLIMIT_RTPRIO
;
1106 case TARGET_RLIMIT_SIGPENDING
:
1107 return RLIMIT_SIGPENDING
;
1108 case TARGET_RLIMIT_STACK
:
1109 return RLIMIT_STACK
;
1115 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1116 abi_ulong target_tv_addr
)
1118 struct target_timeval
*target_tv
;
1120 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1121 return -TARGET_EFAULT
;
1123 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1124 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1126 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1131 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1132 const struct timeval
*tv
)
1134 struct target_timeval
*target_tv
;
1136 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1137 return -TARGET_EFAULT
;
1139 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1140 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1142 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1147 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1148 abi_ulong target_tz_addr
)
1150 struct target_timezone
*target_tz
;
1152 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1153 return -TARGET_EFAULT
;
1156 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1157 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1159 unlock_user_struct(target_tz
, target_tz_addr
, 0);
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr at target_mq_attr_addr into *attr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Write *attr into the guest struct mq_attr at target_mq_attr_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* Emulate select(2) for the guest via safe_pselect6.
 * do_select() must return target values and target errnos.
 * On success the fd sets and (Linux semantics) the remaining timeout
 * are written back to guest memory.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 takes a timespec, so convert usec -> nsec. */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Emulate the old single-argument select syscall: the guest passes a
 * pointer to a struct holding nfds and the four pointers, which we
 * unpack (byte-swapping each field) and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
1288 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1291 return pipe2(host_pipe
, flags
);
1297 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1298 int flags
, int is_pipe2
)
1302 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1305 return get_errno(ret
);
1307 /* Several targets have special calling conventions for the original
1308 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1310 #if defined(TARGET_ALPHA)
1311 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1312 return host_pipe
[0];
1313 #elif defined(TARGET_MIPS)
1314 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1315 return host_pipe
[0];
1316 #elif defined(TARGET_SH4)
1317 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1318 return host_pipe
[0];
1319 #elif defined(TARGET_SPARC)
1320 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1321 return host_pipe
[0];
1325 if (put_user_s32(host_pipe
[0], pipedes
)
1326 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1327 return -TARGET_EFAULT
;
1328 return get_errno(ret
);
1331 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1332 abi_ulong target_addr
,
1335 struct target_ip_mreqn
*target_smreqn
;
1337 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1339 return -TARGET_EFAULT
;
1340 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1341 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1342 if (len
== sizeof(struct target_ip_mreqn
))
1343 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1344 unlock_user(target_smreqn
, target_addr
, 0);
1349 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1350 abi_ulong target_addr
,
1353 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1354 sa_family_t sa_family
;
1355 struct target_sockaddr
*target_saddr
;
1357 if (fd_trans_target_to_host_addr(fd
)) {
1358 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1361 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1363 return -TARGET_EFAULT
;
1365 sa_family
= tswap16(target_saddr
->sa_family
);
1367 /* Oops. The caller might send a incomplete sun_path; sun_path
1368 * must be terminated by \0 (see the manual page), but
1369 * unfortunately it is quite common to specify sockaddr_un
1370 * length as "strlen(x->sun_path)" while it should be
1371 * "strlen(...) + 1". We'll fix that here if needed.
1372 * Linux kernel has a similar feature.
1375 if (sa_family
== AF_UNIX
) {
1376 if (len
< unix_maxlen
&& len
> 0) {
1377 char *cp
= (char*)target_saddr
;
1379 if ( cp
[len
-1] && !cp
[len
] )
1382 if (len
> unix_maxlen
)
1386 memcpy(addr
, target_saddr
, len
);
1387 addr
->sa_family
= sa_family
;
1388 if (sa_family
== AF_NETLINK
) {
1389 struct sockaddr_nl
*nladdr
;
1391 nladdr
= (struct sockaddr_nl
*)addr
;
1392 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1393 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1394 } else if (sa_family
== AF_PACKET
) {
1395 struct target_sockaddr_ll
*lladdr
;
1397 lladdr
= (struct target_sockaddr_ll
*)addr
;
1398 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1399 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1401 unlock_user(target_saddr
, target_addr
, 0);
1406 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1407 struct sockaddr
*addr
,
1410 struct target_sockaddr
*target_saddr
;
1417 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1419 return -TARGET_EFAULT
;
1420 memcpy(target_saddr
, addr
, len
);
1421 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1422 sizeof(target_saddr
->sa_family
)) {
1423 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1425 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1426 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1427 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1428 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1429 } else if (addr
->sa_family
== AF_PACKET
) {
1430 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1431 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1432 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1433 } else if (addr
->sa_family
== AF_INET6
&&
1434 len
>= sizeof(struct target_sockaddr_in6
)) {
1435 struct target_sockaddr_in6
*target_in6
=
1436 (struct target_sockaddr_in6
*)target_saddr
;
1437 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1439 unlock_user(target_saddr
, target_addr
, len
);
1444 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1445 struct target_msghdr
*target_msgh
)
1447 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1448 abi_long msg_controllen
;
1449 abi_ulong target_cmsg_addr
;
1450 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1451 socklen_t space
= 0;
1453 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1454 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1456 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1457 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1458 target_cmsg_start
= target_cmsg
;
1460 return -TARGET_EFAULT
;
1462 while (cmsg
&& target_cmsg
) {
1463 void *data
= CMSG_DATA(cmsg
);
1464 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1466 int len
= tswapal(target_cmsg
->cmsg_len
)
1467 - sizeof(struct target_cmsghdr
);
1469 space
+= CMSG_SPACE(len
);
1470 if (space
> msgh
->msg_controllen
) {
1471 space
-= CMSG_SPACE(len
);
1472 /* This is a QEMU bug, since we allocated the payload
1473 * area ourselves (unlike overflow in host-to-target
1474 * conversion, which is just the guest giving us a buffer
1475 * that's too small). It can't happen for the payload types
1476 * we currently support; if it becomes an issue in future
1477 * we would need to improve our allocation strategy to
1478 * something more intelligent than "twice the size of the
1479 * target buffer we're reading from".
1481 gemu_log("Host cmsg overflow\n");
1485 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1486 cmsg
->cmsg_level
= SOL_SOCKET
;
1488 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1490 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1491 cmsg
->cmsg_len
= CMSG_LEN(len
);
1493 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1494 int *fd
= (int *)data
;
1495 int *target_fd
= (int *)target_data
;
1496 int i
, numfds
= len
/ sizeof(int);
1498 for (i
= 0; i
< numfds
; i
++) {
1499 __get_user(fd
[i
], target_fd
+ i
);
1501 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1502 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1503 struct ucred
*cred
= (struct ucred
*)data
;
1504 struct target_ucred
*target_cred
=
1505 (struct target_ucred
*)target_data
;
1507 __get_user(cred
->pid
, &target_cred
->pid
);
1508 __get_user(cred
->uid
, &target_cred
->uid
);
1509 __get_user(cred
->gid
, &target_cred
->gid
);
1511 gemu_log("Unsupported ancillary data: %d/%d\n",
1512 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1513 memcpy(data
, target_data
, len
);
1516 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1517 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1520 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1522 msgh
->msg_controllen
= space
;
1526 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1527 struct msghdr
*msgh
)
1529 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1530 abi_long msg_controllen
;
1531 abi_ulong target_cmsg_addr
;
1532 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1533 socklen_t space
= 0;
1535 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1536 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1538 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1539 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1540 target_cmsg_start
= target_cmsg
;
1542 return -TARGET_EFAULT
;
1544 while (cmsg
&& target_cmsg
) {
1545 void *data
= CMSG_DATA(cmsg
);
1546 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1548 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1549 int tgt_len
, tgt_space
;
1551 /* We never copy a half-header but may copy half-data;
1552 * this is Linux's behaviour in put_cmsg(). Note that
1553 * truncation here is a guest problem (which we report
1554 * to the guest via the CTRUNC bit), unlike truncation
1555 * in target_to_host_cmsg, which is a QEMU bug.
1557 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1558 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1562 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1563 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1565 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1567 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1569 /* Payload types which need a different size of payload on
1570 * the target must adjust tgt_len here.
1573 switch (cmsg
->cmsg_level
) {
1575 switch (cmsg
->cmsg_type
) {
1577 tgt_len
= sizeof(struct target_timeval
);
1587 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1588 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1589 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1592 /* We must now copy-and-convert len bytes of payload
1593 * into tgt_len bytes of destination space. Bear in mind
1594 * that in both source and destination we may be dealing
1595 * with a truncated value!
1597 switch (cmsg
->cmsg_level
) {
1599 switch (cmsg
->cmsg_type
) {
1602 int *fd
= (int *)data
;
1603 int *target_fd
= (int *)target_data
;
1604 int i
, numfds
= tgt_len
/ sizeof(int);
1606 for (i
= 0; i
< numfds
; i
++) {
1607 __put_user(fd
[i
], target_fd
+ i
);
1613 struct timeval
*tv
= (struct timeval
*)data
;
1614 struct target_timeval
*target_tv
=
1615 (struct target_timeval
*)target_data
;
1617 if (len
!= sizeof(struct timeval
) ||
1618 tgt_len
!= sizeof(struct target_timeval
)) {
1622 /* copy struct timeval to target */
1623 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1624 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1627 case SCM_CREDENTIALS
:
1629 struct ucred
*cred
= (struct ucred
*)data
;
1630 struct target_ucred
*target_cred
=
1631 (struct target_ucred
*)target_data
;
1633 __put_user(cred
->pid
, &target_cred
->pid
);
1634 __put_user(cred
->uid
, &target_cred
->uid
);
1635 __put_user(cred
->gid
, &target_cred
->gid
);
1644 switch (cmsg
->cmsg_type
) {
1647 uint32_t *v
= (uint32_t *)data
;
1648 uint32_t *t_int
= (uint32_t *)target_data
;
1650 if (len
!= sizeof(uint32_t) ||
1651 tgt_len
!= sizeof(uint32_t)) {
1654 __put_user(*v
, t_int
);
1660 struct sock_extended_err ee
;
1661 struct sockaddr_in offender
;
1663 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1664 struct errhdr_t
*target_errh
=
1665 (struct errhdr_t
*)target_data
;
1667 if (len
!= sizeof(struct errhdr_t
) ||
1668 tgt_len
!= sizeof(struct errhdr_t
)) {
1671 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1672 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1673 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1674 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1675 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1676 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1677 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1678 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1679 (void *) &errh
->offender
, sizeof(errh
->offender
));
1688 switch (cmsg
->cmsg_type
) {
1691 uint32_t *v
= (uint32_t *)data
;
1692 uint32_t *t_int
= (uint32_t *)target_data
;
1694 if (len
!= sizeof(uint32_t) ||
1695 tgt_len
!= sizeof(uint32_t)) {
1698 __put_user(*v
, t_int
);
1704 struct sock_extended_err ee
;
1705 struct sockaddr_in6 offender
;
1707 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1708 struct errhdr6_t
*target_errh
=
1709 (struct errhdr6_t
*)target_data
;
1711 if (len
!= sizeof(struct errhdr6_t
) ||
1712 tgt_len
!= sizeof(struct errhdr6_t
)) {
1715 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1716 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1717 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1718 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1719 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1720 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1721 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1722 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1723 (void *) &errh
->offender
, sizeof(errh
->offender
));
1733 gemu_log("Unsupported ancillary data: %d/%d\n",
1734 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1735 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1736 if (tgt_len
> len
) {
1737 memset(target_data
+ len
, 0, tgt_len
- len
);
1741 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1742 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1743 if (msg_controllen
< tgt_space
) {
1744 tgt_space
= msg_controllen
;
1746 msg_controllen
-= tgt_space
;
1748 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1749 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1752 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1754 target_msgh
->msg_controllen
= tswapal(space
);
1758 /* do_setsockopt() Must return target values and target errnos. */
1759 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1760 abi_ulong optval_addr
, socklen_t optlen
)
1764 struct ip_mreqn
*ip_mreq
;
1765 struct ip_mreq_source
*ip_mreq_source
;
1769 /* TCP options all take an 'int' value. */
1770 if (optlen
< sizeof(uint32_t))
1771 return -TARGET_EINVAL
;
1773 if (get_user_u32(val
, optval_addr
))
1774 return -TARGET_EFAULT
;
1775 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1782 case IP_ROUTER_ALERT
:
1786 case IP_MTU_DISCOVER
:
1793 case IP_MULTICAST_TTL
:
1794 case IP_MULTICAST_LOOP
:
1796 if (optlen
>= sizeof(uint32_t)) {
1797 if (get_user_u32(val
, optval_addr
))
1798 return -TARGET_EFAULT
;
1799 } else if (optlen
>= 1) {
1800 if (get_user_u8(val
, optval_addr
))
1801 return -TARGET_EFAULT
;
1803 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1805 case IP_ADD_MEMBERSHIP
:
1806 case IP_DROP_MEMBERSHIP
:
1807 if (optlen
< sizeof (struct target_ip_mreq
) ||
1808 optlen
> sizeof (struct target_ip_mreqn
))
1809 return -TARGET_EINVAL
;
1811 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1812 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1813 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1816 case IP_BLOCK_SOURCE
:
1817 case IP_UNBLOCK_SOURCE
:
1818 case IP_ADD_SOURCE_MEMBERSHIP
:
1819 case IP_DROP_SOURCE_MEMBERSHIP
:
1820 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1821 return -TARGET_EINVAL
;
1823 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1824 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1825 unlock_user (ip_mreq_source
, optval_addr
, 0);
1834 case IPV6_MTU_DISCOVER
:
1837 case IPV6_RECVPKTINFO
:
1838 case IPV6_UNICAST_HOPS
:
1839 case IPV6_MULTICAST_HOPS
:
1840 case IPV6_MULTICAST_LOOP
:
1842 case IPV6_RECVHOPLIMIT
:
1843 case IPV6_2292HOPLIMIT
:
1846 case IPV6_2292PKTINFO
:
1847 case IPV6_RECVTCLASS
:
1848 case IPV6_RECVRTHDR
:
1849 case IPV6_2292RTHDR
:
1850 case IPV6_RECVHOPOPTS
:
1851 case IPV6_2292HOPOPTS
:
1852 case IPV6_RECVDSTOPTS
:
1853 case IPV6_2292DSTOPTS
:
1855 #ifdef IPV6_RECVPATHMTU
1856 case IPV6_RECVPATHMTU
:
1858 #ifdef IPV6_TRANSPARENT
1859 case IPV6_TRANSPARENT
:
1861 #ifdef IPV6_FREEBIND
1864 #ifdef IPV6_RECVORIGDSTADDR
1865 case IPV6_RECVORIGDSTADDR
:
1868 if (optlen
< sizeof(uint32_t)) {
1869 return -TARGET_EINVAL
;
1871 if (get_user_u32(val
, optval_addr
)) {
1872 return -TARGET_EFAULT
;
1874 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1875 &val
, sizeof(val
)));
1879 struct in6_pktinfo pki
;
1881 if (optlen
< sizeof(pki
)) {
1882 return -TARGET_EINVAL
;
1885 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1886 return -TARGET_EFAULT
;
1889 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1891 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1892 &pki
, sizeof(pki
)));
1895 case IPV6_ADD_MEMBERSHIP
:
1896 case IPV6_DROP_MEMBERSHIP
:
1898 struct ipv6_mreq ipv6mreq
;
1900 if (optlen
< sizeof(ipv6mreq
)) {
1901 return -TARGET_EINVAL
;
1904 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
1905 return -TARGET_EFAULT
;
1908 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
1910 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1911 &ipv6mreq
, sizeof(ipv6mreq
)));
1922 struct icmp6_filter icmp6f
;
1924 if (optlen
> sizeof(icmp6f
)) {
1925 optlen
= sizeof(icmp6f
);
1928 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
1929 return -TARGET_EFAULT
;
1932 for (val
= 0; val
< 8; val
++) {
1933 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
1936 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1948 /* those take an u32 value */
1949 if (optlen
< sizeof(uint32_t)) {
1950 return -TARGET_EINVAL
;
1953 if (get_user_u32(val
, optval_addr
)) {
1954 return -TARGET_EFAULT
;
1956 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1957 &val
, sizeof(val
)));
1964 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
1969 char *alg_key
= g_malloc(optlen
);
1972 return -TARGET_ENOMEM
;
1974 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
1976 return -TARGET_EFAULT
;
1978 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1983 case ALG_SET_AEAD_AUTHSIZE
:
1985 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1994 case TARGET_SOL_SOCKET
:
1996 case TARGET_SO_RCVTIMEO
:
2000 optname
= SO_RCVTIMEO
;
2003 if (optlen
!= sizeof(struct target_timeval
)) {
2004 return -TARGET_EINVAL
;
2007 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2008 return -TARGET_EFAULT
;
2011 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2015 case TARGET_SO_SNDTIMEO
:
2016 optname
= SO_SNDTIMEO
;
2018 case TARGET_SO_ATTACH_FILTER
:
2020 struct target_sock_fprog
*tfprog
;
2021 struct target_sock_filter
*tfilter
;
2022 struct sock_fprog fprog
;
2023 struct sock_filter
*filter
;
2026 if (optlen
!= sizeof(*tfprog
)) {
2027 return -TARGET_EINVAL
;
2029 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2030 return -TARGET_EFAULT
;
2032 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2033 tswapal(tfprog
->filter
), 0)) {
2034 unlock_user_struct(tfprog
, optval_addr
, 1);
2035 return -TARGET_EFAULT
;
2038 fprog
.len
= tswap16(tfprog
->len
);
2039 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2040 if (filter
== NULL
) {
2041 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2042 unlock_user_struct(tfprog
, optval_addr
, 1);
2043 return -TARGET_ENOMEM
;
2045 for (i
= 0; i
< fprog
.len
; i
++) {
2046 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2047 filter
[i
].jt
= tfilter
[i
].jt
;
2048 filter
[i
].jf
= tfilter
[i
].jf
;
2049 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2051 fprog
.filter
= filter
;
2053 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2054 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2057 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2058 unlock_user_struct(tfprog
, optval_addr
, 1);
2061 case TARGET_SO_BINDTODEVICE
:
2063 char *dev_ifname
, *addr_ifname
;
2065 if (optlen
> IFNAMSIZ
- 1) {
2066 optlen
= IFNAMSIZ
- 1;
2068 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2070 return -TARGET_EFAULT
;
2072 optname
= SO_BINDTODEVICE
;
2073 addr_ifname
= alloca(IFNAMSIZ
);
2074 memcpy(addr_ifname
, dev_ifname
, optlen
);
2075 addr_ifname
[optlen
] = 0;
2076 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2077 addr_ifname
, optlen
));
2078 unlock_user (dev_ifname
, optval_addr
, 0);
2081 case TARGET_SO_LINGER
:
2084 struct target_linger
*tlg
;
2086 if (optlen
!= sizeof(struct target_linger
)) {
2087 return -TARGET_EINVAL
;
2089 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2090 return -TARGET_EFAULT
;
2092 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2093 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2094 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2096 unlock_user_struct(tlg
, optval_addr
, 0);
2099 /* Options with 'int' argument. */
2100 case TARGET_SO_DEBUG
:
2103 case TARGET_SO_REUSEADDR
:
2104 optname
= SO_REUSEADDR
;
2107 case TARGET_SO_REUSEPORT
:
2108 optname
= SO_REUSEPORT
;
2111 case TARGET_SO_TYPE
:
2114 case TARGET_SO_ERROR
:
2117 case TARGET_SO_DONTROUTE
:
2118 optname
= SO_DONTROUTE
;
2120 case TARGET_SO_BROADCAST
:
2121 optname
= SO_BROADCAST
;
2123 case TARGET_SO_SNDBUF
:
2124 optname
= SO_SNDBUF
;
2126 case TARGET_SO_SNDBUFFORCE
:
2127 optname
= SO_SNDBUFFORCE
;
2129 case TARGET_SO_RCVBUF
:
2130 optname
= SO_RCVBUF
;
2132 case TARGET_SO_RCVBUFFORCE
:
2133 optname
= SO_RCVBUFFORCE
;
2135 case TARGET_SO_KEEPALIVE
:
2136 optname
= SO_KEEPALIVE
;
2138 case TARGET_SO_OOBINLINE
:
2139 optname
= SO_OOBINLINE
;
2141 case TARGET_SO_NO_CHECK
:
2142 optname
= SO_NO_CHECK
;
2144 case TARGET_SO_PRIORITY
:
2145 optname
= SO_PRIORITY
;
2148 case TARGET_SO_BSDCOMPAT
:
2149 optname
= SO_BSDCOMPAT
;
2152 case TARGET_SO_PASSCRED
:
2153 optname
= SO_PASSCRED
;
2155 case TARGET_SO_PASSSEC
:
2156 optname
= SO_PASSSEC
;
2158 case TARGET_SO_TIMESTAMP
:
2159 optname
= SO_TIMESTAMP
;
2161 case TARGET_SO_RCVLOWAT
:
2162 optname
= SO_RCVLOWAT
;
2167 if (optlen
< sizeof(uint32_t))
2168 return -TARGET_EINVAL
;
2170 if (get_user_u32(val
, optval_addr
))
2171 return -TARGET_EFAULT
;
2172 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2176 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2177 ret
= -TARGET_ENOPROTOOPT
;
2182 /* do_getsockopt() Must return target values and target errnos. */
2183 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2184 abi_ulong optval_addr
, abi_ulong optlen
)
2191 case TARGET_SOL_SOCKET
:
2194 /* These don't just return a single integer */
2195 case TARGET_SO_RCVTIMEO
:
2196 case TARGET_SO_SNDTIMEO
:
2197 case TARGET_SO_PEERNAME
:
2199 case TARGET_SO_PEERCRED
: {
2202 struct target_ucred
*tcr
;
2204 if (get_user_u32(len
, optlen
)) {
2205 return -TARGET_EFAULT
;
2208 return -TARGET_EINVAL
;
2212 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2220 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2221 return -TARGET_EFAULT
;
2223 __put_user(cr
.pid
, &tcr
->pid
);
2224 __put_user(cr
.uid
, &tcr
->uid
);
2225 __put_user(cr
.gid
, &tcr
->gid
);
2226 unlock_user_struct(tcr
, optval_addr
, 1);
2227 if (put_user_u32(len
, optlen
)) {
2228 return -TARGET_EFAULT
;
2232 case TARGET_SO_LINGER
:
2236 struct target_linger
*tlg
;
2238 if (get_user_u32(len
, optlen
)) {
2239 return -TARGET_EFAULT
;
2242 return -TARGET_EINVAL
;
2246 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2254 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2255 return -TARGET_EFAULT
;
2257 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2258 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2259 unlock_user_struct(tlg
, optval_addr
, 1);
2260 if (put_user_u32(len
, optlen
)) {
2261 return -TARGET_EFAULT
;
2265 /* Options with 'int' argument. */
2266 case TARGET_SO_DEBUG
:
2269 case TARGET_SO_REUSEADDR
:
2270 optname
= SO_REUSEADDR
;
2273 case TARGET_SO_REUSEPORT
:
2274 optname
= SO_REUSEPORT
;
2277 case TARGET_SO_TYPE
:
2280 case TARGET_SO_ERROR
:
2283 case TARGET_SO_DONTROUTE
:
2284 optname
= SO_DONTROUTE
;
2286 case TARGET_SO_BROADCAST
:
2287 optname
= SO_BROADCAST
;
2289 case TARGET_SO_SNDBUF
:
2290 optname
= SO_SNDBUF
;
2292 case TARGET_SO_RCVBUF
:
2293 optname
= SO_RCVBUF
;
2295 case TARGET_SO_KEEPALIVE
:
2296 optname
= SO_KEEPALIVE
;
2298 case TARGET_SO_OOBINLINE
:
2299 optname
= SO_OOBINLINE
;
2301 case TARGET_SO_NO_CHECK
:
2302 optname
= SO_NO_CHECK
;
2304 case TARGET_SO_PRIORITY
:
2305 optname
= SO_PRIORITY
;
2308 case TARGET_SO_BSDCOMPAT
:
2309 optname
= SO_BSDCOMPAT
;
2312 case TARGET_SO_PASSCRED
:
2313 optname
= SO_PASSCRED
;
2315 case TARGET_SO_TIMESTAMP
:
2316 optname
= SO_TIMESTAMP
;
2318 case TARGET_SO_RCVLOWAT
:
2319 optname
= SO_RCVLOWAT
;
2321 case TARGET_SO_ACCEPTCONN
:
2322 optname
= SO_ACCEPTCONN
;
2329 /* TCP options all take an 'int' value. */
2331 if (get_user_u32(len
, optlen
))
2332 return -TARGET_EFAULT
;
2334 return -TARGET_EINVAL
;
2336 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2339 if (optname
== SO_TYPE
) {
2340 val
= host_to_target_sock_type(val
);
2345 if (put_user_u32(val
, optval_addr
))
2346 return -TARGET_EFAULT
;
2348 if (put_user_u8(val
, optval_addr
))
2349 return -TARGET_EFAULT
;
2351 if (put_user_u32(len
, optlen
))
2352 return -TARGET_EFAULT
;
2359 case IP_ROUTER_ALERT
:
2363 case IP_MTU_DISCOVER
:
2369 case IP_MULTICAST_TTL
:
2370 case IP_MULTICAST_LOOP
:
2371 if (get_user_u32(len
, optlen
))
2372 return -TARGET_EFAULT
;
2374 return -TARGET_EINVAL
;
2376 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2379 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2381 if (put_user_u32(len
, optlen
)
2382 || put_user_u8(val
, optval_addr
))
2383 return -TARGET_EFAULT
;
2385 if (len
> sizeof(int))
2387 if (put_user_u32(len
, optlen
)
2388 || put_user_u32(val
, optval_addr
))
2389 return -TARGET_EFAULT
;
2393 ret
= -TARGET_ENOPROTOOPT
;
2399 case IPV6_MTU_DISCOVER
:
2402 case IPV6_RECVPKTINFO
:
2403 case IPV6_UNICAST_HOPS
:
2404 case IPV6_MULTICAST_HOPS
:
2405 case IPV6_MULTICAST_LOOP
:
2407 case IPV6_RECVHOPLIMIT
:
2408 case IPV6_2292HOPLIMIT
:
2411 case IPV6_2292PKTINFO
:
2412 case IPV6_RECVTCLASS
:
2413 case IPV6_RECVRTHDR
:
2414 case IPV6_2292RTHDR
:
2415 case IPV6_RECVHOPOPTS
:
2416 case IPV6_2292HOPOPTS
:
2417 case IPV6_RECVDSTOPTS
:
2418 case IPV6_2292DSTOPTS
:
2420 #ifdef IPV6_RECVPATHMTU
2421 case IPV6_RECVPATHMTU
:
2423 #ifdef IPV6_TRANSPARENT
2424 case IPV6_TRANSPARENT
:
2426 #ifdef IPV6_FREEBIND
2429 #ifdef IPV6_RECVORIGDSTADDR
2430 case IPV6_RECVORIGDSTADDR
:
2432 if (get_user_u32(len
, optlen
))
2433 return -TARGET_EFAULT
;
2435 return -TARGET_EINVAL
;
2437 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2440 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2442 if (put_user_u32(len
, optlen
)
2443 || put_user_u8(val
, optval_addr
))
2444 return -TARGET_EFAULT
;
2446 if (len
> sizeof(int))
2448 if (put_user_u32(len
, optlen
)
2449 || put_user_u32(val
, optval_addr
))
2450 return -TARGET_EFAULT
;
2454 ret
= -TARGET_ENOPROTOOPT
;
2460 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2462 ret
= -TARGET_EOPNOTSUPP
;
2468 /* Convert target low/high pair representing file offset into the host
2469 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2470 * as the kernel doesn't handle them either.
2472 static void target_to_host_low_high(abi_ulong tlow
,
2474 unsigned long *hlow
,
2475 unsigned long *hhigh
)
2477 uint64_t off
= tlow
|
2478 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2479 TARGET_LONG_BITS
/ 2;
2482 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2485 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2486 abi_ulong count
, int copy
)
2488 struct target_iovec
*target_vec
;
2490 abi_ulong total_len
, max_len
;
2493 bool bad_address
= false;
2499 if (count
> IOV_MAX
) {
2504 vec
= g_try_new0(struct iovec
, count
);
2510 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2511 count
* sizeof(struct target_iovec
), 1);
2512 if (target_vec
== NULL
) {
2517 /* ??? If host page size > target page size, this will result in a
2518 value larger than what we can actually support. */
2519 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2522 for (i
= 0; i
< count
; i
++) {
2523 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2524 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2529 } else if (len
== 0) {
2530 /* Zero length pointer is ignored. */
2531 vec
[i
].iov_base
= 0;
2533 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2534 /* If the first buffer pointer is bad, this is a fault. But
2535 * subsequent bad buffers will result in a partial write; this
2536 * is realized by filling the vector with null pointers and
2538 if (!vec
[i
].iov_base
) {
2549 if (len
> max_len
- total_len
) {
2550 len
= max_len
- total_len
;
2553 vec
[i
].iov_len
= len
;
2557 unlock_user(target_vec
, target_addr
, 0);
2562 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2563 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2566 unlock_user(target_vec
, target_addr
, 0);
2573 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2574 abi_ulong count
, int copy
)
2576 struct target_iovec
*target_vec
;
2579 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2580 count
* sizeof(struct target_iovec
), 1);
2582 for (i
= 0; i
< count
; i
++) {
2583 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2584 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2588 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2590 unlock_user(target_vec
, target_addr
, 0);
2596 static inline int target_to_host_sock_type(int *type
)
2599 int target_type
= *type
;
2601 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2602 case TARGET_SOCK_DGRAM
:
2603 host_type
= SOCK_DGRAM
;
2605 case TARGET_SOCK_STREAM
:
2606 host_type
= SOCK_STREAM
;
2609 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2612 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2613 #if defined(SOCK_CLOEXEC)
2614 host_type
|= SOCK_CLOEXEC
;
2616 return -TARGET_EINVAL
;
2619 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2620 #if defined(SOCK_NONBLOCK)
2621 host_type
|= SOCK_NONBLOCK
;
2622 #elif !defined(O_NONBLOCK)
2623 return -TARGET_EINVAL
;
2630 /* Try to emulate socket type flags after socket creation. */
2631 static int sock_flags_fixup(int fd
, int target_type
)
2633 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2634 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2635 int flags
= fcntl(fd
, F_GETFL
);
2636 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2638 return -TARGET_EINVAL
;
2645 /* do_socket() Must return target values and target errnos. */
2646 static abi_long
do_socket(int domain
, int type
, int protocol
)
2648 int target_type
= type
;
2651 ret
= target_to_host_sock_type(&type
);
2656 if (domain
== PF_NETLINK
&& !(
2657 #ifdef CONFIG_RTNETLINK
2658 protocol
== NETLINK_ROUTE
||
2660 protocol
== NETLINK_KOBJECT_UEVENT
||
2661 protocol
== NETLINK_AUDIT
)) {
2662 return -EPFNOSUPPORT
;
2665 if (domain
== AF_PACKET
||
2666 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2667 protocol
= tswap16(protocol
);
2670 ret
= get_errno(socket(domain
, type
, protocol
));
2672 ret
= sock_flags_fixup(ret
, target_type
);
2673 if (type
== SOCK_PACKET
) {
2674 /* Manage an obsolete case :
2675 * if socket type is SOCK_PACKET, bind by name
2677 fd_trans_register(ret
, &target_packet_trans
);
2678 } else if (domain
== PF_NETLINK
) {
2680 #ifdef CONFIG_RTNETLINK
2682 fd_trans_register(ret
, &target_netlink_route_trans
);
2685 case NETLINK_KOBJECT_UEVENT
:
2686 /* nothing to do: messages are strings */
2689 fd_trans_register(ret
, &target_netlink_audit_trans
);
2692 g_assert_not_reached();
2699 /* do_bind() Must return target values and target errnos. */
2700 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2706 if ((int)addrlen
< 0) {
2707 return -TARGET_EINVAL
;
2710 addr
= alloca(addrlen
+1);
2712 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2716 return get_errno(bind(sockfd
, addr
, addrlen
));
2719 /* do_connect() Must return target values and target errnos. */
2720 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2726 if ((int)addrlen
< 0) {
2727 return -TARGET_EINVAL
;
2730 addr
= alloca(addrlen
+1);
2732 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2736 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2739 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2740 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2741 int flags
, int send
)
2747 abi_ulong target_vec
;
2749 if (msgp
->msg_name
) {
2750 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2751 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2752 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2753 tswapal(msgp
->msg_name
),
2755 if (ret
== -TARGET_EFAULT
) {
2756 /* For connected sockets msg_name and msg_namelen must
2757 * be ignored, so returning EFAULT immediately is wrong.
2758 * Instead, pass a bad msg_name to the host kernel, and
2759 * let it decide whether to return EFAULT or not.
2761 msg
.msg_name
= (void *)-1;
2766 msg
.msg_name
= NULL
;
2767 msg
.msg_namelen
= 0;
2769 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2770 msg
.msg_control
= alloca(msg
.msg_controllen
);
2771 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
2773 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2775 count
= tswapal(msgp
->msg_iovlen
);
2776 target_vec
= tswapal(msgp
->msg_iov
);
2778 if (count
> IOV_MAX
) {
2779 /* sendrcvmsg returns a different errno for this condition than
2780 * readv/writev, so we must catch it here before lock_iovec() does.
2782 ret
= -TARGET_EMSGSIZE
;
2786 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2787 target_vec
, count
, send
);
2789 ret
= -host_to_target_errno(errno
);
2792 msg
.msg_iovlen
= count
;
2796 if (fd_trans_target_to_host_data(fd
)) {
2799 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
2800 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
2801 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
2802 msg
.msg_iov
->iov_len
);
2804 msg
.msg_iov
->iov_base
= host_msg
;
2805 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2809 ret
= target_to_host_cmsg(&msg
, msgp
);
2811 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2815 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2816 if (!is_error(ret
)) {
2818 if (fd_trans_host_to_target_data(fd
)) {
2819 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2820 MIN(msg
.msg_iov
->iov_len
, len
));
2822 ret
= host_to_target_cmsg(msgp
, &msg
);
2824 if (!is_error(ret
)) {
2825 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2826 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
2827 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
2828 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2829 msg
.msg_name
, msg
.msg_namelen
);
2841 unlock_iovec(vec
, target_vec
, count
, !send
);
2846 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2847 int flags
, int send
)
2850 struct target_msghdr
*msgp
;
2852 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2856 return -TARGET_EFAULT
;
2858 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2859 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2863 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2864 * so it might not have this *mmsg-specific flag either.
2866 #ifndef MSG_WAITFORONE
2867 #define MSG_WAITFORONE 0x10000
2870 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2871 unsigned int vlen
, unsigned int flags
,
2874 struct target_mmsghdr
*mmsgp
;
2878 if (vlen
> UIO_MAXIOV
) {
2882 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2884 return -TARGET_EFAULT
;
2887 for (i
= 0; i
< vlen
; i
++) {
2888 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2889 if (is_error(ret
)) {
2892 mmsgp
[i
].msg_len
= tswap32(ret
);
2893 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2894 if (flags
& MSG_WAITFORONE
) {
2895 flags
|= MSG_DONTWAIT
;
2899 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2901 /* Return number of datagrams sent if we sent any at all;
2902 * otherwise return the error.
2910 /* do_accept4() Must return target values and target errnos. */
2911 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2912 abi_ulong target_addrlen_addr
, int flags
)
2914 socklen_t addrlen
, ret_addrlen
;
2919 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2921 if (target_addr
== 0) {
2922 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
2925 /* linux returns EINVAL if addrlen pointer is invalid */
2926 if (get_user_u32(addrlen
, target_addrlen_addr
))
2927 return -TARGET_EINVAL
;
2929 if ((int)addrlen
< 0) {
2930 return -TARGET_EINVAL
;
2933 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2934 return -TARGET_EINVAL
;
2936 addr
= alloca(addrlen
);
2938 ret_addrlen
= addrlen
;
2939 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
2940 if (!is_error(ret
)) {
2941 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
2942 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
2943 ret
= -TARGET_EFAULT
;
2949 /* do_getpeername() Must return target values and target errnos. */
2950 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2951 abi_ulong target_addrlen_addr
)
2953 socklen_t addrlen
, ret_addrlen
;
2957 if (get_user_u32(addrlen
, target_addrlen_addr
))
2958 return -TARGET_EFAULT
;
2960 if ((int)addrlen
< 0) {
2961 return -TARGET_EINVAL
;
2964 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2965 return -TARGET_EFAULT
;
2967 addr
= alloca(addrlen
);
2969 ret_addrlen
= addrlen
;
2970 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
2971 if (!is_error(ret
)) {
2972 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
2973 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
2974 ret
= -TARGET_EFAULT
;
2980 /* do_getsockname() Must return target values and target errnos. */
2981 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2982 abi_ulong target_addrlen_addr
)
2984 socklen_t addrlen
, ret_addrlen
;
2988 if (get_user_u32(addrlen
, target_addrlen_addr
))
2989 return -TARGET_EFAULT
;
2991 if ((int)addrlen
< 0) {
2992 return -TARGET_EINVAL
;
2995 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2996 return -TARGET_EFAULT
;
2998 addr
= alloca(addrlen
);
3000 ret_addrlen
= addrlen
;
3001 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3002 if (!is_error(ret
)) {
3003 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3004 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3005 ret
= -TARGET_EFAULT
;
3011 /* do_socketpair() Must return target values and target errnos. */
3012 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3013 abi_ulong target_tab_addr
)
3018 target_to_host_sock_type(&type
);
3020 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3021 if (!is_error(ret
)) {
3022 if (put_user_s32(tab
[0], target_tab_addr
)
3023 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3024 ret
= -TARGET_EFAULT
;
3029 /* do_sendto() Must return target values and target errnos. */
3030 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3031 abi_ulong target_addr
, socklen_t addrlen
)
3035 void *copy_msg
= NULL
;
3038 if ((int)addrlen
< 0) {
3039 return -TARGET_EINVAL
;
3042 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3044 return -TARGET_EFAULT
;
3045 if (fd_trans_target_to_host_data(fd
)) {
3046 copy_msg
= host_msg
;
3047 host_msg
= g_malloc(len
);
3048 memcpy(host_msg
, copy_msg
, len
);
3049 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3055 addr
= alloca(addrlen
+1);
3056 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3060 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3062 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3067 host_msg
= copy_msg
;
3069 unlock_user(host_msg
, msg
, 0);
3073 /* do_recvfrom() Must return target values and target errnos. */
3074 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3075 abi_ulong target_addr
,
3076 abi_ulong target_addrlen
)
3078 socklen_t addrlen
, ret_addrlen
;
3083 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3085 return -TARGET_EFAULT
;
3087 if (get_user_u32(addrlen
, target_addrlen
)) {
3088 ret
= -TARGET_EFAULT
;
3091 if ((int)addrlen
< 0) {
3092 ret
= -TARGET_EINVAL
;
3095 addr
= alloca(addrlen
);
3096 ret_addrlen
= addrlen
;
3097 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3098 addr
, &ret_addrlen
));
3100 addr
= NULL
; /* To keep compiler quiet. */
3101 addrlen
= 0; /* To keep compiler quiet. */
3102 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3104 if (!is_error(ret
)) {
3105 if (fd_trans_host_to_target_data(fd
)) {
3107 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3108 if (is_error(trans
)) {
3114 host_to_target_sockaddr(target_addr
, addr
,
3115 MIN(addrlen
, ret_addrlen
));
3116 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3117 ret
= -TARGET_EFAULT
;
3121 unlock_user(host_msg
, msg
, len
);
3124 unlock_user(host_msg
, msg
, 0);
3129 #ifdef TARGET_NR_socketcall
3130 /* do_socketcall() must return target values and target errnos. */
3131 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3133 static const unsigned nargs
[] = { /* number of arguments per operation */
3134 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3135 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3136 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3137 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3138 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3139 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3140 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3141 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3142 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3143 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3144 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3145 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3146 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3147 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3148 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3149 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3150 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3151 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3152 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3153 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3155 abi_long a
[6]; /* max 6 args */
3158 /* check the range of the first argument num */
3159 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3160 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3161 return -TARGET_EINVAL
;
3163 /* ensure we have space for args */
3164 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3165 return -TARGET_EINVAL
;
3167 /* collect the arguments in a[] according to nargs[] */
3168 for (i
= 0; i
< nargs
[num
]; ++i
) {
3169 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3170 return -TARGET_EFAULT
;
3173 /* now when we have the args, invoke the appropriate underlying function */
3175 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3176 return do_socket(a
[0], a
[1], a
[2]);
3177 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3178 return do_bind(a
[0], a
[1], a
[2]);
3179 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3180 return do_connect(a
[0], a
[1], a
[2]);
3181 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3182 return get_errno(listen(a
[0], a
[1]));
3183 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3184 return do_accept4(a
[0], a
[1], a
[2], 0);
3185 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3186 return do_getsockname(a
[0], a
[1], a
[2]);
3187 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3188 return do_getpeername(a
[0], a
[1], a
[2]);
3189 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3190 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3191 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3192 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3193 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3194 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3195 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3196 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3197 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3198 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3199 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3200 return get_errno(shutdown(a
[0], a
[1]));
3201 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3202 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3203 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3204 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3205 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3206 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3207 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3208 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3209 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3210 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3211 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3212 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3213 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3214 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3216 gemu_log("Unsupported socketcall: %d\n", num
);
3217 return -TARGET_EINVAL
;
3222 #define N_SHM_REGIONS 32
3224 static struct shm_region
{
3228 } shm_regions
[N_SHM_REGIONS
];
3230 #ifndef TARGET_SEMID64_DS
3231 /* asm-generic version of this struct */
3232 struct target_semid64_ds
3234 struct target_ipc_perm sem_perm
;
3235 abi_ulong sem_otime
;
3236 #if TARGET_ABI_BITS == 32
3237 abi_ulong __unused1
;
3239 abi_ulong sem_ctime
;
3240 #if TARGET_ABI_BITS == 32
3241 abi_ulong __unused2
;
3243 abi_ulong sem_nsems
;
3244 abi_ulong __unused3
;
3245 abi_ulong __unused4
;
3249 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3250 abi_ulong target_addr
)
3252 struct target_ipc_perm
*target_ip
;
3253 struct target_semid64_ds
*target_sd
;
3255 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3256 return -TARGET_EFAULT
;
3257 target_ip
= &(target_sd
->sem_perm
);
3258 host_ip
->__key
= tswap32(target_ip
->__key
);
3259 host_ip
->uid
= tswap32(target_ip
->uid
);
3260 host_ip
->gid
= tswap32(target_ip
->gid
);
3261 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3262 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3263 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3264 host_ip
->mode
= tswap32(target_ip
->mode
);
3266 host_ip
->mode
= tswap16(target_ip
->mode
);
3268 #if defined(TARGET_PPC)
3269 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3271 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3273 unlock_user_struct(target_sd
, target_addr
, 0);
3277 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3278 struct ipc_perm
*host_ip
)
3280 struct target_ipc_perm
*target_ip
;
3281 struct target_semid64_ds
*target_sd
;
3283 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3284 return -TARGET_EFAULT
;
3285 target_ip
= &(target_sd
->sem_perm
);
3286 target_ip
->__key
= tswap32(host_ip
->__key
);
3287 target_ip
->uid
= tswap32(host_ip
->uid
);
3288 target_ip
->gid
= tswap32(host_ip
->gid
);
3289 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3290 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3291 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3292 target_ip
->mode
= tswap32(host_ip
->mode
);
3294 target_ip
->mode
= tswap16(host_ip
->mode
);
3296 #if defined(TARGET_PPC)
3297 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3299 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3301 unlock_user_struct(target_sd
, target_addr
, 1);
3305 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3306 abi_ulong target_addr
)
3308 struct target_semid64_ds
*target_sd
;
3310 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3311 return -TARGET_EFAULT
;
3312 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3313 return -TARGET_EFAULT
;
3314 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3315 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3316 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3317 unlock_user_struct(target_sd
, target_addr
, 0);
3321 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3322 struct semid_ds
*host_sd
)
3324 struct target_semid64_ds
*target_sd
;
3326 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3327 return -TARGET_EFAULT
;
3328 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3329 return -TARGET_EFAULT
;
3330 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3331 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3332 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3333 unlock_user_struct(target_sd
, target_addr
, 1);
3337 struct target_seminfo
{
3350 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3351 struct seminfo
*host_seminfo
)
3353 struct target_seminfo
*target_seminfo
;
3354 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3355 return -TARGET_EFAULT
;
3356 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3357 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3358 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3359 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3360 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3361 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3362 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3363 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3364 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3365 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3366 unlock_user_struct(target_seminfo
, target_addr
, 1);
3372 struct semid_ds
*buf
;
3373 unsigned short *array
;
3374 struct seminfo
*__buf
;
3377 union target_semun
{
3384 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3385 abi_ulong target_addr
)
3388 unsigned short *array
;
3390 struct semid_ds semid_ds
;
3393 semun
.buf
= &semid_ds
;
3395 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3397 return get_errno(ret
);
3399 nsems
= semid_ds
.sem_nsems
;
3401 *host_array
= g_try_new(unsigned short, nsems
);
3403 return -TARGET_ENOMEM
;
3405 array
= lock_user(VERIFY_READ
, target_addr
,
3406 nsems
*sizeof(unsigned short), 1);
3408 g_free(*host_array
);
3409 return -TARGET_EFAULT
;
3412 for(i
=0; i
<nsems
; i
++) {
3413 __get_user((*host_array
)[i
], &array
[i
]);
3415 unlock_user(array
, target_addr
, 0);
3420 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3421 unsigned short **host_array
)
3424 unsigned short *array
;
3426 struct semid_ds semid_ds
;
3429 semun
.buf
= &semid_ds
;
3431 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3433 return get_errno(ret
);
3435 nsems
= semid_ds
.sem_nsems
;
3437 array
= lock_user(VERIFY_WRITE
, target_addr
,
3438 nsems
*sizeof(unsigned short), 0);
3440 return -TARGET_EFAULT
;
3442 for(i
=0; i
<nsems
; i
++) {
3443 __put_user((*host_array
)[i
], &array
[i
]);
3445 g_free(*host_array
);
3446 unlock_user(array
, target_addr
, 1);
3451 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3452 abi_ulong target_arg
)
3454 union target_semun target_su
= { .buf
= target_arg
};
3456 struct semid_ds dsarg
;
3457 unsigned short *array
= NULL
;
3458 struct seminfo seminfo
;
3459 abi_long ret
= -TARGET_EINVAL
;
3466 /* In 64 bit cross-endian situations, we will erroneously pick up
3467 * the wrong half of the union for the "val" element. To rectify
3468 * this, the entire 8-byte structure is byteswapped, followed by
3469 * a swap of the 4 byte val field. In other cases, the data is
3470 * already in proper host byte order. */
3471 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3472 target_su
.buf
= tswapal(target_su
.buf
);
3473 arg
.val
= tswap32(target_su
.val
);
3475 arg
.val
= target_su
.val
;
3477 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3481 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3485 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3486 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3493 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3497 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3498 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3504 arg
.__buf
= &seminfo
;
3505 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3506 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3514 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3521 struct target_sembuf
{
3522 unsigned short sem_num
;
3527 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3528 abi_ulong target_addr
,
3531 struct target_sembuf
*target_sembuf
;
3534 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3535 nsops
*sizeof(struct target_sembuf
), 1);
3537 return -TARGET_EFAULT
;
3539 for(i
=0; i
<nsops
; i
++) {
3540 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3541 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3542 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3545 unlock_user(target_sembuf
, target_addr
, 0);
3550 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3552 struct sembuf sops
[nsops
];
3555 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3556 return -TARGET_EFAULT
;
3558 ret
= -TARGET_ENOSYS
;
3559 #ifdef __NR_semtimedop
3560 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3563 if (ret
== -TARGET_ENOSYS
) {
3564 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
, nsops
, 0, sops
, 0));
3570 struct target_msqid_ds
3572 struct target_ipc_perm msg_perm
;
3573 abi_ulong msg_stime
;
3574 #if TARGET_ABI_BITS == 32
3575 abi_ulong __unused1
;
3577 abi_ulong msg_rtime
;
3578 #if TARGET_ABI_BITS == 32
3579 abi_ulong __unused2
;
3581 abi_ulong msg_ctime
;
3582 #if TARGET_ABI_BITS == 32
3583 abi_ulong __unused3
;
3585 abi_ulong __msg_cbytes
;
3587 abi_ulong msg_qbytes
;
3588 abi_ulong msg_lspid
;
3589 abi_ulong msg_lrpid
;
3590 abi_ulong __unused4
;
3591 abi_ulong __unused5
;
3594 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3595 abi_ulong target_addr
)
3597 struct target_msqid_ds
*target_md
;
3599 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3600 return -TARGET_EFAULT
;
3601 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3602 return -TARGET_EFAULT
;
3603 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3604 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3605 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3606 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3607 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3608 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3609 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3610 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3611 unlock_user_struct(target_md
, target_addr
, 0);
3615 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3616 struct msqid_ds
*host_md
)
3618 struct target_msqid_ds
*target_md
;
3620 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3621 return -TARGET_EFAULT
;
3622 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3623 return -TARGET_EFAULT
;
3624 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3625 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3626 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3627 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3628 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3629 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3630 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3631 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3632 unlock_user_struct(target_md
, target_addr
, 1);
3636 struct target_msginfo
{
3644 unsigned short int msgseg
;
3647 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3648 struct msginfo
*host_msginfo
)
3650 struct target_msginfo
*target_msginfo
;
3651 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3652 return -TARGET_EFAULT
;
3653 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3654 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3655 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3656 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3657 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3658 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3659 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3660 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3661 unlock_user_struct(target_msginfo
, target_addr
, 1);
3665 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3667 struct msqid_ds dsarg
;
3668 struct msginfo msginfo
;
3669 abi_long ret
= -TARGET_EINVAL
;
3677 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3678 return -TARGET_EFAULT
;
3679 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3680 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3681 return -TARGET_EFAULT
;
3684 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3688 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3689 if (host_to_target_msginfo(ptr
, &msginfo
))
3690 return -TARGET_EFAULT
;
3697 struct target_msgbuf
{
3702 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3703 ssize_t msgsz
, int msgflg
)
3705 struct target_msgbuf
*target_mb
;
3706 struct msgbuf
*host_mb
;
3710 return -TARGET_EINVAL
;
3713 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3714 return -TARGET_EFAULT
;
3715 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3717 unlock_user_struct(target_mb
, msgp
, 0);
3718 return -TARGET_ENOMEM
;
3720 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3721 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3722 ret
= -TARGET_ENOSYS
;
3724 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3727 if (ret
== -TARGET_ENOSYS
) {
3728 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
3733 unlock_user_struct(target_mb
, msgp
, 0);
3738 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3739 ssize_t msgsz
, abi_long msgtyp
,
3742 struct target_msgbuf
*target_mb
;
3744 struct msgbuf
*host_mb
;
3748 return -TARGET_EINVAL
;
3751 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3752 return -TARGET_EFAULT
;
3754 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3756 ret
= -TARGET_ENOMEM
;
3759 ret
= -TARGET_ENOSYS
;
3761 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3764 if (ret
== -TARGET_ENOSYS
) {
3765 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
3766 msgflg
, host_mb
, msgtyp
));
3771 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3772 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3773 if (!target_mtext
) {
3774 ret
= -TARGET_EFAULT
;
3777 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3778 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3781 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3785 unlock_user_struct(target_mb
, msgp
, 1);
3790 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3791 abi_ulong target_addr
)
3793 struct target_shmid_ds
*target_sd
;
3795 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3796 return -TARGET_EFAULT
;
3797 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3798 return -TARGET_EFAULT
;
3799 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3800 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3801 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3802 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3803 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3804 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3805 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3806 unlock_user_struct(target_sd
, target_addr
, 0);
3810 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3811 struct shmid_ds
*host_sd
)
3813 struct target_shmid_ds
*target_sd
;
3815 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3816 return -TARGET_EFAULT
;
3817 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3818 return -TARGET_EFAULT
;
3819 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3820 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3821 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3822 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3823 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3824 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3825 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3826 unlock_user_struct(target_sd
, target_addr
, 1);
3830 struct target_shminfo
{
3838 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3839 struct shminfo
*host_shminfo
)
3841 struct target_shminfo
*target_shminfo
;
3842 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3843 return -TARGET_EFAULT
;
3844 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3845 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3846 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3847 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3848 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3849 unlock_user_struct(target_shminfo
, target_addr
, 1);
3853 struct target_shm_info
{
3858 abi_ulong swap_attempts
;
3859 abi_ulong swap_successes
;
3862 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3863 struct shm_info
*host_shm_info
)
3865 struct target_shm_info
*target_shm_info
;
3866 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3867 return -TARGET_EFAULT
;
3868 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3869 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3870 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3871 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3872 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3873 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3874 unlock_user_struct(target_shm_info
, target_addr
, 1);
3878 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3880 struct shmid_ds dsarg
;
3881 struct shminfo shminfo
;
3882 struct shm_info shm_info
;
3883 abi_long ret
= -TARGET_EINVAL
;
3891 if (target_to_host_shmid_ds(&dsarg
, buf
))
3892 return -TARGET_EFAULT
;
3893 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3894 if (host_to_target_shmid_ds(buf
, &dsarg
))
3895 return -TARGET_EFAULT
;
3898 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3899 if (host_to_target_shminfo(buf
, &shminfo
))
3900 return -TARGET_EFAULT
;
3903 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3904 if (host_to_target_shm_info(buf
, &shm_info
))
3905 return -TARGET_EFAULT
;
3910 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3917 #ifndef TARGET_FORCE_SHMLBA
3918 /* For most architectures, SHMLBA is the same as the page size;
3919 * some architectures have larger values, in which case they should
3920 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3921 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3922 * and defining its own value for SHMLBA.
3924 * The kernel also permits SHMLBA to be set by the architecture to a
3925 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3926 * this means that addresses are rounded to the large size if
3927 * SHM_RND is set but addresses not aligned to that size are not rejected
3928 * as long as they are at least page-aligned. Since the only architecture
3929 * which uses this is ia64 this code doesn't provide for that oddity.
3931 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
3933 return TARGET_PAGE_SIZE
;
3937 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
3938 int shmid
, abi_ulong shmaddr
, int shmflg
)
3942 struct shmid_ds shm_info
;
3946 /* find out the length of the shared memory segment */
3947 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3948 if (is_error(ret
)) {
3949 /* can't get length, bail out */
3953 shmlba
= target_shmlba(cpu_env
);
3955 if (shmaddr
& (shmlba
- 1)) {
3956 if (shmflg
& SHM_RND
) {
3957 shmaddr
&= ~(shmlba
- 1);
3959 return -TARGET_EINVAL
;
3962 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
3963 return -TARGET_EINVAL
;
3969 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3971 abi_ulong mmap_start
;
3973 /* In order to use the host shmat, we need to honor host SHMLBA. */
3974 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
3976 if (mmap_start
== -1) {
3978 host_raddr
= (void *)-1;
3980 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3983 if (host_raddr
== (void *)-1) {
3985 return get_errno((long)host_raddr
);
3987 raddr
=h2g((unsigned long)host_raddr
);
3989 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3990 PAGE_VALID
| PAGE_READ
|
3991 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3993 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3994 if (!shm_regions
[i
].in_use
) {
3995 shm_regions
[i
].in_use
= true;
3996 shm_regions
[i
].start
= raddr
;
3997 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4007 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4014 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4015 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4016 shm_regions
[i
].in_use
= false;
4017 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4021 rv
= get_errno(shmdt(g2h(shmaddr
)));
4028 #ifdef TARGET_NR_ipc
4029 /* ??? This only works with linear mappings. */
4030 /* do_ipc() must return target values and target errnos. */
4031 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4032 unsigned int call
, abi_long first
,
4033 abi_long second
, abi_long third
,
4034 abi_long ptr
, abi_long fifth
)
4039 version
= call
>> 16;
4044 ret
= do_semop(first
, ptr
, second
);
4048 ret
= get_errno(semget(first
, second
, third
));
4051 case IPCOP_semctl
: {
4052 /* The semun argument to semctl is passed by value, so dereference the
4055 get_user_ual(atptr
, ptr
);
4056 ret
= do_semctl(first
, second
, third
, atptr
);
4061 ret
= get_errno(msgget(first
, second
));
4065 ret
= do_msgsnd(first
, ptr
, second
, third
);
4069 ret
= do_msgctl(first
, second
, ptr
);
4076 struct target_ipc_kludge
{
4081 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4082 ret
= -TARGET_EFAULT
;
4086 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4088 unlock_user_struct(tmp
, ptr
, 0);
4092 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4101 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4102 if (is_error(raddr
))
4103 return get_errno(raddr
);
4104 if (put_user_ual(raddr
, third
))
4105 return -TARGET_EFAULT
;
4109 ret
= -TARGET_EINVAL
;
4114 ret
= do_shmdt(ptr
);
4118 /* IPC_* flag values are the same on all linux platforms */
4119 ret
= get_errno(shmget(first
, second
, third
));
4122 /* IPC_* and SHM_* command values are the same on all linux platforms */
4124 ret
= do_shmctl(first
, second
, ptr
);
4127 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4128 ret
= -TARGET_ENOSYS
;
4135 /* kernel structure types definitions */
4137 #define STRUCT(name, ...) STRUCT_ ## name,
4138 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4140 #include "syscall_types.h"
4144 #undef STRUCT_SPECIAL
4146 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4147 #define STRUCT_SPECIAL(name)
4148 #include "syscall_types.h"
4150 #undef STRUCT_SPECIAL
4152 typedef struct IOCTLEntry IOCTLEntry
;
4154 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4155 int fd
, int cmd
, abi_long arg
);
4159 unsigned int host_cmd
;
4162 do_ioctl_fn
*do_ioctl
;
4163 const argtype arg_type
[5];
4166 #define IOC_R 0x0001
4167 #define IOC_W 0x0002
4168 #define IOC_RW (IOC_R | IOC_W)
4170 #define MAX_STRUCT_SIZE 4096
4172 #ifdef CONFIG_FIEMAP
4173 /* So fiemap access checks don't overflow on 32 bit systems.
4174 * This is very slightly smaller than the limit imposed by
4175 * the underlying kernel.
4177 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4178 / sizeof(struct fiemap_extent))
4180 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4181 int fd
, int cmd
, abi_long arg
)
4183 /* The parameter for this ioctl is a struct fiemap followed
4184 * by an array of struct fiemap_extent whose size is set
4185 * in fiemap->fm_extent_count. The array is filled in by the
4188 int target_size_in
, target_size_out
;
4190 const argtype
*arg_type
= ie
->arg_type
;
4191 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4194 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4198 assert(arg_type
[0] == TYPE_PTR
);
4199 assert(ie
->access
== IOC_RW
);
4201 target_size_in
= thunk_type_size(arg_type
, 0);
4202 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4204 return -TARGET_EFAULT
;
4206 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4207 unlock_user(argptr
, arg
, 0);
4208 fm
= (struct fiemap
*)buf_temp
;
4209 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4210 return -TARGET_EINVAL
;
4213 outbufsz
= sizeof (*fm
) +
4214 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4216 if (outbufsz
> MAX_STRUCT_SIZE
) {
4217 /* We can't fit all the extents into the fixed size buffer.
4218 * Allocate one that is large enough and use it instead.
4220 fm
= g_try_malloc(outbufsz
);
4222 return -TARGET_ENOMEM
;
4224 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4227 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4228 if (!is_error(ret
)) {
4229 target_size_out
= target_size_in
;
4230 /* An extent_count of 0 means we were only counting the extents
4231 * so there are no structs to copy
4233 if (fm
->fm_extent_count
!= 0) {
4234 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4236 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4238 ret
= -TARGET_EFAULT
;
4240 /* Convert the struct fiemap */
4241 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4242 if (fm
->fm_extent_count
!= 0) {
4243 p
= argptr
+ target_size_in
;
4244 /* ...and then all the struct fiemap_extents */
4245 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4246 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4251 unlock_user(argptr
, arg
, target_size_out
);
4261 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4262 int fd
, int cmd
, abi_long arg
)
4264 const argtype
*arg_type
= ie
->arg_type
;
4268 struct ifconf
*host_ifconf
;
4270 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4271 int target_ifreq_size
;
4276 abi_long target_ifc_buf
;
4280 assert(arg_type
[0] == TYPE_PTR
);
4281 assert(ie
->access
== IOC_RW
);
4284 target_size
= thunk_type_size(arg_type
, 0);
4286 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4288 return -TARGET_EFAULT
;
4289 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4290 unlock_user(argptr
, arg
, 0);
4292 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4293 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4294 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4296 if (target_ifc_buf
!= 0) {
4297 target_ifc_len
= host_ifconf
->ifc_len
;
4298 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4299 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4301 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4302 if (outbufsz
> MAX_STRUCT_SIZE
) {
4304 * We can't fit all the extents into the fixed size buffer.
4305 * Allocate one that is large enough and use it instead.
4307 host_ifconf
= malloc(outbufsz
);
4309 return -TARGET_ENOMEM
;
4311 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4314 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4316 host_ifconf
->ifc_len
= host_ifc_len
;
4318 host_ifc_buf
= NULL
;
4320 host_ifconf
->ifc_buf
= host_ifc_buf
;
4322 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4323 if (!is_error(ret
)) {
4324 /* convert host ifc_len to target ifc_len */
4326 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4327 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4328 host_ifconf
->ifc_len
= target_ifc_len
;
4330 /* restore target ifc_buf */
4332 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4334 /* copy struct ifconf to target user */
4336 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4338 return -TARGET_EFAULT
;
4339 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4340 unlock_user(argptr
, arg
, target_size
);
4342 if (target_ifc_buf
!= 0) {
4343 /* copy ifreq[] to target user */
4344 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4345 for (i
= 0; i
< nb_ifreq
; i
++) {
4346 thunk_convert(argptr
+ i
* target_ifreq_size
,
4347 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4348 ifreq_arg_type
, THUNK_TARGET
);
4350 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4361 #if defined(CONFIG_USBFS)
4362 #if HOST_LONG_BITS > 64
4363 #error USBDEVFS thunks do not support >64 bit hosts yet.
4366 uint64_t target_urb_adr
;
4367 uint64_t target_buf_adr
;
4368 char *target_buf_ptr
;
4369 struct usbdevfs_urb host_urb
;
4372 static GHashTable
*usbdevfs_urb_hashtable(void)
4374 static GHashTable
*urb_hashtable
;
4376 if (!urb_hashtable
) {
4377 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4379 return urb_hashtable
;
4382 static void urb_hashtable_insert(struct live_urb
*urb
)
4384 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4385 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4388 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4390 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4391 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4394 static void urb_hashtable_remove(struct live_urb
*urb
)
4396 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4397 g_hash_table_remove(urb_hashtable
, urb
);
4401 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4402 int fd
, int cmd
, abi_long arg
)
4404 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4405 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4406 struct live_urb
*lurb
;
4410 uintptr_t target_urb_adr
;
4413 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4415 memset(buf_temp
, 0, sizeof(uint64_t));
4416 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4417 if (is_error(ret
)) {
4421 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4422 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4423 if (!lurb
->target_urb_adr
) {
4424 return -TARGET_EFAULT
;
4426 urb_hashtable_remove(lurb
);
4427 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4428 lurb
->host_urb
.buffer_length
);
4429 lurb
->target_buf_ptr
= NULL
;
4431 /* restore the guest buffer pointer */
4432 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4434 /* update the guest urb struct */
4435 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4438 return -TARGET_EFAULT
;
4440 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4441 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4443 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4444 /* write back the urb handle */
4445 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4448 return -TARGET_EFAULT
;
4451 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4452 target_urb_adr
= lurb
->target_urb_adr
;
4453 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4454 unlock_user(argptr
, arg
, target_size
);
4461 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4462 uint8_t *buf_temp
__attribute__((unused
)),
4463 int fd
, int cmd
, abi_long arg
)
4465 struct live_urb
*lurb
;
4467 /* map target address back to host URB with metadata. */
4468 lurb
= urb_hashtable_lookup(arg
);
4470 return -TARGET_EFAULT
;
4472 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4476 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4477 int fd
, int cmd
, abi_long arg
)
4479 const argtype
*arg_type
= ie
->arg_type
;
4484 struct live_urb
*lurb
;
4487 * each submitted URB needs to map to a unique ID for the
4488 * kernel, and that unique ID needs to be a pointer to
4489 * host memory. hence, we need to malloc for each URB.
4490 * isochronous transfers have a variable length struct.
4493 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4495 /* construct host copy of urb and metadata */
4496 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4498 return -TARGET_ENOMEM
;
4501 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4504 return -TARGET_EFAULT
;
4506 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4507 unlock_user(argptr
, arg
, 0);
4509 lurb
->target_urb_adr
= arg
;
4510 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4512 /* buffer space used depends on endpoint type so lock the entire buffer */
4513 /* control type urbs should check the buffer contents for true direction */
4514 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4515 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4516 lurb
->host_urb
.buffer_length
, 1);
4517 if (lurb
->target_buf_ptr
== NULL
) {
4519 return -TARGET_EFAULT
;
4522 /* update buffer pointer in host copy */
4523 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4525 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4526 if (is_error(ret
)) {
4527 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4530 urb_hashtable_insert(lurb
);
4535 #endif /* CONFIG_USBFS */
4537 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4538 int cmd
, abi_long arg
)
4541 struct dm_ioctl
*host_dm
;
4542 abi_long guest_data
;
4543 uint32_t guest_data_size
;
4545 const argtype
*arg_type
= ie
->arg_type
;
4547 void *big_buf
= NULL
;
4551 target_size
= thunk_type_size(arg_type
, 0);
4552 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4554 ret
= -TARGET_EFAULT
;
4557 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4558 unlock_user(argptr
, arg
, 0);
4560 /* buf_temp is too small, so fetch things into a bigger buffer */
4561 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4562 memcpy(big_buf
, buf_temp
, target_size
);
4566 guest_data
= arg
+ host_dm
->data_start
;
4567 if ((guest_data
- arg
) < 0) {
4568 ret
= -TARGET_EINVAL
;
4571 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4572 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4574 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4576 ret
= -TARGET_EFAULT
;
4580 switch (ie
->host_cmd
) {
4582 case DM_LIST_DEVICES
:
4585 case DM_DEV_SUSPEND
:
4588 case DM_TABLE_STATUS
:
4589 case DM_TABLE_CLEAR
:
4591 case DM_LIST_VERSIONS
:
4595 case DM_DEV_SET_GEOMETRY
:
4596 /* data contains only strings */
4597 memcpy(host_data
, argptr
, guest_data_size
);
4600 memcpy(host_data
, argptr
, guest_data_size
);
4601 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4605 void *gspec
= argptr
;
4606 void *cur_data
= host_data
;
4607 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4608 int spec_size
= thunk_type_size(arg_type
, 0);
4611 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4612 struct dm_target_spec
*spec
= cur_data
;
4616 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4617 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4619 spec
->next
= sizeof(*spec
) + slen
;
4620 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4622 cur_data
+= spec
->next
;
4627 ret
= -TARGET_EINVAL
;
4628 unlock_user(argptr
, guest_data
, 0);
4631 unlock_user(argptr
, guest_data
, 0);
4633 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4634 if (!is_error(ret
)) {
4635 guest_data
= arg
+ host_dm
->data_start
;
4636 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4637 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4638 switch (ie
->host_cmd
) {
4643 case DM_DEV_SUSPEND
:
4646 case DM_TABLE_CLEAR
:
4648 case DM_DEV_SET_GEOMETRY
:
4649 /* no return data */
4651 case DM_LIST_DEVICES
:
4653 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4654 uint32_t remaining_data
= guest_data_size
;
4655 void *cur_data
= argptr
;
4656 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4657 int nl_size
= 12; /* can't use thunk_size due to alignment */
4660 uint32_t next
= nl
->next
;
4662 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4664 if (remaining_data
< nl
->next
) {
4665 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4668 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4669 strcpy(cur_data
+ nl_size
, nl
->name
);
4670 cur_data
+= nl
->next
;
4671 remaining_data
-= nl
->next
;
4675 nl
= (void*)nl
+ next
;
4680 case DM_TABLE_STATUS
:
4682 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4683 void *cur_data
= argptr
;
4684 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4685 int spec_size
= thunk_type_size(arg_type
, 0);
4688 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4689 uint32_t next
= spec
->next
;
4690 int slen
= strlen((char*)&spec
[1]) + 1;
4691 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4692 if (guest_data_size
< spec
->next
) {
4693 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4696 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4697 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4698 cur_data
= argptr
+ spec
->next
;
4699 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4705 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4706 int count
= *(uint32_t*)hdata
;
4707 uint64_t *hdev
= hdata
+ 8;
4708 uint64_t *gdev
= argptr
+ 8;
4711 *(uint32_t*)argptr
= tswap32(count
);
4712 for (i
= 0; i
< count
; i
++) {
4713 *gdev
= tswap64(*hdev
);
4719 case DM_LIST_VERSIONS
:
4721 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4722 uint32_t remaining_data
= guest_data_size
;
4723 void *cur_data
= argptr
;
4724 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4725 int vers_size
= thunk_type_size(arg_type
, 0);
4728 uint32_t next
= vers
->next
;
4730 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4732 if (remaining_data
< vers
->next
) {
4733 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4736 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4737 strcpy(cur_data
+ vers_size
, vers
->name
);
4738 cur_data
+= vers
->next
;
4739 remaining_data
-= vers
->next
;
4743 vers
= (void*)vers
+ next
;
4748 unlock_user(argptr
, guest_data
, 0);
4749 ret
= -TARGET_EINVAL
;
4752 unlock_user(argptr
, guest_data
, guest_data_size
);
4754 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4756 ret
= -TARGET_EFAULT
;
4759 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4760 unlock_user(argptr
, arg
, target_size
);
4767 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4768 int cmd
, abi_long arg
)
4772 const argtype
*arg_type
= ie
->arg_type
;
4773 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4776 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4777 struct blkpg_partition host_part
;
4779 /* Read and convert blkpg */
4781 target_size
= thunk_type_size(arg_type
, 0);
4782 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4784 ret
= -TARGET_EFAULT
;
4787 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4788 unlock_user(argptr
, arg
, 0);
4790 switch (host_blkpg
->op
) {
4791 case BLKPG_ADD_PARTITION
:
4792 case BLKPG_DEL_PARTITION
:
4793 /* payload is struct blkpg_partition */
4796 /* Unknown opcode */
4797 ret
= -TARGET_EINVAL
;
4801 /* Read and convert blkpg->data */
4802 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4803 target_size
= thunk_type_size(part_arg_type
, 0);
4804 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4806 ret
= -TARGET_EFAULT
;
4809 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4810 unlock_user(argptr
, arg
, 0);
4812 /* Swizzle the data pointer to our local copy and call! */
4813 host_blkpg
->data
= &host_part
;
4814 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4820 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4821 int fd
, int cmd
, abi_long arg
)
4823 const argtype
*arg_type
= ie
->arg_type
;
4824 const StructEntry
*se
;
4825 const argtype
*field_types
;
4826 const int *dst_offsets
, *src_offsets
;
4829 abi_ulong
*target_rt_dev_ptr
= NULL
;
4830 unsigned long *host_rt_dev_ptr
= NULL
;
4834 assert(ie
->access
== IOC_W
);
4835 assert(*arg_type
== TYPE_PTR
);
4837 assert(*arg_type
== TYPE_STRUCT
);
4838 target_size
= thunk_type_size(arg_type
, 0);
4839 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4841 return -TARGET_EFAULT
;
4844 assert(*arg_type
== (int)STRUCT_rtentry
);
4845 se
= struct_entries
+ *arg_type
++;
4846 assert(se
->convert
[0] == NULL
);
4847 /* convert struct here to be able to catch rt_dev string */
4848 field_types
= se
->field_types
;
4849 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4850 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4851 for (i
= 0; i
< se
->nb_fields
; i
++) {
4852 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4853 assert(*field_types
== TYPE_PTRVOID
);
4854 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4855 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4856 if (*target_rt_dev_ptr
!= 0) {
4857 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4858 tswapal(*target_rt_dev_ptr
));
4859 if (!*host_rt_dev_ptr
) {
4860 unlock_user(argptr
, arg
, 0);
4861 return -TARGET_EFAULT
;
4864 *host_rt_dev_ptr
= 0;
4869 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4870 argptr
+ src_offsets
[i
],
4871 field_types
, THUNK_HOST
);
4873 unlock_user(argptr
, arg
, 0);
4875 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4877 assert(host_rt_dev_ptr
!= NULL
);
4878 assert(target_rt_dev_ptr
!= NULL
);
4879 if (*host_rt_dev_ptr
!= 0) {
4880 unlock_user((void *)*host_rt_dev_ptr
,
4881 *target_rt_dev_ptr
, 0);
4886 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4887 int fd
, int cmd
, abi_long arg
)
4889 int sig
= target_to_host_signal(arg
);
4890 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4894 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4895 int fd
, int cmd
, abi_long arg
)
4897 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
4898 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
4902 static IOCTLEntry ioctl_entries
[] = {
4903 #define IOCTL(cmd, access, ...) \
4904 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4905 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4906 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4907 #define IOCTL_IGNORE(cmd) \
4908 { TARGET_ ## cmd, 0, #cmd },
4913 /* ??? Implement proper locking for ioctls. */
4914 /* do_ioctl() Must return target values and target errnos. */
4915 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4917 const IOCTLEntry
*ie
;
4918 const argtype
*arg_type
;
4920 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4926 if (ie
->target_cmd
== 0) {
4927 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4928 return -TARGET_ENOSYS
;
4930 if (ie
->target_cmd
== cmd
)
4934 arg_type
= ie
->arg_type
;
4936 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4937 } else if (!ie
->host_cmd
) {
4938 /* Some architectures define BSD ioctls in their headers
4939 that are not implemented in Linux. */
4940 return -TARGET_ENOSYS
;
4943 switch(arg_type
[0]) {
4946 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4950 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4954 target_size
= thunk_type_size(arg_type
, 0);
4955 switch(ie
->access
) {
4957 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4958 if (!is_error(ret
)) {
4959 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4961 return -TARGET_EFAULT
;
4962 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4963 unlock_user(argptr
, arg
, target_size
);
4967 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4969 return -TARGET_EFAULT
;
4970 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4971 unlock_user(argptr
, arg
, 0);
4972 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4976 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4978 return -TARGET_EFAULT
;
4979 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4980 unlock_user(argptr
, arg
, 0);
4981 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4982 if (!is_error(ret
)) {
4983 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4985 return -TARGET_EFAULT
;
4986 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4987 unlock_user(argptr
, arg
, target_size
);
4993 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4994 (long)cmd
, arg_type
[0]);
4995 ret
= -TARGET_ENOSYS
;
5001 static const bitmask_transtbl iflag_tbl
[] = {
5002 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5003 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5004 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5005 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5006 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5007 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5008 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5009 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5010 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5011 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5012 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5013 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5014 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5015 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5019 static const bitmask_transtbl oflag_tbl
[] = {
5020 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5021 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5022 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5023 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5024 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5025 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5026 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5027 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5028 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5029 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5030 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5031 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5032 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5033 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5034 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5035 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5036 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5037 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5038 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5039 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5040 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5041 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5042 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5043 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5047 static const bitmask_transtbl cflag_tbl
[] = {
5048 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5049 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5050 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5051 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5052 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5053 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5054 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5055 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5056 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5057 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5058 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5059 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5060 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5061 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5062 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5063 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5064 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5065 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5066 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5067 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5068 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5069 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5070 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5071 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5072 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5073 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5074 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5075 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5076 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5077 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5078 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5082 static const bitmask_transtbl lflag_tbl
[] = {
5083 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5084 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5085 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5086 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5087 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5088 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5089 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5090 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5091 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5092 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5093 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5094 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5095 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5096 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5097 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5101 static void target_to_host_termios (void *dst
, const void *src
)
5103 struct host_termios
*host
= dst
;
5104 const struct target_termios
*target
= src
;
5107 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5109 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5111 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5113 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5114 host
->c_line
= target
->c_line
;
5116 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5117 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5118 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5119 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5120 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5121 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5122 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5123 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5124 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5125 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5126 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5127 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5128 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5129 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5130 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5131 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5132 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5133 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5136 static void host_to_target_termios (void *dst
, const void *src
)
5138 struct target_termios
*target
= dst
;
5139 const struct host_termios
*host
= src
;
5142 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5144 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5146 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5148 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5149 target
->c_line
= host
->c_line
;
5151 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5152 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5153 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5154 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5155 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5156 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5157 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5158 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5159 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5160 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5161 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5162 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5163 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5164 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5165 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5166 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5167 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5168 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5171 static const StructEntry struct_termios_def
= {
5172 .convert
= { host_to_target_termios
, target_to_host_termios
},
5173 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5174 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5177 static bitmask_transtbl mmap_flags_tbl
[] = {
5178 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5179 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5180 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5181 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5182 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5183 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5184 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5185 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5186 MAP_DENYWRITE
, MAP_DENYWRITE
},
5187 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5188 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5189 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5190 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5191 MAP_NORESERVE
, MAP_NORESERVE
},
5192 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5193 /* MAP_STACK had been ignored by the kernel for quite some time.
5194 Recognize it for the target insofar as we do not want to pass
5195 it through to the host. */
5196 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5200 #if defined(TARGET_I386)
5202 /* NOTE: there is really one LDT for all the threads */
5203 static uint8_t *ldt_table
;
5205 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5212 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5213 if (size
> bytecount
)
5215 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5217 return -TARGET_EFAULT
;
5218 /* ??? Should this by byteswapped? */
5219 memcpy(p
, ldt_table
, size
);
5220 unlock_user(p
, ptr
, size
);
5224 /* XXX: add locking support */
5225 static abi_long
write_ldt(CPUX86State
*env
,
5226 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5228 struct target_modify_ldt_ldt_s ldt_info
;
5229 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5230 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5231 int seg_not_present
, useable
, lm
;
5232 uint32_t *lp
, entry_1
, entry_2
;
5234 if (bytecount
!= sizeof(ldt_info
))
5235 return -TARGET_EINVAL
;
5236 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5237 return -TARGET_EFAULT
;
5238 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5239 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5240 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5241 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5242 unlock_user_struct(target_ldt_info
, ptr
, 0);
5244 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5245 return -TARGET_EINVAL
;
5246 seg_32bit
= ldt_info
.flags
& 1;
5247 contents
= (ldt_info
.flags
>> 1) & 3;
5248 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5249 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5250 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5251 useable
= (ldt_info
.flags
>> 6) & 1;
5255 lm
= (ldt_info
.flags
>> 7) & 1;
5257 if (contents
== 3) {
5259 return -TARGET_EINVAL
;
5260 if (seg_not_present
== 0)
5261 return -TARGET_EINVAL
;
5263 /* allocate the LDT */
5265 env
->ldt
.base
= target_mmap(0,
5266 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5267 PROT_READ
|PROT_WRITE
,
5268 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5269 if (env
->ldt
.base
== -1)
5270 return -TARGET_ENOMEM
;
5271 memset(g2h(env
->ldt
.base
), 0,
5272 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5273 env
->ldt
.limit
= 0xffff;
5274 ldt_table
= g2h(env
->ldt
.base
);
5277 /* NOTE: same code as Linux kernel */
5278 /* Allow LDTs to be cleared by the user. */
5279 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5282 read_exec_only
== 1 &&
5284 limit_in_pages
== 0 &&
5285 seg_not_present
== 1 &&
5293 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5294 (ldt_info
.limit
& 0x0ffff);
5295 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5296 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5297 (ldt_info
.limit
& 0xf0000) |
5298 ((read_exec_only
^ 1) << 9) |
5300 ((seg_not_present
^ 1) << 15) |
5302 (limit_in_pages
<< 23) |
5306 entry_2
|= (useable
<< 20);
5308 /* Install the new entry ... */
5310 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5311 lp
[0] = tswap32(entry_1
);
5312 lp
[1] = tswap32(entry_2
);
5316 /* specific and weird i386 syscalls */
5317 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5318 unsigned long bytecount
)
5324 ret
= read_ldt(ptr
, bytecount
);
5327 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5330 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5333 ret
= -TARGET_ENOSYS
;
5339 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5340 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5342 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5343 struct target_modify_ldt_ldt_s ldt_info
;
5344 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5345 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5346 int seg_not_present
, useable
, lm
;
5347 uint32_t *lp
, entry_1
, entry_2
;
5350 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5351 if (!target_ldt_info
)
5352 return -TARGET_EFAULT
;
5353 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5354 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5355 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5356 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5357 if (ldt_info
.entry_number
== -1) {
5358 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5359 if (gdt_table
[i
] == 0) {
5360 ldt_info
.entry_number
= i
;
5361 target_ldt_info
->entry_number
= tswap32(i
);
5366 unlock_user_struct(target_ldt_info
, ptr
, 1);
5368 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5369 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5370 return -TARGET_EINVAL
;
5371 seg_32bit
= ldt_info
.flags
& 1;
5372 contents
= (ldt_info
.flags
>> 1) & 3;
5373 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5374 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5375 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5376 useable
= (ldt_info
.flags
>> 6) & 1;
5380 lm
= (ldt_info
.flags
>> 7) & 1;
5383 if (contents
== 3) {
5384 if (seg_not_present
== 0)
5385 return -TARGET_EINVAL
;
5388 /* NOTE: same code as Linux kernel */
5389 /* Allow LDTs to be cleared by the user. */
5390 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5391 if ((contents
== 0 &&
5392 read_exec_only
== 1 &&
5394 limit_in_pages
== 0 &&
5395 seg_not_present
== 1 &&
5403 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5404 (ldt_info
.limit
& 0x0ffff);
5405 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5406 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5407 (ldt_info
.limit
& 0xf0000) |
5408 ((read_exec_only
^ 1) << 9) |
5410 ((seg_not_present
^ 1) << 15) |
5412 (limit_in_pages
<< 23) |
5417 /* Install the new entry ... */
5419 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5420 lp
[0] = tswap32(entry_1
);
5421 lp
[1] = tswap32(entry_2
);
5425 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5427 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5428 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5429 uint32_t base_addr
, limit
, flags
;
5430 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5431 int seg_not_present
, useable
, lm
;
5432 uint32_t *lp
, entry_1
, entry_2
;
5434 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5435 if (!target_ldt_info
)
5436 return -TARGET_EFAULT
;
5437 idx
= tswap32(target_ldt_info
->entry_number
);
5438 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5439 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5440 unlock_user_struct(target_ldt_info
, ptr
, 1);
5441 return -TARGET_EINVAL
;
5443 lp
= (uint32_t *)(gdt_table
+ idx
);
5444 entry_1
= tswap32(lp
[0]);
5445 entry_2
= tswap32(lp
[1]);
5447 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5448 contents
= (entry_2
>> 10) & 3;
5449 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5450 seg_32bit
= (entry_2
>> 22) & 1;
5451 limit_in_pages
= (entry_2
>> 23) & 1;
5452 useable
= (entry_2
>> 20) & 1;
5456 lm
= (entry_2
>> 21) & 1;
5458 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5459 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5460 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5461 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5462 base_addr
= (entry_1
>> 16) |
5463 (entry_2
& 0xff000000) |
5464 ((entry_2
& 0xff) << 16);
5465 target_ldt_info
->base_addr
= tswapal(base_addr
);
5466 target_ldt_info
->limit
= tswap32(limit
);
5467 target_ldt_info
->flags
= tswap32(flags
);
5468 unlock_user_struct(target_ldt_info
, ptr
, 1);
5471 #endif /* TARGET_I386 && TARGET_ABI32 */
5473 #ifndef TARGET_ABI32
5474 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5481 case TARGET_ARCH_SET_GS
:
5482 case TARGET_ARCH_SET_FS
:
5483 if (code
== TARGET_ARCH_SET_GS
)
5487 cpu_x86_load_seg(env
, idx
, 0);
5488 env
->segs
[idx
].base
= addr
;
5490 case TARGET_ARCH_GET_GS
:
5491 case TARGET_ARCH_GET_FS
:
5492 if (code
== TARGET_ARCH_GET_GS
)
5496 val
= env
->segs
[idx
].base
;
5497 if (put_user(val
, addr
, abi_ulong
))
5498 ret
= -TARGET_EFAULT
;
5501 ret
= -TARGET_EINVAL
;
5508 #endif /* defined(TARGET_I386) */
5510 #define NEW_STACK_SIZE 0x40000
5513 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5516 pthread_mutex_t mutex
;
5517 pthread_cond_t cond
;
5520 abi_ulong child_tidptr
;
5521 abi_ulong parent_tidptr
;
5525 static void *clone_func(void *arg
)
5527 new_thread_info
*info
= arg
;
5532 rcu_register_thread();
5533 tcg_register_thread();
5537 ts
= (TaskState
*)cpu
->opaque
;
5538 info
->tid
= sys_gettid();
5540 if (info
->child_tidptr
)
5541 put_user_u32(info
->tid
, info
->child_tidptr
);
5542 if (info
->parent_tidptr
)
5543 put_user_u32(info
->tid
, info
->parent_tidptr
);
5544 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
5545 /* Enable signals. */
5546 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5547 /* Signal to the parent that we're ready. */
5548 pthread_mutex_lock(&info
->mutex
);
5549 pthread_cond_broadcast(&info
->cond
);
5550 pthread_mutex_unlock(&info
->mutex
);
5551 /* Wait until the parent has finished initializing the tls state. */
5552 pthread_mutex_lock(&clone_lock
);
5553 pthread_mutex_unlock(&clone_lock
);
5559 /* do_fork() Must return host values and target errnos (unlike most
5560 do_*() functions). */
5561 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5562 abi_ulong parent_tidptr
, target_ulong newtls
,
5563 abi_ulong child_tidptr
)
5565 CPUState
*cpu
= env_cpu(env
);
5569 CPUArchState
*new_env
;
5572 flags
&= ~CLONE_IGNORED_FLAGS
;
5574 /* Emulate vfork() with fork() */
5575 if (flags
& CLONE_VFORK
)
5576 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5578 if (flags
& CLONE_VM
) {
5579 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5580 new_thread_info info
;
5581 pthread_attr_t attr
;
5583 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5584 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5585 return -TARGET_EINVAL
;
5588 ts
= g_new0(TaskState
, 1);
5589 init_task_state(ts
);
5591 /* Grab a mutex so that thread setup appears atomic. */
5592 pthread_mutex_lock(&clone_lock
);
5594 /* we create a new CPU instance. */
5595 new_env
= cpu_copy(env
);
5596 /* Init regs that differ from the parent. */
5597 cpu_clone_regs(new_env
, newsp
);
5598 new_cpu
= env_cpu(new_env
);
5599 new_cpu
->opaque
= ts
;
5600 ts
->bprm
= parent_ts
->bprm
;
5601 ts
->info
= parent_ts
->info
;
5602 ts
->signal_mask
= parent_ts
->signal_mask
;
5604 if (flags
& CLONE_CHILD_CLEARTID
) {
5605 ts
->child_tidptr
= child_tidptr
;
5608 if (flags
& CLONE_SETTLS
) {
5609 cpu_set_tls (new_env
, newtls
);
5612 memset(&info
, 0, sizeof(info
));
5613 pthread_mutex_init(&info
.mutex
, NULL
);
5614 pthread_mutex_lock(&info
.mutex
);
5615 pthread_cond_init(&info
.cond
, NULL
);
5617 if (flags
& CLONE_CHILD_SETTID
) {
5618 info
.child_tidptr
= child_tidptr
;
5620 if (flags
& CLONE_PARENT_SETTID
) {
5621 info
.parent_tidptr
= parent_tidptr
;
5624 ret
= pthread_attr_init(&attr
);
5625 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5626 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5627 /* It is not safe to deliver signals until the child has finished
5628 initializing, so temporarily block all signals. */
5629 sigfillset(&sigmask
);
5630 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5631 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
5633 /* If this is our first additional thread, we need to ensure we
5634 * generate code for parallel execution and flush old translations.
5636 if (!parallel_cpus
) {
5637 parallel_cpus
= true;
5641 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5642 /* TODO: Free new CPU state if thread creation failed. */
5644 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5645 pthread_attr_destroy(&attr
);
5647 /* Wait for the child to initialize. */
5648 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5653 pthread_mutex_unlock(&info
.mutex
);
5654 pthread_cond_destroy(&info
.cond
);
5655 pthread_mutex_destroy(&info
.mutex
);
5656 pthread_mutex_unlock(&clone_lock
);
5658 /* if no CLONE_VM, we consider it is a fork */
5659 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5660 return -TARGET_EINVAL
;
5663 /* We can't support custom termination signals */
5664 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5665 return -TARGET_EINVAL
;
5668 if (block_signals()) {
5669 return -TARGET_ERESTARTSYS
;
5675 /* Child Process. */
5676 cpu_clone_regs(env
, newsp
);
5678 /* There is a race condition here. The parent process could
5679 theoretically read the TID in the child process before the child
5680 tid is set. This would require using either ptrace
5681 (not implemented) or having *_tidptr to point at a shared memory
5682 mapping. We can't repeat the spinlock hack used above because
5683 the child process gets its own copy of the lock. */
5684 if (flags
& CLONE_CHILD_SETTID
)
5685 put_user_u32(sys_gettid(), child_tidptr
);
5686 if (flags
& CLONE_PARENT_SETTID
)
5687 put_user_u32(sys_gettid(), parent_tidptr
);
5688 ts
= (TaskState
*)cpu
->opaque
;
5689 if (flags
& CLONE_SETTLS
)
5690 cpu_set_tls (env
, newtls
);
5691 if (flags
& CLONE_CHILD_CLEARTID
)
5692 ts
->child_tidptr
= child_tidptr
;
5700 /* warning : doesn't handle linux specific flags... */
5701 static int target_to_host_fcntl_cmd(int cmd
)
5706 case TARGET_F_DUPFD
:
5707 case TARGET_F_GETFD
:
5708 case TARGET_F_SETFD
:
5709 case TARGET_F_GETFL
:
5710 case TARGET_F_SETFL
:
5713 case TARGET_F_GETLK
:
5716 case TARGET_F_SETLK
:
5719 case TARGET_F_SETLKW
:
5722 case TARGET_F_GETOWN
:
5725 case TARGET_F_SETOWN
:
5728 case TARGET_F_GETSIG
:
5731 case TARGET_F_SETSIG
:
5734 #if TARGET_ABI_BITS == 32
5735 case TARGET_F_GETLK64
:
5738 case TARGET_F_SETLK64
:
5741 case TARGET_F_SETLKW64
:
5745 case TARGET_F_SETLEASE
:
5748 case TARGET_F_GETLEASE
:
5751 #ifdef F_DUPFD_CLOEXEC
5752 case TARGET_F_DUPFD_CLOEXEC
:
5753 ret
= F_DUPFD_CLOEXEC
;
5756 case TARGET_F_NOTIFY
:
5760 case TARGET_F_GETOWN_EX
:
5765 case TARGET_F_SETOWN_EX
:
5770 case TARGET_F_SETPIPE_SZ
:
5773 case TARGET_F_GETPIPE_SZ
:
5778 ret
= -TARGET_EINVAL
;
5782 #if defined(__powerpc64__)
5783 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5784 * is not supported by kernel. The glibc fcntl call actually adjusts
5785 * them to 5, 6 and 7 before making the syscall(). Since we make the
5786 * syscall directly, adjust to what is supported by the kernel.
5788 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
5789 ret
-= F_GETLK64
- 5;
5796 #define FLOCK_TRANSTBL \
5798 TRANSTBL_CONVERT(F_RDLCK); \
5799 TRANSTBL_CONVERT(F_WRLCK); \
5800 TRANSTBL_CONVERT(F_UNLCK); \
5801 TRANSTBL_CONVERT(F_EXLCK); \
5802 TRANSTBL_CONVERT(F_SHLCK); \
5805 static int target_to_host_flock(int type
)
5807 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5809 #undef TRANSTBL_CONVERT
5810 return -TARGET_EINVAL
;
5813 static int host_to_target_flock(int type
)
5815 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5817 #undef TRANSTBL_CONVERT
5818 /* if we don't know how to convert the value coming
5819 * from the host we copy to the target field as-is
5824 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5825 abi_ulong target_flock_addr
)
5827 struct target_flock
*target_fl
;
5830 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5831 return -TARGET_EFAULT
;
5834 __get_user(l_type
, &target_fl
->l_type
);
5835 l_type
= target_to_host_flock(l_type
);
5839 fl
->l_type
= l_type
;
5840 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5841 __get_user(fl
->l_start
, &target_fl
->l_start
);
5842 __get_user(fl
->l_len
, &target_fl
->l_len
);
5843 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5844 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5848 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5849 const struct flock64
*fl
)
5851 struct target_flock
*target_fl
;
5854 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5855 return -TARGET_EFAULT
;
5858 l_type
= host_to_target_flock(fl
->l_type
);
5859 __put_user(l_type
, &target_fl
->l_type
);
5860 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5861 __put_user(fl
->l_start
, &target_fl
->l_start
);
5862 __put_user(fl
->l_len
, &target_fl
->l_len
);
5863 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5864 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5868 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5869 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
5871 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5872 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
5873 abi_ulong target_flock_addr
)
5875 struct target_oabi_flock64
*target_fl
;
5878 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5879 return -TARGET_EFAULT
;
5882 __get_user(l_type
, &target_fl
->l_type
);
5883 l_type
= target_to_host_flock(l_type
);
5887 fl
->l_type
= l_type
;
5888 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5889 __get_user(fl
->l_start
, &target_fl
->l_start
);
5890 __get_user(fl
->l_len
, &target_fl
->l_len
);
5891 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5892 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5896 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
5897 const struct flock64
*fl
)
5899 struct target_oabi_flock64
*target_fl
;
5902 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5903 return -TARGET_EFAULT
;
5906 l_type
= host_to_target_flock(fl
->l_type
);
5907 __put_user(l_type
, &target_fl
->l_type
);
5908 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5909 __put_user(fl
->l_start
, &target_fl
->l_start
);
5910 __put_user(fl
->l_len
, &target_fl
->l_len
);
5911 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5912 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5917 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5918 abi_ulong target_flock_addr
)
5920 struct target_flock64
*target_fl
;
5923 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5924 return -TARGET_EFAULT
;
5927 __get_user(l_type
, &target_fl
->l_type
);
5928 l_type
= target_to_host_flock(l_type
);
5932 fl
->l_type
= l_type
;
5933 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5934 __get_user(fl
->l_start
, &target_fl
->l_start
);
5935 __get_user(fl
->l_len
, &target_fl
->l_len
);
5936 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5937 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5941 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5942 const struct flock64
*fl
)
5944 struct target_flock64
*target_fl
;
5947 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5948 return -TARGET_EFAULT
;
5951 l_type
= host_to_target_flock(fl
->l_type
);
5952 __put_user(l_type
, &target_fl
->l_type
);
5953 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5954 __put_user(fl
->l_start
, &target_fl
->l_start
);
5955 __put_user(fl
->l_len
, &target_fl
->l_len
);
5956 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5957 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5961 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5963 struct flock64 fl64
;
5965 struct f_owner_ex fox
;
5966 struct target_f_owner_ex
*target_fox
;
5969 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5971 if (host_cmd
== -TARGET_EINVAL
)
5975 case TARGET_F_GETLK
:
5976 ret
= copy_from_user_flock(&fl64
, arg
);
5980 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5982 ret
= copy_to_user_flock(arg
, &fl64
);
5986 case TARGET_F_SETLK
:
5987 case TARGET_F_SETLKW
:
5988 ret
= copy_from_user_flock(&fl64
, arg
);
5992 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5995 case TARGET_F_GETLK64
:
5996 ret
= copy_from_user_flock64(&fl64
, arg
);
6000 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6002 ret
= copy_to_user_flock64(arg
, &fl64
);
6005 case TARGET_F_SETLK64
:
6006 case TARGET_F_SETLKW64
:
6007 ret
= copy_from_user_flock64(&fl64
, arg
);
6011 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6014 case TARGET_F_GETFL
:
6015 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6017 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6021 case TARGET_F_SETFL
:
6022 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6023 target_to_host_bitmask(arg
,
6028 case TARGET_F_GETOWN_EX
:
6029 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6031 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6032 return -TARGET_EFAULT
;
6033 target_fox
->type
= tswap32(fox
.type
);
6034 target_fox
->pid
= tswap32(fox
.pid
);
6035 unlock_user_struct(target_fox
, arg
, 1);
6041 case TARGET_F_SETOWN_EX
:
6042 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6043 return -TARGET_EFAULT
;
6044 fox
.type
= tswap32(target_fox
->type
);
6045 fox
.pid
= tswap32(target_fox
->pid
);
6046 unlock_user_struct(target_fox
, arg
, 0);
6047 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6051 case TARGET_F_SETOWN
:
6052 case TARGET_F_GETOWN
:
6053 case TARGET_F_SETSIG
:
6054 case TARGET_F_GETSIG
:
6055 case TARGET_F_SETLEASE
:
6056 case TARGET_F_GETLEASE
:
6057 case TARGET_F_SETPIPE_SZ
:
6058 case TARGET_F_GETPIPE_SZ
:
6059 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6063 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6071 static inline int high2lowuid(int uid
)
6079 static inline int high2lowgid(int gid
)
6087 static inline int low2highuid(int uid
)
6089 if ((int16_t)uid
== -1)
6095 static inline int low2highgid(int gid
)
6097 if ((int16_t)gid
== -1)
6102 static inline int tswapid(int id
)
6107 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6109 #else /* !USE_UID16 */
6110 static inline int high2lowuid(int uid
)
6114 static inline int high2lowgid(int gid
)
6118 static inline int low2highuid(int uid
)
6122 static inline int low2highgid(int gid
)
6126 static inline int tswapid(int id
)
6131 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6133 #endif /* USE_UID16 */
6135 /* We must do direct syscalls for setting UID/GID, because we want to
6136 * implement the Linux system call semantics of "change only for this thread",
6137 * not the libc/POSIX semantics of "change for all threads in process".
6138 * (See http://ewontfix.com/17/ for more details.)
6139 * We use the 32-bit version of the syscalls if present; if it is not
6140 * then either the host architecture supports 32-bit UIDs natively with
6141 * the standard syscall, or the 16-bit UID is the best we can do.
6143 #ifdef __NR_setuid32
6144 #define __NR_sys_setuid __NR_setuid32
6146 #define __NR_sys_setuid __NR_setuid
6148 #ifdef __NR_setgid32
6149 #define __NR_sys_setgid __NR_setgid32
6151 #define __NR_sys_setgid __NR_setgid
6153 #ifdef __NR_setresuid32
6154 #define __NR_sys_setresuid __NR_setresuid32
6156 #define __NR_sys_setresuid __NR_setresuid
6158 #ifdef __NR_setresgid32
6159 #define __NR_sys_setresgid __NR_setresgid32
6161 #define __NR_sys_setresgid __NR_setresgid
6164 _syscall1(int, sys_setuid
, uid_t
, uid
)
6165 _syscall1(int, sys_setgid
, gid_t
, gid
)
6166 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6167 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6169 void syscall_init(void)
6172 const argtype
*arg_type
;
6176 thunk_init(STRUCT_MAX
);
6178 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6179 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6180 #include "syscall_types.h"
6182 #undef STRUCT_SPECIAL
6184 /* Build target_to_host_errno_table[] table from
6185 * host_to_target_errno_table[]. */
6186 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6187 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6190 /* we patch the ioctl size if necessary. We rely on the fact that
6191 no ioctl has all the bits at '1' in the size field */
6193 while (ie
->target_cmd
!= 0) {
6194 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6195 TARGET_IOC_SIZEMASK
) {
6196 arg_type
= ie
->arg_type
;
6197 if (arg_type
[0] != TYPE_PTR
) {
6198 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6203 size
= thunk_type_size(arg_type
, 0);
6204 ie
->target_cmd
= (ie
->target_cmd
&
6205 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6206 (size
<< TARGET_IOC_SIZESHIFT
);
6209 /* automatic consistency check if same arch */
6210 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6211 (defined(__x86_64__) && defined(TARGET_X86_64))
6212 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6213 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6214 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6221 #if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    /*
     * Reassemble a 64-bit value passed in two 32-bit guest registers.
     * Which register carries the high half depends on guest endianness.
     */
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t high = word0;
    uint64_t low = word1;
#else
    uint64_t high = word1;
    uint64_t low = word0;
#endif
    return (high << 32) | low;
}
6230 #else /* TARGET_ABI_BITS == 32 */
6231 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6235 #endif /* TARGET_ABI_BITS != 32 */
6237 #ifdef TARGET_NR_truncate64
6238 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6243 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
6247 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6251 #ifdef TARGET_NR_ftruncate64
6252 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6257 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
6261 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6265 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6266 abi_ulong target_addr
)
6268 struct target_timespec
*target_ts
;
6270 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6271 return -TARGET_EFAULT
;
6272 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6273 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6274 unlock_user_struct(target_ts
, target_addr
, 0);
6278 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6279 struct timespec
*host_ts
)
6281 struct target_timespec
*target_ts
;
6283 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6284 return -TARGET_EFAULT
;
6285 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6286 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6287 unlock_user_struct(target_ts
, target_addr
, 1);
6291 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6292 abi_ulong target_addr
)
6294 struct target_itimerspec
*target_itspec
;
6296 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6297 return -TARGET_EFAULT
;
6300 host_itspec
->it_interval
.tv_sec
=
6301 tswapal(target_itspec
->it_interval
.tv_sec
);
6302 host_itspec
->it_interval
.tv_nsec
=
6303 tswapal(target_itspec
->it_interval
.tv_nsec
);
6304 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6305 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6307 unlock_user_struct(target_itspec
, target_addr
, 1);
6311 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6312 struct itimerspec
*host_its
)
6314 struct target_itimerspec
*target_itspec
;
6316 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6317 return -TARGET_EFAULT
;
6320 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6321 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6323 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6324 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6326 unlock_user_struct(target_itspec
, target_addr
, 0);
6330 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6331 abi_long target_addr
)
6333 struct target_timex
*target_tx
;
6335 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6336 return -TARGET_EFAULT
;
6339 __get_user(host_tx
->modes
, &target_tx
->modes
);
6340 __get_user(host_tx
->offset
, &target_tx
->offset
);
6341 __get_user(host_tx
->freq
, &target_tx
->freq
);
6342 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6343 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6344 __get_user(host_tx
->status
, &target_tx
->status
);
6345 __get_user(host_tx
->constant
, &target_tx
->constant
);
6346 __get_user(host_tx
->precision
, &target_tx
->precision
);
6347 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6348 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6349 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6350 __get_user(host_tx
->tick
, &target_tx
->tick
);
6351 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6352 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6353 __get_user(host_tx
->shift
, &target_tx
->shift
);
6354 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6355 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6356 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6357 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6358 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6359 __get_user(host_tx
->tai
, &target_tx
->tai
);
6361 unlock_user_struct(target_tx
, target_addr
, 0);
6365 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6366 struct timex
*host_tx
)
6368 struct target_timex
*target_tx
;
6370 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6371 return -TARGET_EFAULT
;
6374 __put_user(host_tx
->modes
, &target_tx
->modes
);
6375 __put_user(host_tx
->offset
, &target_tx
->offset
);
6376 __put_user(host_tx
->freq
, &target_tx
->freq
);
6377 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6378 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6379 __put_user(host_tx
->status
, &target_tx
->status
);
6380 __put_user(host_tx
->constant
, &target_tx
->constant
);
6381 __put_user(host_tx
->precision
, &target_tx
->precision
);
6382 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6383 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6384 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6385 __put_user(host_tx
->tick
, &target_tx
->tick
);
6386 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6387 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6388 __put_user(host_tx
->shift
, &target_tx
->shift
);
6389 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6390 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6391 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6392 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6393 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6394 __put_user(host_tx
->tai
, &target_tx
->tai
);
6396 unlock_user_struct(target_tx
, target_addr
, 1);
6401 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6402 abi_ulong target_addr
)
6404 struct target_sigevent
*target_sevp
;
6406 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6407 return -TARGET_EFAULT
;
6410 /* This union is awkward on 64 bit systems because it has a 32 bit
6411 * integer and a pointer in it; we follow the conversion approach
6412 * used for handling sigval types in signal.c so the guest should get
6413 * the correct value back even if we did a 64 bit byteswap and it's
6414 * using the 32 bit integer.
6416 host_sevp
->sigev_value
.sival_ptr
=
6417 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6418 host_sevp
->sigev_signo
=
6419 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6420 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6421 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6423 unlock_user_struct(target_sevp
, target_addr
, 1);
6427 #if defined(TARGET_NR_mlockall)
6428 static inline int target_to_host_mlockall_arg(int arg
)
6432 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6433 result
|= MCL_CURRENT
;
6435 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6436 result
|= MCL_FUTURE
;
6442 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6443 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6444 defined(TARGET_NR_newfstatat))
6445 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6446 abi_ulong target_addr
,
6447 struct stat
*host_st
)
6449 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6450 if (((CPUARMState
*)cpu_env
)->eabi
) {
6451 struct target_eabi_stat64
*target_st
;
6453 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6454 return -TARGET_EFAULT
;
6455 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6456 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6457 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6458 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6459 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6461 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6462 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6463 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6464 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6465 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6466 __put_user(host_st
->st_size
, &target_st
->st_size
);
6467 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6468 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6469 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6470 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6471 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6472 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6473 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
6474 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
6475 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
6477 unlock_user_struct(target_st
, target_addr
, 1);
6481 #if defined(TARGET_HAS_STRUCT_STAT64)
6482 struct target_stat64
*target_st
;
6484 struct target_stat
*target_st
;
6487 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6488 return -TARGET_EFAULT
;
6489 memset(target_st
, 0, sizeof(*target_st
));
6490 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6491 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6492 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6493 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6495 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6496 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6497 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6498 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6499 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6500 /* XXX: better use of kernel struct */
6501 __put_user(host_st
->st_size
, &target_st
->st_size
);
6502 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6503 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6504 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6505 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6506 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6507 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6508 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
6509 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
6510 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
6512 unlock_user_struct(target_st
, target_addr
, 1);
6519 /* ??? Using host futex calls even when target atomic operations
6520 are not really atomic probably breaks things. However implementing
6521 futexes locally would make futexes shared between multiple processes
6522 tricky. However they're probably useless because guest atomic
6523 operations won't work either. */
6524 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6525 target_ulong uaddr2
, int val3
)
6527 struct timespec ts
, *pts
;
6530 /* ??? We assume FUTEX_* constants are the same on both host
6532 #ifdef FUTEX_CMD_MASK
6533 base_op
= op
& FUTEX_CMD_MASK
;
6539 case FUTEX_WAIT_BITSET
:
6542 target_to_host_timespec(pts
, timeout
);
6546 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6549 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6551 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6553 case FUTEX_CMP_REQUEUE
:
6555 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6556 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6557 But the prototype takes a `struct timespec *'; insert casts
6558 to satisfy the compiler. We do not need to tswap TIMEOUT
6559 since it's not compared to guest memory. */
6560 pts
= (struct timespec
*)(uintptr_t) timeout
;
6561 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6563 (base_op
== FUTEX_CMP_REQUEUE
6567 return -TARGET_ENOSYS
;
6570 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6571 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6572 abi_long handle
, abi_long mount_id
,
6575 struct file_handle
*target_fh
;
6576 struct file_handle
*fh
;
6580 unsigned int size
, total_size
;
6582 if (get_user_s32(size
, handle
)) {
6583 return -TARGET_EFAULT
;
6586 name
= lock_user_string(pathname
);
6588 return -TARGET_EFAULT
;
6591 total_size
= sizeof(struct file_handle
) + size
;
6592 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6594 unlock_user(name
, pathname
, 0);
6595 return -TARGET_EFAULT
;
6598 fh
= g_malloc0(total_size
);
6599 fh
->handle_bytes
= size
;
6601 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6602 unlock_user(name
, pathname
, 0);
6604 /* man name_to_handle_at(2):
6605 * Other than the use of the handle_bytes field, the caller should treat
6606 * the file_handle structure as an opaque data type
6609 memcpy(target_fh
, fh
, total_size
);
6610 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6611 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6613 unlock_user(target_fh
, handle
, total_size
);
6615 if (put_user_s32(mid
, mount_id
)) {
6616 return -TARGET_EFAULT
;
6624 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6625 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6628 struct file_handle
*target_fh
;
6629 struct file_handle
*fh
;
6630 unsigned int size
, total_size
;
6633 if (get_user_s32(size
, handle
)) {
6634 return -TARGET_EFAULT
;
6637 total_size
= sizeof(struct file_handle
) + size
;
6638 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6640 return -TARGET_EFAULT
;
6643 fh
= g_memdup(target_fh
, total_size
);
6644 fh
->handle_bytes
= size
;
6645 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6647 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6648 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6652 unlock_user(target_fh
, handle
, total_size
);
6658 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6660 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6663 target_sigset_t
*target_mask
;
6667 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6668 return -TARGET_EINVAL
;
6670 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6671 return -TARGET_EFAULT
;
6674 target_to_host_sigset(&host_mask
, target_mask
);
6676 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6678 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6680 fd_trans_register(ret
, &target_signalfd_trans
);
6683 unlock_user_struct(target_mask
, mask
, 0);
6689 /* Map host to target signal numbers for the wait family of syscalls.
6690 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
6703 static int open_self_cmdline(void *cpu_env
, int fd
)
6705 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6706 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
6709 for (i
= 0; i
< bprm
->argc
; i
++) {
6710 size_t len
= strlen(bprm
->argv
[i
]) + 1;
6712 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
6720 static int open_self_maps(void *cpu_env
, int fd
)
6722 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6723 TaskState
*ts
= cpu
->opaque
;
6729 fp
= fopen("/proc/self/maps", "r");
6734 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6735 int fields
, dev_maj
, dev_min
, inode
;
6736 uint64_t min
, max
, offset
;
6737 char flag_r
, flag_w
, flag_x
, flag_p
;
6738 char path
[512] = "";
6739 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6740 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6741 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6743 if ((fields
< 10) || (fields
> 11)) {
6746 if (h2g_valid(min
)) {
6747 int flags
= page_get_flags(h2g(min
));
6748 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
6749 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6752 if (h2g(min
) == ts
->info
->stack_limit
) {
6753 pstrcpy(path
, sizeof(path
), " [stack]");
6755 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
6756 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6757 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6758 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6759 path
[0] ? " " : "", path
);
6769 static int open_self_stat(void *cpu_env
, int fd
)
6771 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6772 TaskState
*ts
= cpu
->opaque
;
6773 abi_ulong start_stack
= ts
->info
->start_stack
;
6776 for (i
= 0; i
< 44; i
++) {
6784 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6785 } else if (i
== 1) {
6787 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6788 } else if (i
== 27) {
6791 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6793 /* for the rest, there is MasterCard */
6794 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6798 if (write(fd
, buf
, len
) != len
) {
6806 static int open_self_auxv(void *cpu_env
, int fd
)
6808 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6809 TaskState
*ts
= cpu
->opaque
;
6810 abi_ulong auxv
= ts
->info
->saved_auxv
;
6811 abi_ulong len
= ts
->info
->auxv_len
;
6815 * Auxiliary vector is stored in target process stack.
6816 * read in whole auxv vector and copy it to file
6818 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6822 r
= write(fd
, ptr
, len
);
6829 lseek(fd
, 0, SEEK_SET
);
6830 unlock_user(ptr
, auxv
, len
);
static int is_proc_myself(const char *filename, const char *entry)
{
    /*
     * Return nonzero iff FILENAME names ENTRY inside this process's own
     * /proc directory, i.e. "/proc/self/<entry>" or "/proc/<ourpid>/<entry>".
     */
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, sizeof(proc_prefix) - 1) != 0) {
        return 0;
    }
    filename += sizeof(proc_prefix) - 1;

    if (strncmp(filename, self_prefix, sizeof(self_prefix) - 1) == 0) {
        filename += sizeof(self_prefix) - 1;
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];
        size_t n;

        snprintf(myself, sizeof(myself), "%d/", getpid());
        n = strlen(myself);
        if (strncmp(filename, myself, n) != 0) {
            return 0;
        }
        filename += n;
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
6860 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6861 defined(TARGET_SPARC) || defined(TARGET_M68K)
static int is_proc(const char *filename, const char *entry)
{
    /* Exact-path match for absolute /proc entries we emulate. */
    return !strcmp(filename, entry);
}
6868 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    /*
     * Copy the host's /proc/net/route to the guest with the address
     * columns (dest/gateway/mask) byte-swapped, so a cross-endian guest
     * sees them in its own byte order.
     */
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */
    read = getline(&line, &len, fp);
    if (read == -1) {
        /* Bug fix: don't dprintf a NULL line if the file was empty. */
        free(line);
        fclose(fp);
        return -1;
    }
    dprintf(fd, "%s", line);

    /* read routes */
    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        /*
         * Bug fix: bound the interface name conversion (%15s for
         * iface[16]); a bare %s can overflow the buffer.
         */
        fields = sscanf(line,
                        "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
6913 #if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    /* Minimal /proc/cpuinfo for SPARC guests: report a sun4u machine. */
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
6921 #if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    /* Minimal /proc/hardware for m68k guests. */
    static const char contents[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", contents);
    return 0;
}
6929 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6932 const char *filename
;
6933 int (*fill
)(void *cpu_env
, int fd
);
6934 int (*cmp
)(const char *s1
, const char *s2
);
6936 const struct fake_open
*fake_open
;
6937 static const struct fake_open fakes
[] = {
6938 { "maps", open_self_maps
, is_proc_myself
},
6939 { "stat", open_self_stat
, is_proc_myself
},
6940 { "auxv", open_self_auxv
, is_proc_myself
},
6941 { "cmdline", open_self_cmdline
, is_proc_myself
},
6942 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6943 { "/proc/net/route", open_net_route
, is_proc
},
6945 #if defined(TARGET_SPARC)
6946 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
6948 #if defined(TARGET_M68K)
6949 { "/proc/hardware", open_hardware
, is_proc
},
6951 { NULL
, NULL
, NULL
}
6954 if (is_proc_myself(pathname
, "exe")) {
6955 int execfd
= qemu_getauxval(AT_EXECFD
);
6956 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6959 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6960 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6965 if (fake_open
->filename
) {
6967 char filename
[PATH_MAX
];
6970 /* create temporary file to map stat to */
6971 tmpdir
= getenv("TMPDIR");
6974 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6975 fd
= mkstemp(filename
);
6981 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6987 lseek(fd
, 0, SEEK_SET
);
6992 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6995 #define TIMER_MAGIC 0x0caf0000
6996 #define TIMER_MAGIC_MASK 0xffff0000
6998 /* Convert QEMU provided timer ID back to internal 16bit index format */
6999 static target_timer_t
get_timer_id(abi_long arg
)
7001 target_timer_t timerid
= arg
;
7003 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7004 return -TARGET_EINVAL
;
7009 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7010 return -TARGET_EINVAL
;
7016 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7018 abi_ulong target_addr
,
7021 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7022 unsigned host_bits
= sizeof(*host_mask
) * 8;
7023 abi_ulong
*target_mask
;
7026 assert(host_size
>= target_size
);
7028 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7030 return -TARGET_EFAULT
;
7032 memset(host_mask
, 0, host_size
);
7034 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7035 unsigned bit
= i
* target_bits
;
7038 __get_user(val
, &target_mask
[i
]);
7039 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7040 if (val
& (1UL << j
)) {
7041 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7046 unlock_user(target_mask
, target_addr
, 0);
7050 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7052 abi_ulong target_addr
,
7055 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7056 unsigned host_bits
= sizeof(*host_mask
) * 8;
7057 abi_ulong
*target_mask
;
7060 assert(host_size
>= target_size
);
7062 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7064 return -TARGET_EFAULT
;
7067 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7068 unsigned bit
= i
* target_bits
;
7071 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7072 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7076 __put_user(val
, &target_mask
[i
]);
7079 unlock_user(target_mask
, target_addr
, target_size
);
7083 /* This is an internal helper for do_syscall so that it is easier
7084 * to have a single return point, so that actions, such as logging
7085 * of syscall results, can be performed.
7086 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7088 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7089 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7090 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7093 CPUState
*cpu
= env_cpu(cpu_env
);
7095 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7096 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7097 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7100 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7101 || defined(TARGET_NR_fstatfs)
7107 case TARGET_NR_exit
:
7108 /* In old applications this may be used to implement _exit(2).
7109 However in threaded applictions it is used for thread termination,
7110 and _exit_group is used for application termination.
7111 Do thread termination if we have more then one thread. */
7113 if (block_signals()) {
7114 return -TARGET_ERESTARTSYS
;
7119 if (CPU_NEXT(first_cpu
)) {
7122 /* Remove the CPU from the list. */
7123 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
7128 if (ts
->child_tidptr
) {
7129 put_user_u32(0, ts
->child_tidptr
);
7130 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7134 object_unref(OBJECT(cpu
));
7136 rcu_unregister_thread();
7141 preexit_cleanup(cpu_env
, arg1
);
7143 return 0; /* avoid warning */
7144 case TARGET_NR_read
:
7145 if (arg2
== 0 && arg3
== 0) {
7146 return get_errno(safe_read(arg1
, 0, 0));
7148 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7149 return -TARGET_EFAULT
;
7150 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7152 fd_trans_host_to_target_data(arg1
)) {
7153 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7155 unlock_user(p
, arg2
, ret
);
7158 case TARGET_NR_write
:
7159 if (arg2
== 0 && arg3
== 0) {
7160 return get_errno(safe_write(arg1
, 0, 0));
7162 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7163 return -TARGET_EFAULT
;
7164 if (fd_trans_target_to_host_data(arg1
)) {
7165 void *copy
= g_malloc(arg3
);
7166 memcpy(copy
, p
, arg3
);
7167 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7169 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7173 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7175 unlock_user(p
, arg2
, 0);
7178 #ifdef TARGET_NR_open
7179 case TARGET_NR_open
:
7180 if (!(p
= lock_user_string(arg1
)))
7181 return -TARGET_EFAULT
;
7182 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7183 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7185 fd_trans_unregister(ret
);
7186 unlock_user(p
, arg1
, 0);
7189 case TARGET_NR_openat
:
7190 if (!(p
= lock_user_string(arg2
)))
7191 return -TARGET_EFAULT
;
7192 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7193 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7195 fd_trans_unregister(ret
);
7196 unlock_user(p
, arg2
, 0);
7198 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7199 case TARGET_NR_name_to_handle_at
:
7200 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7203 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7204 case TARGET_NR_open_by_handle_at
:
7205 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7206 fd_trans_unregister(ret
);
7209 case TARGET_NR_close
:
7210 fd_trans_unregister(arg1
);
7211 return get_errno(close(arg1
));
7214 return do_brk(arg1
);
7215 #ifdef TARGET_NR_fork
7216 case TARGET_NR_fork
:
7217 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7219 #ifdef TARGET_NR_waitpid
7220 case TARGET_NR_waitpid
:
7223 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7224 if (!is_error(ret
) && arg2
&& ret
7225 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7226 return -TARGET_EFAULT
;
7230 #ifdef TARGET_NR_waitid
7231 case TARGET_NR_waitid
:
7235 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7236 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7237 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7238 return -TARGET_EFAULT
;
7239 host_to_target_siginfo(p
, &info
);
7240 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7245 #ifdef TARGET_NR_creat /* not on alpha */
7246 case TARGET_NR_creat
:
7247 if (!(p
= lock_user_string(arg1
)))
7248 return -TARGET_EFAULT
;
7249 ret
= get_errno(creat(p
, arg2
));
7250 fd_trans_unregister(ret
);
7251 unlock_user(p
, arg1
, 0);
7254 #ifdef TARGET_NR_link
7255 case TARGET_NR_link
:
7258 p
= lock_user_string(arg1
);
7259 p2
= lock_user_string(arg2
);
7261 ret
= -TARGET_EFAULT
;
7263 ret
= get_errno(link(p
, p2
));
7264 unlock_user(p2
, arg2
, 0);
7265 unlock_user(p
, arg1
, 0);
7269 #if defined(TARGET_NR_linkat)
7270 case TARGET_NR_linkat
:
7274 return -TARGET_EFAULT
;
7275 p
= lock_user_string(arg2
);
7276 p2
= lock_user_string(arg4
);
7278 ret
= -TARGET_EFAULT
;
7280 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7281 unlock_user(p
, arg2
, 0);
7282 unlock_user(p2
, arg4
, 0);
7286 #ifdef TARGET_NR_unlink
7287 case TARGET_NR_unlink
:
7288 if (!(p
= lock_user_string(arg1
)))
7289 return -TARGET_EFAULT
;
7290 ret
= get_errno(unlink(p
));
7291 unlock_user(p
, arg1
, 0);
7294 #if defined(TARGET_NR_unlinkat)
7295 case TARGET_NR_unlinkat
:
7296 if (!(p
= lock_user_string(arg2
)))
7297 return -TARGET_EFAULT
;
7298 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7299 unlock_user(p
, arg2
, 0);
7302 case TARGET_NR_execve
:
7304 char **argp
, **envp
;
7307 abi_ulong guest_argp
;
7308 abi_ulong guest_envp
;
7315 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7316 if (get_user_ual(addr
, gp
))
7317 return -TARGET_EFAULT
;
7324 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7325 if (get_user_ual(addr
, gp
))
7326 return -TARGET_EFAULT
;
7332 argp
= g_new0(char *, argc
+ 1);
7333 envp
= g_new0(char *, envc
+ 1);
7335 for (gp
= guest_argp
, q
= argp
; gp
;
7336 gp
+= sizeof(abi_ulong
), q
++) {
7337 if (get_user_ual(addr
, gp
))
7341 if (!(*q
= lock_user_string(addr
)))
7343 total_size
+= strlen(*q
) + 1;
7347 for (gp
= guest_envp
, q
= envp
; gp
;
7348 gp
+= sizeof(abi_ulong
), q
++) {
7349 if (get_user_ual(addr
, gp
))
7353 if (!(*q
= lock_user_string(addr
)))
7355 total_size
+= strlen(*q
) + 1;
7359 if (!(p
= lock_user_string(arg1
)))
7361 /* Although execve() is not an interruptible syscall it is
7362 * a special case where we must use the safe_syscall wrapper:
7363 * if we allow a signal to happen before we make the host
7364 * syscall then we will 'lose' it, because at the point of
7365 * execve the process leaves QEMU's control. So we use the
7366 * safe syscall wrapper to ensure that we either take the
7367 * signal as a guest signal, or else it does not happen
7368 * before the execve completes and makes it the other
7369 * program's problem.
7371 ret
= get_errno(safe_execve(p
, argp
, envp
));
7372 unlock_user(p
, arg1
, 0);
7377 ret
= -TARGET_EFAULT
;
7380 for (gp
= guest_argp
, q
= argp
; *q
;
7381 gp
+= sizeof(abi_ulong
), q
++) {
7382 if (get_user_ual(addr
, gp
)
7385 unlock_user(*q
, addr
, 0);
7387 for (gp
= guest_envp
, q
= envp
; *q
;
7388 gp
+= sizeof(abi_ulong
), q
++) {
7389 if (get_user_ual(addr
, gp
)
7392 unlock_user(*q
, addr
, 0);
7399 case TARGET_NR_chdir
:
7400 if (!(p
= lock_user_string(arg1
)))
7401 return -TARGET_EFAULT
;
7402 ret
= get_errno(chdir(p
));
7403 unlock_user(p
, arg1
, 0);
7405 #ifdef TARGET_NR_time
7406 case TARGET_NR_time
:
7409 ret
= get_errno(time(&host_time
));
7412 && put_user_sal(host_time
, arg1
))
7413 return -TARGET_EFAULT
;
7417 #ifdef TARGET_NR_mknod
7418 case TARGET_NR_mknod
:
7419 if (!(p
= lock_user_string(arg1
)))
7420 return -TARGET_EFAULT
;
7421 ret
= get_errno(mknod(p
, arg2
, arg3
));
7422 unlock_user(p
, arg1
, 0);
7425 #if defined(TARGET_NR_mknodat)
7426 case TARGET_NR_mknodat
:
7427 if (!(p
= lock_user_string(arg2
)))
7428 return -TARGET_EFAULT
;
7429 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7430 unlock_user(p
, arg2
, 0);
7433 #ifdef TARGET_NR_chmod
7434 case TARGET_NR_chmod
:
7435 if (!(p
= lock_user_string(arg1
)))
7436 return -TARGET_EFAULT
;
7437 ret
= get_errno(chmod(p
, arg2
));
7438 unlock_user(p
, arg1
, 0);
7441 #ifdef TARGET_NR_lseek
7442 case TARGET_NR_lseek
:
7443 return get_errno(lseek(arg1
, arg2
, arg3
));
7445 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7446 /* Alpha specific */
7447 case TARGET_NR_getxpid
:
7448 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7449 return get_errno(getpid());
7451 #ifdef TARGET_NR_getpid
7452 case TARGET_NR_getpid
:
7453 return get_errno(getpid());
7455 case TARGET_NR_mount
:
7457 /* need to look at the data field */
7461 p
= lock_user_string(arg1
);
7463 return -TARGET_EFAULT
;
7469 p2
= lock_user_string(arg2
);
7472 unlock_user(p
, arg1
, 0);
7474 return -TARGET_EFAULT
;
7478 p3
= lock_user_string(arg3
);
7481 unlock_user(p
, arg1
, 0);
7483 unlock_user(p2
, arg2
, 0);
7484 return -TARGET_EFAULT
;
7490 /* FIXME - arg5 should be locked, but it isn't clear how to
7491 * do that since it's not guaranteed to be a NULL-terminated
7495 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7497 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7499 ret
= get_errno(ret
);
7502 unlock_user(p
, arg1
, 0);
7504 unlock_user(p2
, arg2
, 0);
7506 unlock_user(p3
, arg3
, 0);
7510 #ifdef TARGET_NR_umount
7511 case TARGET_NR_umount
:
7512 if (!(p
= lock_user_string(arg1
)))
7513 return -TARGET_EFAULT
;
7514 ret
= get_errno(umount(p
));
7515 unlock_user(p
, arg1
, 0);
7518 #ifdef TARGET_NR_stime /* not on alpha */
7519 case TARGET_NR_stime
:
7522 if (get_user_sal(host_time
, arg1
))
7523 return -TARGET_EFAULT
;
7524 return get_errno(stime(&host_time
));
7527 #ifdef TARGET_NR_alarm /* not on alpha */
7528 case TARGET_NR_alarm
:
7531 #ifdef TARGET_NR_pause /* not on alpha */
7532 case TARGET_NR_pause
:
7533 if (!block_signals()) {
7534 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7536 return -TARGET_EINTR
;
7538 #ifdef TARGET_NR_utime
7539 case TARGET_NR_utime
:
7541 struct utimbuf tbuf
, *host_tbuf
;
7542 struct target_utimbuf
*target_tbuf
;
7544 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7545 return -TARGET_EFAULT
;
7546 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7547 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7548 unlock_user_struct(target_tbuf
, arg2
, 0);
7553 if (!(p
= lock_user_string(arg1
)))
7554 return -TARGET_EFAULT
;
7555 ret
= get_errno(utime(p
, host_tbuf
));
7556 unlock_user(p
, arg1
, 0);
7560 #ifdef TARGET_NR_utimes
7561 case TARGET_NR_utimes
:
7563 struct timeval
*tvp
, tv
[2];
7565 if (copy_from_user_timeval(&tv
[0], arg2
)
7566 || copy_from_user_timeval(&tv
[1],
7567 arg2
+ sizeof(struct target_timeval
)))
7568 return -TARGET_EFAULT
;
7573 if (!(p
= lock_user_string(arg1
)))
7574 return -TARGET_EFAULT
;
7575 ret
= get_errno(utimes(p
, tvp
));
7576 unlock_user(p
, arg1
, 0);
7580 #if defined(TARGET_NR_futimesat)
7581 case TARGET_NR_futimesat
:
7583 struct timeval
*tvp
, tv
[2];
7585 if (copy_from_user_timeval(&tv
[0], arg3
)
7586 || copy_from_user_timeval(&tv
[1],
7587 arg3
+ sizeof(struct target_timeval
)))
7588 return -TARGET_EFAULT
;
7593 if (!(p
= lock_user_string(arg2
))) {
7594 return -TARGET_EFAULT
;
7596 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7597 unlock_user(p
, arg2
, 0);
7601 #ifdef TARGET_NR_access
7602 case TARGET_NR_access
:
7603 if (!(p
= lock_user_string(arg1
))) {
7604 return -TARGET_EFAULT
;
7606 ret
= get_errno(access(path(p
), arg2
));
7607 unlock_user(p
, arg1
, 0);
7610 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7611 case TARGET_NR_faccessat
:
7612 if (!(p
= lock_user_string(arg2
))) {
7613 return -TARGET_EFAULT
;
7615 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7616 unlock_user(p
, arg2
, 0);
7619 #ifdef TARGET_NR_nice /* not on alpha */
7620 case TARGET_NR_nice
:
7621 return get_errno(nice(arg1
));
7623 case TARGET_NR_sync
:
7626 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7627 case TARGET_NR_syncfs
:
7628 return get_errno(syncfs(arg1
));
7630 case TARGET_NR_kill
:
7631 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7632 #ifdef TARGET_NR_rename
7633 case TARGET_NR_rename
:
7636 p
= lock_user_string(arg1
);
7637 p2
= lock_user_string(arg2
);
7639 ret
= -TARGET_EFAULT
;
7641 ret
= get_errno(rename(p
, p2
));
7642 unlock_user(p2
, arg2
, 0);
7643 unlock_user(p
, arg1
, 0);
7647 #if defined(TARGET_NR_renameat)
7648 case TARGET_NR_renameat
:
7651 p
= lock_user_string(arg2
);
7652 p2
= lock_user_string(arg4
);
7654 ret
= -TARGET_EFAULT
;
7656 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7657 unlock_user(p2
, arg4
, 0);
7658 unlock_user(p
, arg2
, 0);
7662 #if defined(TARGET_NR_renameat2)
7663 case TARGET_NR_renameat2
:
7666 p
= lock_user_string(arg2
);
7667 p2
= lock_user_string(arg4
);
7669 ret
= -TARGET_EFAULT
;
7671 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7673 unlock_user(p2
, arg4
, 0);
7674 unlock_user(p
, arg2
, 0);
7678 #ifdef TARGET_NR_mkdir
7679 case TARGET_NR_mkdir
:
7680 if (!(p
= lock_user_string(arg1
)))
7681 return -TARGET_EFAULT
;
7682 ret
= get_errno(mkdir(p
, arg2
));
7683 unlock_user(p
, arg1
, 0);
7686 #if defined(TARGET_NR_mkdirat)
7687 case TARGET_NR_mkdirat
:
7688 if (!(p
= lock_user_string(arg2
)))
7689 return -TARGET_EFAULT
;
7690 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7691 unlock_user(p
, arg2
, 0);
7694 #ifdef TARGET_NR_rmdir
7695 case TARGET_NR_rmdir
:
7696 if (!(p
= lock_user_string(arg1
)))
7697 return -TARGET_EFAULT
;
7698 ret
= get_errno(rmdir(p
));
7699 unlock_user(p
, arg1
, 0);
7703 ret
= get_errno(dup(arg1
));
7705 fd_trans_dup(arg1
, ret
);
7708 #ifdef TARGET_NR_pipe
7709 case TARGET_NR_pipe
:
7710 return do_pipe(cpu_env
, arg1
, 0, 0);
7712 #ifdef TARGET_NR_pipe2
7713 case TARGET_NR_pipe2
:
7714 return do_pipe(cpu_env
, arg1
,
7715 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7717 case TARGET_NR_times
:
7719 struct target_tms
*tmsp
;
7721 ret
= get_errno(times(&tms
));
7723 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7725 return -TARGET_EFAULT
;
7726 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7727 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7728 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7729 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7732 ret
= host_to_target_clock_t(ret
);
7735 case TARGET_NR_acct
:
7737 ret
= get_errno(acct(NULL
));
7739 if (!(p
= lock_user_string(arg1
))) {
7740 return -TARGET_EFAULT
;
7742 ret
= get_errno(acct(path(p
)));
7743 unlock_user(p
, arg1
, 0);
7746 #ifdef TARGET_NR_umount2
7747 case TARGET_NR_umount2
:
7748 if (!(p
= lock_user_string(arg1
)))
7749 return -TARGET_EFAULT
;
7750 ret
= get_errno(umount2(p
, arg2
));
7751 unlock_user(p
, arg1
, 0);
7754 case TARGET_NR_ioctl
:
7755 return do_ioctl(arg1
, arg2
, arg3
);
7756 #ifdef TARGET_NR_fcntl
7757 case TARGET_NR_fcntl
:
7758 return do_fcntl(arg1
, arg2
, arg3
);
7760 case TARGET_NR_setpgid
:
7761 return get_errno(setpgid(arg1
, arg2
));
7762 case TARGET_NR_umask
:
7763 return get_errno(umask(arg1
));
7764 case TARGET_NR_chroot
:
7765 if (!(p
= lock_user_string(arg1
)))
7766 return -TARGET_EFAULT
;
7767 ret
= get_errno(chroot(p
));
7768 unlock_user(p
, arg1
, 0);
7770 #ifdef TARGET_NR_dup2
7771 case TARGET_NR_dup2
:
7772 ret
= get_errno(dup2(arg1
, arg2
));
7774 fd_trans_dup(arg1
, arg2
);
7778 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7779 case TARGET_NR_dup3
:
7783 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
7786 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
7787 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
7789 fd_trans_dup(arg1
, arg2
);
7794 #ifdef TARGET_NR_getppid /* not on alpha */
7795 case TARGET_NR_getppid
:
7796 return get_errno(getppid());
7798 #ifdef TARGET_NR_getpgrp
7799 case TARGET_NR_getpgrp
:
7800 return get_errno(getpgrp());
7802 case TARGET_NR_setsid
:
7803 return get_errno(setsid());
7804 #ifdef TARGET_NR_sigaction
7805 case TARGET_NR_sigaction
:
7807 #if defined(TARGET_ALPHA)
7808 struct target_sigaction act
, oact
, *pact
= 0;
7809 struct target_old_sigaction
*old_act
;
7811 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7812 return -TARGET_EFAULT
;
7813 act
._sa_handler
= old_act
->_sa_handler
;
7814 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7815 act
.sa_flags
= old_act
->sa_flags
;
7816 act
.sa_restorer
= 0;
7817 unlock_user_struct(old_act
, arg2
, 0);
7820 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7821 if (!is_error(ret
) && arg3
) {
7822 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7823 return -TARGET_EFAULT
;
7824 old_act
->_sa_handler
= oact
._sa_handler
;
7825 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7826 old_act
->sa_flags
= oact
.sa_flags
;
7827 unlock_user_struct(old_act
, arg3
, 1);
7829 #elif defined(TARGET_MIPS)
7830 struct target_sigaction act
, oact
, *pact
, *old_act
;
7833 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7834 return -TARGET_EFAULT
;
7835 act
._sa_handler
= old_act
->_sa_handler
;
7836 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7837 act
.sa_flags
= old_act
->sa_flags
;
7838 unlock_user_struct(old_act
, arg2
, 0);
7844 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7846 if (!is_error(ret
) && arg3
) {
7847 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7848 return -TARGET_EFAULT
;
7849 old_act
->_sa_handler
= oact
._sa_handler
;
7850 old_act
->sa_flags
= oact
.sa_flags
;
7851 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7852 old_act
->sa_mask
.sig
[1] = 0;
7853 old_act
->sa_mask
.sig
[2] = 0;
7854 old_act
->sa_mask
.sig
[3] = 0;
7855 unlock_user_struct(old_act
, arg3
, 1);
7858 struct target_old_sigaction
*old_act
;
7859 struct target_sigaction act
, oact
, *pact
;
7861 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7862 return -TARGET_EFAULT
;
7863 act
._sa_handler
= old_act
->_sa_handler
;
7864 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7865 act
.sa_flags
= old_act
->sa_flags
;
7866 act
.sa_restorer
= old_act
->sa_restorer
;
7867 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7868 act
.ka_restorer
= 0;
7870 unlock_user_struct(old_act
, arg2
, 0);
7875 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7876 if (!is_error(ret
) && arg3
) {
7877 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7878 return -TARGET_EFAULT
;
7879 old_act
->_sa_handler
= oact
._sa_handler
;
7880 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7881 old_act
->sa_flags
= oact
.sa_flags
;
7882 old_act
->sa_restorer
= oact
.sa_restorer
;
7883 unlock_user_struct(old_act
, arg3
, 1);
7889 case TARGET_NR_rt_sigaction
:
7891 #if defined(TARGET_ALPHA)
7892 /* For Alpha and SPARC this is a 5 argument syscall, with
7893 * a 'restorer' parameter which must be copied into the
7894 * sa_restorer field of the sigaction struct.
7895 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7896 * and arg5 is the sigsetsize.
7897 * Alpha also has a separate rt_sigaction struct that it uses
7898 * here; SPARC uses the usual sigaction struct.
7900 struct target_rt_sigaction
*rt_act
;
7901 struct target_sigaction act
, oact
, *pact
= 0;
7903 if (arg4
!= sizeof(target_sigset_t
)) {
7904 return -TARGET_EINVAL
;
7907 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7908 return -TARGET_EFAULT
;
7909 act
._sa_handler
= rt_act
->_sa_handler
;
7910 act
.sa_mask
= rt_act
->sa_mask
;
7911 act
.sa_flags
= rt_act
->sa_flags
;
7912 act
.sa_restorer
= arg5
;
7913 unlock_user_struct(rt_act
, arg2
, 0);
7916 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7917 if (!is_error(ret
) && arg3
) {
7918 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7919 return -TARGET_EFAULT
;
7920 rt_act
->_sa_handler
= oact
._sa_handler
;
7921 rt_act
->sa_mask
= oact
.sa_mask
;
7922 rt_act
->sa_flags
= oact
.sa_flags
;
7923 unlock_user_struct(rt_act
, arg3
, 1);
7927 target_ulong restorer
= arg4
;
7928 target_ulong sigsetsize
= arg5
;
7930 target_ulong sigsetsize
= arg4
;
7932 struct target_sigaction
*act
;
7933 struct target_sigaction
*oact
;
7935 if (sigsetsize
!= sizeof(target_sigset_t
)) {
7936 return -TARGET_EINVAL
;
7939 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
7940 return -TARGET_EFAULT
;
7942 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7943 act
->ka_restorer
= restorer
;
7949 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7950 ret
= -TARGET_EFAULT
;
7951 goto rt_sigaction_fail
;
7955 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7958 unlock_user_struct(act
, arg2
, 0);
7960 unlock_user_struct(oact
, arg3
, 1);
7964 #ifdef TARGET_NR_sgetmask /* not on alpha */
7965 case TARGET_NR_sgetmask
:
7968 abi_ulong target_set
;
7969 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7971 host_to_target_old_sigset(&target_set
, &cur_set
);
7977 #ifdef TARGET_NR_ssetmask /* not on alpha */
7978 case TARGET_NR_ssetmask
:
7981 abi_ulong target_set
= arg1
;
7982 target_to_host_old_sigset(&set
, &target_set
);
7983 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7985 host_to_target_old_sigset(&target_set
, &oset
);
7991 #ifdef TARGET_NR_sigprocmask
7992 case TARGET_NR_sigprocmask
:
7994 #if defined(TARGET_ALPHA)
7995 sigset_t set
, oldset
;
8000 case TARGET_SIG_BLOCK
:
8003 case TARGET_SIG_UNBLOCK
:
8006 case TARGET_SIG_SETMASK
:
8010 return -TARGET_EINVAL
;
8013 target_to_host_old_sigset(&set
, &mask
);
8015 ret
= do_sigprocmask(how
, &set
, &oldset
);
8016 if (!is_error(ret
)) {
8017 host_to_target_old_sigset(&mask
, &oldset
);
8019 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8022 sigset_t set
, oldset
, *set_ptr
;
8027 case TARGET_SIG_BLOCK
:
8030 case TARGET_SIG_UNBLOCK
:
8033 case TARGET_SIG_SETMASK
:
8037 return -TARGET_EINVAL
;
8039 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8040 return -TARGET_EFAULT
;
8041 target_to_host_old_sigset(&set
, p
);
8042 unlock_user(p
, arg2
, 0);
8048 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8049 if (!is_error(ret
) && arg3
) {
8050 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8051 return -TARGET_EFAULT
;
8052 host_to_target_old_sigset(p
, &oldset
);
8053 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8059 case TARGET_NR_rt_sigprocmask
:
8062 sigset_t set
, oldset
, *set_ptr
;
8064 if (arg4
!= sizeof(target_sigset_t
)) {
8065 return -TARGET_EINVAL
;
8070 case TARGET_SIG_BLOCK
:
8073 case TARGET_SIG_UNBLOCK
:
8076 case TARGET_SIG_SETMASK
:
8080 return -TARGET_EINVAL
;
8082 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8083 return -TARGET_EFAULT
;
8084 target_to_host_sigset(&set
, p
);
8085 unlock_user(p
, arg2
, 0);
8091 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8092 if (!is_error(ret
) && arg3
) {
8093 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8094 return -TARGET_EFAULT
;
8095 host_to_target_sigset(p
, &oldset
);
8096 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8100 #ifdef TARGET_NR_sigpending
8101 case TARGET_NR_sigpending
:
8104 ret
= get_errno(sigpending(&set
));
8105 if (!is_error(ret
)) {
8106 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8107 return -TARGET_EFAULT
;
8108 host_to_target_old_sigset(p
, &set
);
8109 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8114 case TARGET_NR_rt_sigpending
:
8118 /* Yes, this check is >, not != like most. We follow the kernel's
8119 * logic and it does it like this because it implements
8120 * NR_sigpending through the same code path, and in that case
8121 * the old_sigset_t is smaller in size.
8123 if (arg2
> sizeof(target_sigset_t
)) {
8124 return -TARGET_EINVAL
;
8127 ret
= get_errno(sigpending(&set
));
8128 if (!is_error(ret
)) {
8129 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8130 return -TARGET_EFAULT
;
8131 host_to_target_sigset(p
, &set
);
8132 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8136 #ifdef TARGET_NR_sigsuspend
8137 case TARGET_NR_sigsuspend
:
8139 TaskState
*ts
= cpu
->opaque
;
8140 #if defined(TARGET_ALPHA)
8141 abi_ulong mask
= arg1
;
8142 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8144 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8145 return -TARGET_EFAULT
;
8146 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8147 unlock_user(p
, arg1
, 0);
8149 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8151 if (ret
!= -TARGET_ERESTARTSYS
) {
8152 ts
->in_sigsuspend
= 1;
8157 case TARGET_NR_rt_sigsuspend
:
8159 TaskState
*ts
= cpu
->opaque
;
8161 if (arg2
!= sizeof(target_sigset_t
)) {
8162 return -TARGET_EINVAL
;
8164 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8165 return -TARGET_EFAULT
;
8166 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8167 unlock_user(p
, arg1
, 0);
8168 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8170 if (ret
!= -TARGET_ERESTARTSYS
) {
8171 ts
->in_sigsuspend
= 1;
8175 case TARGET_NR_rt_sigtimedwait
:
8178 struct timespec uts
, *puts
;
8181 if (arg4
!= sizeof(target_sigset_t
)) {
8182 return -TARGET_EINVAL
;
8185 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8186 return -TARGET_EFAULT
;
8187 target_to_host_sigset(&set
, p
);
8188 unlock_user(p
, arg1
, 0);
8191 target_to_host_timespec(puts
, arg3
);
8195 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8197 if (!is_error(ret
)) {
8199 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8202 return -TARGET_EFAULT
;
8204 host_to_target_siginfo(p
, &uinfo
);
8205 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8207 ret
= host_to_target_signal(ret
);
8211 case TARGET_NR_rt_sigqueueinfo
:
8215 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8217 return -TARGET_EFAULT
;
8219 target_to_host_siginfo(&uinfo
, p
);
8220 unlock_user(p
, arg3
, 0);
8221 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8224 case TARGET_NR_rt_tgsigqueueinfo
:
8228 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8230 return -TARGET_EFAULT
;
8232 target_to_host_siginfo(&uinfo
, p
);
8233 unlock_user(p
, arg4
, 0);
8234 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8237 #ifdef TARGET_NR_sigreturn
8238 case TARGET_NR_sigreturn
:
8239 if (block_signals()) {
8240 return -TARGET_ERESTARTSYS
;
8242 return do_sigreturn(cpu_env
);
8244 case TARGET_NR_rt_sigreturn
:
8245 if (block_signals()) {
8246 return -TARGET_ERESTARTSYS
;
8248 return do_rt_sigreturn(cpu_env
);
8249 case TARGET_NR_sethostname
:
8250 if (!(p
= lock_user_string(arg1
)))
8251 return -TARGET_EFAULT
;
8252 ret
= get_errno(sethostname(p
, arg2
));
8253 unlock_user(p
, arg1
, 0);
8255 #ifdef TARGET_NR_setrlimit
8256 case TARGET_NR_setrlimit
:
8258 int resource
= target_to_host_resource(arg1
);
8259 struct target_rlimit
*target_rlim
;
8261 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8262 return -TARGET_EFAULT
;
8263 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8264 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8265 unlock_user_struct(target_rlim
, arg2
, 0);
8267 * If we just passed through resource limit settings for memory then
8268 * they would also apply to QEMU's own allocations, and QEMU will
8269 * crash or hang or die if its allocations fail. Ideally we would
8270 * track the guest allocations in QEMU and apply the limits ourselves.
8271 * For now, just tell the guest the call succeeded but don't actually
8274 if (resource
!= RLIMIT_AS
&&
8275 resource
!= RLIMIT_DATA
&&
8276 resource
!= RLIMIT_STACK
) {
8277 return get_errno(setrlimit(resource
, &rlim
));
8283 #ifdef TARGET_NR_getrlimit
8284 case TARGET_NR_getrlimit
:
8286 int resource
= target_to_host_resource(arg1
);
8287 struct target_rlimit
*target_rlim
;
8290 ret
= get_errno(getrlimit(resource
, &rlim
));
8291 if (!is_error(ret
)) {
8292 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8293 return -TARGET_EFAULT
;
8294 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8295 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8296 unlock_user_struct(target_rlim
, arg2
, 1);
8301 case TARGET_NR_getrusage
:
8303 struct rusage rusage
;
8304 ret
= get_errno(getrusage(arg1
, &rusage
));
8305 if (!is_error(ret
)) {
8306 ret
= host_to_target_rusage(arg2
, &rusage
);
8310 case TARGET_NR_gettimeofday
:
8313 ret
= get_errno(gettimeofday(&tv
, NULL
));
8314 if (!is_error(ret
)) {
8315 if (copy_to_user_timeval(arg1
, &tv
))
8316 return -TARGET_EFAULT
;
8320 case TARGET_NR_settimeofday
:
8322 struct timeval tv
, *ptv
= NULL
;
8323 struct timezone tz
, *ptz
= NULL
;
8326 if (copy_from_user_timeval(&tv
, arg1
)) {
8327 return -TARGET_EFAULT
;
8333 if (copy_from_user_timezone(&tz
, arg2
)) {
8334 return -TARGET_EFAULT
;
8339 return get_errno(settimeofday(ptv
, ptz
));
8341 #if defined(TARGET_NR_select)
8342 case TARGET_NR_select
:
8343 #if defined(TARGET_WANT_NI_OLD_SELECT)
8344 /* some architectures used to have old_select here
8345 * but now ENOSYS it.
8347 ret
= -TARGET_ENOSYS
;
8348 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8349 ret
= do_old_select(arg1
);
8351 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8355 #ifdef TARGET_NR_pselect6
8356 case TARGET_NR_pselect6
:
8358 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8359 fd_set rfds
, wfds
, efds
;
8360 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8361 struct timespec ts
, *ts_ptr
;
8364 * The 6th arg is actually two args smashed together,
8365 * so we cannot use the C library.
8373 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8374 target_sigset_t
*target_sigset
;
8382 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8386 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8390 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8396 * This takes a timespec, and not a timeval, so we cannot
8397 * use the do_select() helper ...
8400 if (target_to_host_timespec(&ts
, ts_addr
)) {
8401 return -TARGET_EFAULT
;
8408 /* Extract the two packed args for the sigset */
8411 sig
.size
= SIGSET_T_SIZE
;
8413 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8415 return -TARGET_EFAULT
;
8417 arg_sigset
= tswapal(arg7
[0]);
8418 arg_sigsize
= tswapal(arg7
[1]);
8419 unlock_user(arg7
, arg6
, 0);
8423 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8424 /* Like the kernel, we enforce correct size sigsets */
8425 return -TARGET_EINVAL
;
8427 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8428 sizeof(*target_sigset
), 1);
8429 if (!target_sigset
) {
8430 return -TARGET_EFAULT
;
8432 target_to_host_sigset(&set
, target_sigset
);
8433 unlock_user(target_sigset
, arg_sigset
, 0);
8441 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8444 if (!is_error(ret
)) {
8445 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8446 return -TARGET_EFAULT
;
8447 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8448 return -TARGET_EFAULT
;
8449 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8450 return -TARGET_EFAULT
;
8452 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8453 return -TARGET_EFAULT
;
8458 #ifdef TARGET_NR_symlink
8459 case TARGET_NR_symlink
:
8462 p
= lock_user_string(arg1
);
8463 p2
= lock_user_string(arg2
);
8465 ret
= -TARGET_EFAULT
;
8467 ret
= get_errno(symlink(p
, p2
));
8468 unlock_user(p2
, arg2
, 0);
8469 unlock_user(p
, arg1
, 0);
8473 #if defined(TARGET_NR_symlinkat)
8474 case TARGET_NR_symlinkat
:
8477 p
= lock_user_string(arg1
);
8478 p2
= lock_user_string(arg3
);
8480 ret
= -TARGET_EFAULT
;
8482 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8483 unlock_user(p2
, arg3
, 0);
8484 unlock_user(p
, arg1
, 0);
8488 #ifdef TARGET_NR_readlink
8489 case TARGET_NR_readlink
:
8492 p
= lock_user_string(arg1
);
8493 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8495 ret
= -TARGET_EFAULT
;
8497 /* Short circuit this for the magic exe check. */
8498 ret
= -TARGET_EINVAL
;
8499 } else if (is_proc_myself((const char *)p
, "exe")) {
8500 char real
[PATH_MAX
], *temp
;
8501 temp
= realpath(exec_path
, real
);
8502 /* Return value is # of bytes that we wrote to the buffer. */
8504 ret
= get_errno(-1);
8506 /* Don't worry about sign mismatch as earlier mapping
8507 * logic would have thrown a bad address error. */
8508 ret
= MIN(strlen(real
), arg3
);
8509 /* We cannot NUL terminate the string. */
8510 memcpy(p2
, real
, ret
);
8513 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8515 unlock_user(p2
, arg2
, ret
);
8516 unlock_user(p
, arg1
, 0);
8520 #if defined(TARGET_NR_readlinkat)
8521 case TARGET_NR_readlinkat
:
8524 p
= lock_user_string(arg2
);
8525 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8527 ret
= -TARGET_EFAULT
;
8528 } else if (is_proc_myself((const char *)p
, "exe")) {
8529 char real
[PATH_MAX
], *temp
;
8530 temp
= realpath(exec_path
, real
);
8531 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8532 snprintf((char *)p2
, arg4
, "%s", real
);
8534 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8536 unlock_user(p2
, arg3
, ret
);
8537 unlock_user(p
, arg2
, 0);
8541 #ifdef TARGET_NR_swapon
8542 case TARGET_NR_swapon
:
8543 if (!(p
= lock_user_string(arg1
)))
8544 return -TARGET_EFAULT
;
8545 ret
= get_errno(swapon(p
, arg2
));
8546 unlock_user(p
, arg1
, 0);
8549 case TARGET_NR_reboot
:
8550 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8551 /* arg4 must be ignored in all other cases */
8552 p
= lock_user_string(arg4
);
8554 return -TARGET_EFAULT
;
8556 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8557 unlock_user(p
, arg4
, 0);
8559 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8562 #ifdef TARGET_NR_mmap
8563 case TARGET_NR_mmap
:
8564 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8565 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8566 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8567 || defined(TARGET_S390X)
8570 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8571 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8572 return -TARGET_EFAULT
;
8579 unlock_user(v
, arg1
, 0);
8580 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8581 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8585 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8586 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8592 #ifdef TARGET_NR_mmap2
8593 case TARGET_NR_mmap2
:
8595 #define MMAP_SHIFT 12
8597 ret
= target_mmap(arg1
, arg2
, arg3
,
8598 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8599 arg5
, arg6
<< MMAP_SHIFT
);
8600 return get_errno(ret
);
8602 case TARGET_NR_munmap
:
8603 return get_errno(target_munmap(arg1
, arg2
));
8604 case TARGET_NR_mprotect
:
8606 TaskState
*ts
= cpu
->opaque
;
8607 /* Special hack to detect libc making the stack executable. */
8608 if ((arg3
& PROT_GROWSDOWN
)
8609 && arg1
>= ts
->info
->stack_limit
8610 && arg1
<= ts
->info
->start_stack
) {
8611 arg3
&= ~PROT_GROWSDOWN
;
8612 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8613 arg1
= ts
->info
->stack_limit
;
8616 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8617 #ifdef TARGET_NR_mremap
8618 case TARGET_NR_mremap
:
8619 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8621 /* ??? msync/mlock/munlock are broken for softmmu. */
8622 #ifdef TARGET_NR_msync
8623 case TARGET_NR_msync
:
8624 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8626 #ifdef TARGET_NR_mlock
8627 case TARGET_NR_mlock
:
8628 return get_errno(mlock(g2h(arg1
), arg2
));
8630 #ifdef TARGET_NR_munlock
8631 case TARGET_NR_munlock
:
8632 return get_errno(munlock(g2h(arg1
), arg2
));
8634 #ifdef TARGET_NR_mlockall
8635 case TARGET_NR_mlockall
:
8636 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8638 #ifdef TARGET_NR_munlockall
8639 case TARGET_NR_munlockall
:
8640 return get_errno(munlockall());
8642 #ifdef TARGET_NR_truncate
8643 case TARGET_NR_truncate
:
8644 if (!(p
= lock_user_string(arg1
)))
8645 return -TARGET_EFAULT
;
8646 ret
= get_errno(truncate(p
, arg2
));
8647 unlock_user(p
, arg1
, 0);
8650 #ifdef TARGET_NR_ftruncate
8651 case TARGET_NR_ftruncate
:
8652 return get_errno(ftruncate(arg1
, arg2
));
8654 case TARGET_NR_fchmod
:
8655 return get_errno(fchmod(arg1
, arg2
));
8656 #if defined(TARGET_NR_fchmodat)
8657 case TARGET_NR_fchmodat
:
8658 if (!(p
= lock_user_string(arg2
)))
8659 return -TARGET_EFAULT
;
8660 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8661 unlock_user(p
, arg2
, 0);
8664 case TARGET_NR_getpriority
:
8665 /* Note that negative values are valid for getpriority, so we must
8666 differentiate based on errno settings. */
8668 ret
= getpriority(arg1
, arg2
);
8669 if (ret
== -1 && errno
!= 0) {
8670 return -host_to_target_errno(errno
);
8673 /* Return value is the unbiased priority. Signal no error. */
8674 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8676 /* Return value is a biased priority to avoid negative numbers. */
8680 case TARGET_NR_setpriority
:
8681 return get_errno(setpriority(arg1
, arg2
, arg3
));
8682 #ifdef TARGET_NR_statfs
8683 case TARGET_NR_statfs
:
8684 if (!(p
= lock_user_string(arg1
))) {
8685 return -TARGET_EFAULT
;
8687 ret
= get_errno(statfs(path(p
), &stfs
));
8688 unlock_user(p
, arg1
, 0);
8690 if (!is_error(ret
)) {
8691 struct target_statfs
*target_stfs
;
8693 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8694 return -TARGET_EFAULT
;
8695 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8696 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8697 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8698 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8699 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8700 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8701 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8702 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8703 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8704 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8705 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8706 #ifdef _STATFS_F_FLAGS
8707 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8709 __put_user(0, &target_stfs
->f_flags
);
8711 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8712 unlock_user_struct(target_stfs
, arg2
, 1);
8716 #ifdef TARGET_NR_fstatfs
8717 case TARGET_NR_fstatfs
:
8718 ret
= get_errno(fstatfs(arg1
, &stfs
));
8719 goto convert_statfs
;
8721 #ifdef TARGET_NR_statfs64
8722 case TARGET_NR_statfs64
:
8723 if (!(p
= lock_user_string(arg1
))) {
8724 return -TARGET_EFAULT
;
8726 ret
= get_errno(statfs(path(p
), &stfs
));
8727 unlock_user(p
, arg1
, 0);
8729 if (!is_error(ret
)) {
8730 struct target_statfs64
*target_stfs
;
8732 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8733 return -TARGET_EFAULT
;
8734 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8735 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8736 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8737 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8738 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8739 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8740 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8741 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8742 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8743 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8744 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8745 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8746 unlock_user_struct(target_stfs
, arg3
, 1);
8749 case TARGET_NR_fstatfs64
:
8750 ret
= get_errno(fstatfs(arg1
, &stfs
));
8751 goto convert_statfs64
;
8753 #ifdef TARGET_NR_socketcall
8754 case TARGET_NR_socketcall
:
8755 return do_socketcall(arg1
, arg2
);
8757 #ifdef TARGET_NR_accept
8758 case TARGET_NR_accept
:
8759 return do_accept4(arg1
, arg2
, arg3
, 0);
8761 #ifdef TARGET_NR_accept4
8762 case TARGET_NR_accept4
:
8763 return do_accept4(arg1
, arg2
, arg3
, arg4
);
8765 #ifdef TARGET_NR_bind
8766 case TARGET_NR_bind
:
8767 return do_bind(arg1
, arg2
, arg3
);
8769 #ifdef TARGET_NR_connect
8770 case TARGET_NR_connect
:
8771 return do_connect(arg1
, arg2
, arg3
);
8773 #ifdef TARGET_NR_getpeername
8774 case TARGET_NR_getpeername
:
8775 return do_getpeername(arg1
, arg2
, arg3
);
8777 #ifdef TARGET_NR_getsockname
8778 case TARGET_NR_getsockname
:
8779 return do_getsockname(arg1
, arg2
, arg3
);
8781 #ifdef TARGET_NR_getsockopt
8782 case TARGET_NR_getsockopt
:
8783 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8785 #ifdef TARGET_NR_listen
8786 case TARGET_NR_listen
:
8787 return get_errno(listen(arg1
, arg2
));
8789 #ifdef TARGET_NR_recv
8790 case TARGET_NR_recv
:
8791 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8793 #ifdef TARGET_NR_recvfrom
8794 case TARGET_NR_recvfrom
:
8795 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8797 #ifdef TARGET_NR_recvmsg
8798 case TARGET_NR_recvmsg
:
8799 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8801 #ifdef TARGET_NR_send
8802 case TARGET_NR_send
:
8803 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8805 #ifdef TARGET_NR_sendmsg
8806 case TARGET_NR_sendmsg
:
8807 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8809 #ifdef TARGET_NR_sendmmsg
8810 case TARGET_NR_sendmmsg
:
8811 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8812 case TARGET_NR_recvmmsg
:
8813 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8815 #ifdef TARGET_NR_sendto
8816 case TARGET_NR_sendto
:
8817 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8819 #ifdef TARGET_NR_shutdown
8820 case TARGET_NR_shutdown
:
8821 return get_errno(shutdown(arg1
, arg2
));
8823 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8824 case TARGET_NR_getrandom
:
8825 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8827 return -TARGET_EFAULT
;
8829 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8830 unlock_user(p
, arg1
, ret
);
8833 #ifdef TARGET_NR_socket
8834 case TARGET_NR_socket
:
8835 return do_socket(arg1
, arg2
, arg3
);
8837 #ifdef TARGET_NR_socketpair
8838 case TARGET_NR_socketpair
:
8839 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
8841 #ifdef TARGET_NR_setsockopt
8842 case TARGET_NR_setsockopt
:
8843 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8845 #if defined(TARGET_NR_syslog)
8846 case TARGET_NR_syslog
:
8851 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
8852 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
8853 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
8854 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
8855 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
8856 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
8857 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
8858 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
8859 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
8860 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
8861 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
8862 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
8865 return -TARGET_EINVAL
;
8870 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8872 return -TARGET_EFAULT
;
8874 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8875 unlock_user(p
, arg2
, arg3
);
8879 return -TARGET_EINVAL
;
8884 case TARGET_NR_setitimer
:
8886 struct itimerval value
, ovalue
, *pvalue
;
8890 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8891 || copy_from_user_timeval(&pvalue
->it_value
,
8892 arg2
+ sizeof(struct target_timeval
)))
8893 return -TARGET_EFAULT
;
8897 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8898 if (!is_error(ret
) && arg3
) {
8899 if (copy_to_user_timeval(arg3
,
8900 &ovalue
.it_interval
)
8901 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8903 return -TARGET_EFAULT
;
8907 case TARGET_NR_getitimer
:
8909 struct itimerval value
;
8911 ret
= get_errno(getitimer(arg1
, &value
));
8912 if (!is_error(ret
) && arg2
) {
8913 if (copy_to_user_timeval(arg2
,
8915 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8917 return -TARGET_EFAULT
;
8921 #ifdef TARGET_NR_stat
8922 case TARGET_NR_stat
:
8923 if (!(p
= lock_user_string(arg1
))) {
8924 return -TARGET_EFAULT
;
8926 ret
= get_errno(stat(path(p
), &st
));
8927 unlock_user(p
, arg1
, 0);
8930 #ifdef TARGET_NR_lstat
8931 case TARGET_NR_lstat
:
8932 if (!(p
= lock_user_string(arg1
))) {
8933 return -TARGET_EFAULT
;
8935 ret
= get_errno(lstat(path(p
), &st
));
8936 unlock_user(p
, arg1
, 0);
8939 #ifdef TARGET_NR_fstat
8940 case TARGET_NR_fstat
:
8942 ret
= get_errno(fstat(arg1
, &st
));
8943 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8946 if (!is_error(ret
)) {
8947 struct target_stat
*target_st
;
8949 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8950 return -TARGET_EFAULT
;
8951 memset(target_st
, 0, sizeof(*target_st
));
8952 __put_user(st
.st_dev
, &target_st
->st_dev
);
8953 __put_user(st
.st_ino
, &target_st
->st_ino
);
8954 __put_user(st
.st_mode
, &target_st
->st_mode
);
8955 __put_user(st
.st_uid
, &target_st
->st_uid
);
8956 __put_user(st
.st_gid
, &target_st
->st_gid
);
8957 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8958 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8959 __put_user(st
.st_size
, &target_st
->st_size
);
8960 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8961 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8962 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8963 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8964 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8965 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
8966 defined(TARGET_STAT_HAVE_NSEC)
8967 __put_user(st
.st_atim
.tv_nsec
,
8968 &target_st
->target_st_atime_nsec
);
8969 __put_user(st
.st_mtim
.tv_nsec
,
8970 &target_st
->target_st_mtime_nsec
);
8971 __put_user(st
.st_ctim
.tv_nsec
,
8972 &target_st
->target_st_ctime_nsec
);
8974 unlock_user_struct(target_st
, arg2
, 1);
8979 case TARGET_NR_vhangup
:
8980 return get_errno(vhangup());
8981 #ifdef TARGET_NR_syscall
8982 case TARGET_NR_syscall
:
8983 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8984 arg6
, arg7
, arg8
, 0);
8986 case TARGET_NR_wait4
:
8989 abi_long status_ptr
= arg2
;
8990 struct rusage rusage
, *rusage_ptr
;
8991 abi_ulong target_rusage
= arg4
;
8992 abi_long rusage_err
;
8994 rusage_ptr
= &rusage
;
8997 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8998 if (!is_error(ret
)) {
8999 if (status_ptr
&& ret
) {
9000 status
= host_to_target_waitstatus(status
);
9001 if (put_user_s32(status
, status_ptr
))
9002 return -TARGET_EFAULT
;
9004 if (target_rusage
) {
9005 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9013 #ifdef TARGET_NR_swapoff
9014 case TARGET_NR_swapoff
:
9015 if (!(p
= lock_user_string(arg1
)))
9016 return -TARGET_EFAULT
;
9017 ret
= get_errno(swapoff(p
));
9018 unlock_user(p
, arg1
, 0);
9021 case TARGET_NR_sysinfo
:
9023 struct target_sysinfo
*target_value
;
9024 struct sysinfo value
;
9025 ret
= get_errno(sysinfo(&value
));
9026 if (!is_error(ret
) && arg1
)
9028 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9029 return -TARGET_EFAULT
;
9030 __put_user(value
.uptime
, &target_value
->uptime
);
9031 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9032 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9033 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9034 __put_user(value
.totalram
, &target_value
->totalram
);
9035 __put_user(value
.freeram
, &target_value
->freeram
);
9036 __put_user(value
.sharedram
, &target_value
->sharedram
);
9037 __put_user(value
.bufferram
, &target_value
->bufferram
);
9038 __put_user(value
.totalswap
, &target_value
->totalswap
);
9039 __put_user(value
.freeswap
, &target_value
->freeswap
);
9040 __put_user(value
.procs
, &target_value
->procs
);
9041 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9042 __put_user(value
.freehigh
, &target_value
->freehigh
);
9043 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9044 unlock_user_struct(target_value
, arg1
, 1);
9048 #ifdef TARGET_NR_ipc
9050 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9052 #ifdef TARGET_NR_semget
9053 case TARGET_NR_semget
:
9054 return get_errno(semget(arg1
, arg2
, arg3
));
9056 #ifdef TARGET_NR_semop
9057 case TARGET_NR_semop
:
9058 return do_semop(arg1
, arg2
, arg3
);
9060 #ifdef TARGET_NR_semctl
9061 case TARGET_NR_semctl
:
9062 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9064 #ifdef TARGET_NR_msgctl
9065 case TARGET_NR_msgctl
:
9066 return do_msgctl(arg1
, arg2
, arg3
);
9068 #ifdef TARGET_NR_msgget
9069 case TARGET_NR_msgget
:
9070 return get_errno(msgget(arg1
, arg2
));
9072 #ifdef TARGET_NR_msgrcv
9073 case TARGET_NR_msgrcv
:
9074 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9076 #ifdef TARGET_NR_msgsnd
9077 case TARGET_NR_msgsnd
:
9078 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9080 #ifdef TARGET_NR_shmget
9081 case TARGET_NR_shmget
:
9082 return get_errno(shmget(arg1
, arg2
, arg3
));
9084 #ifdef TARGET_NR_shmctl
9085 case TARGET_NR_shmctl
:
9086 return do_shmctl(arg1
, arg2
, arg3
);
9088 #ifdef TARGET_NR_shmat
9089 case TARGET_NR_shmat
:
9090 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9092 #ifdef TARGET_NR_shmdt
9093 case TARGET_NR_shmdt
:
9094 return do_shmdt(arg1
);
9096 case TARGET_NR_fsync
:
9097 return get_errno(fsync(arg1
));
9098 case TARGET_NR_clone
:
9099 /* Linux manages to have three different orderings for its
9100 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9101 * match the kernel's CONFIG_CLONE_* settings.
9102 * Microblaze is further special in that it uses a sixth
9103 * implicit argument to clone for the TLS pointer.
9105 #if defined(TARGET_MICROBLAZE)
9106 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9107 #elif defined(TARGET_CLONE_BACKWARDS)
9108 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9109 #elif defined(TARGET_CLONE_BACKWARDS2)
9110 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9112 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9115 #ifdef __NR_exit_group
9116 /* new thread calls */
9117 case TARGET_NR_exit_group
:
9118 preexit_cleanup(cpu_env
, arg1
);
9119 return get_errno(exit_group(arg1
));
9121 case TARGET_NR_setdomainname
:
9122 if (!(p
= lock_user_string(arg1
)))
9123 return -TARGET_EFAULT
;
9124 ret
= get_errno(setdomainname(p
, arg2
));
9125 unlock_user(p
, arg1
, 0);
9127 case TARGET_NR_uname
:
9128 /* no need to transcode because we use the linux syscall */
9130 struct new_utsname
* buf
;
9132 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9133 return -TARGET_EFAULT
;
9134 ret
= get_errno(sys_uname(buf
));
9135 if (!is_error(ret
)) {
9136 /* Overwrite the native machine name with whatever is being
9138 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9139 sizeof(buf
->machine
));
9140 /* Allow the user to override the reported release. */
9141 if (qemu_uname_release
&& *qemu_uname_release
) {
9142 g_strlcpy(buf
->release
, qemu_uname_release
,
9143 sizeof(buf
->release
));
9146 unlock_user_struct(buf
, arg1
, 1);
9150 case TARGET_NR_modify_ldt
:
9151 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9152 #if !defined(TARGET_X86_64)
9153 case TARGET_NR_vm86
:
9154 return do_vm86(cpu_env
, arg1
, arg2
);
9157 case TARGET_NR_adjtimex
:
9159 struct timex host_buf
;
9161 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9162 return -TARGET_EFAULT
;
9164 ret
= get_errno(adjtimex(&host_buf
));
9165 if (!is_error(ret
)) {
9166 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9167 return -TARGET_EFAULT
;
9172 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9173 case TARGET_NR_clock_adjtime
:
9175 struct timex htx
, *phtx
= &htx
;
9177 if (target_to_host_timex(phtx
, arg2
) != 0) {
9178 return -TARGET_EFAULT
;
9180 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9181 if (!is_error(ret
) && phtx
) {
9182 if (host_to_target_timex(arg2
, phtx
) != 0) {
9183 return -TARGET_EFAULT
;
9189 case TARGET_NR_getpgid
:
9190 return get_errno(getpgid(arg1
));
9191 case TARGET_NR_fchdir
:
9192 return get_errno(fchdir(arg1
));
9193 case TARGET_NR_personality
:
9194 return get_errno(personality(arg1
));
9195 #ifdef TARGET_NR__llseek /* Not on alpha */
9196 case TARGET_NR__llseek
:
9199 #if !defined(__NR_llseek)
9200 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9202 ret
= get_errno(res
);
9207 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9209 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9210 return -TARGET_EFAULT
;
9215 #ifdef TARGET_NR_getdents
9216 case TARGET_NR_getdents
:
9217 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9218 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9220 struct target_dirent
*target_dirp
;
9221 struct linux_dirent
*dirp
;
9222 abi_long count
= arg3
;
9224 dirp
= g_try_malloc(count
);
9226 return -TARGET_ENOMEM
;
9229 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9230 if (!is_error(ret
)) {
9231 struct linux_dirent
*de
;
9232 struct target_dirent
*tde
;
9234 int reclen
, treclen
;
9235 int count1
, tnamelen
;
9239 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9240 return -TARGET_EFAULT
;
9243 reclen
= de
->d_reclen
;
9244 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9245 assert(tnamelen
>= 0);
9246 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9247 assert(count1
+ treclen
<= count
);
9248 tde
->d_reclen
= tswap16(treclen
);
9249 tde
->d_ino
= tswapal(de
->d_ino
);
9250 tde
->d_off
= tswapal(de
->d_off
);
9251 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9252 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9254 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9258 unlock_user(target_dirp
, arg2
, ret
);
9264 struct linux_dirent
*dirp
;
9265 abi_long count
= arg3
;
9267 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9268 return -TARGET_EFAULT
;
9269 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9270 if (!is_error(ret
)) {
9271 struct linux_dirent
*de
;
9276 reclen
= de
->d_reclen
;
9279 de
->d_reclen
= tswap16(reclen
);
9280 tswapls(&de
->d_ino
);
9281 tswapls(&de
->d_off
);
9282 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9286 unlock_user(dirp
, arg2
, ret
);
9290 /* Implement getdents in terms of getdents64 */
9292 struct linux_dirent64
*dirp
;
9293 abi_long count
= arg3
;
9295 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9297 return -TARGET_EFAULT
;
9299 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9300 if (!is_error(ret
)) {
9301 /* Convert the dirent64 structs to target dirent. We do this
9302 * in-place, since we can guarantee that a target_dirent is no
9303 * larger than a dirent64; however this means we have to be
9304 * careful to read everything before writing in the new format.
9306 struct linux_dirent64
*de
;
9307 struct target_dirent
*tde
;
9312 tde
= (struct target_dirent
*)dirp
;
9314 int namelen
, treclen
;
9315 int reclen
= de
->d_reclen
;
9316 uint64_t ino
= de
->d_ino
;
9317 int64_t off
= de
->d_off
;
9318 uint8_t type
= de
->d_type
;
9320 namelen
= strlen(de
->d_name
);
9321 treclen
= offsetof(struct target_dirent
, d_name
)
9323 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9325 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9326 tde
->d_ino
= tswapal(ino
);
9327 tde
->d_off
= tswapal(off
);
9328 tde
->d_reclen
= tswap16(treclen
);
9329 /* The target_dirent type is in what was formerly a padding
9330 * byte at the end of the structure:
9332 *(((char *)tde
) + treclen
- 1) = type
;
9334 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9335 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9341 unlock_user(dirp
, arg2
, ret
);
9345 #endif /* TARGET_NR_getdents */
9346 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9347 case TARGET_NR_getdents64
:
9349 struct linux_dirent64
*dirp
;
9350 abi_long count
= arg3
;
9351 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9352 return -TARGET_EFAULT
;
9353 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9354 if (!is_error(ret
)) {
9355 struct linux_dirent64
*de
;
9360 reclen
= de
->d_reclen
;
9363 de
->d_reclen
= tswap16(reclen
);
9364 tswap64s((uint64_t *)&de
->d_ino
);
9365 tswap64s((uint64_t *)&de
->d_off
);
9366 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9370 unlock_user(dirp
, arg2
, ret
);
9373 #endif /* TARGET_NR_getdents64 */
9374 #if defined(TARGET_NR__newselect)
9375 case TARGET_NR__newselect
:
9376 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9378 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9379 # ifdef TARGET_NR_poll
9380 case TARGET_NR_poll
:
9382 # ifdef TARGET_NR_ppoll
9383 case TARGET_NR_ppoll
:
9386 struct target_pollfd
*target_pfd
;
9387 unsigned int nfds
= arg2
;
9394 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9395 return -TARGET_EINVAL
;
9398 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9399 sizeof(struct target_pollfd
) * nfds
, 1);
9401 return -TARGET_EFAULT
;
9404 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9405 for (i
= 0; i
< nfds
; i
++) {
9406 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9407 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9412 # ifdef TARGET_NR_ppoll
9413 case TARGET_NR_ppoll
:
9415 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9416 target_sigset_t
*target_set
;
9417 sigset_t _set
, *set
= &_set
;
9420 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9421 unlock_user(target_pfd
, arg1
, 0);
9422 return -TARGET_EFAULT
;
9429 if (arg5
!= sizeof(target_sigset_t
)) {
9430 unlock_user(target_pfd
, arg1
, 0);
9431 return -TARGET_EINVAL
;
9434 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9436 unlock_user(target_pfd
, arg1
, 0);
9437 return -TARGET_EFAULT
;
9439 target_to_host_sigset(set
, target_set
);
9444 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9445 set
, SIGSET_T_SIZE
));
9447 if (!is_error(ret
) && arg3
) {
9448 host_to_target_timespec(arg3
, timeout_ts
);
9451 unlock_user(target_set
, arg4
, 0);
9456 # ifdef TARGET_NR_poll
9457 case TARGET_NR_poll
:
9459 struct timespec ts
, *pts
;
9462 /* Convert ms to secs, ns */
9463 ts
.tv_sec
= arg3
/ 1000;
9464 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9467 /* -ve poll() timeout means "infinite" */
9470 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9475 g_assert_not_reached();
9478 if (!is_error(ret
)) {
9479 for(i
= 0; i
< nfds
; i
++) {
9480 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9483 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9487 case TARGET_NR_flock
:
9488 /* NOTE: the flock constant seems to be the same for every
9490 return get_errno(safe_flock(arg1
, arg2
));
9491 case TARGET_NR_readv
:
9493 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9495 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9496 unlock_iovec(vec
, arg2
, arg3
, 1);
9498 ret
= -host_to_target_errno(errno
);
9502 case TARGET_NR_writev
:
9504 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9506 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9507 unlock_iovec(vec
, arg2
, arg3
, 0);
9509 ret
= -host_to_target_errno(errno
);
9513 #if defined(TARGET_NR_preadv)
9514 case TARGET_NR_preadv
:
9516 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9518 unsigned long low
, high
;
9520 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9521 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9522 unlock_iovec(vec
, arg2
, arg3
, 1);
9524 ret
= -host_to_target_errno(errno
);
9529 #if defined(TARGET_NR_pwritev)
9530 case TARGET_NR_pwritev
:
9532 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9534 unsigned long low
, high
;
9536 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9537 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9538 unlock_iovec(vec
, arg2
, arg3
, 0);
9540 ret
= -host_to_target_errno(errno
);
9545 case TARGET_NR_getsid
:
9546 return get_errno(getsid(arg1
));
9547 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9548 case TARGET_NR_fdatasync
:
9549 return get_errno(fdatasync(arg1
));
9551 #ifdef TARGET_NR__sysctl
9552 case TARGET_NR__sysctl
:
9553 /* We don't implement this, but ENOTDIR is always a safe
9555 return -TARGET_ENOTDIR
;
9557 case TARGET_NR_sched_getaffinity
:
9559 unsigned int mask_size
;
9560 unsigned long *mask
;
9563 * sched_getaffinity needs multiples of ulong, so need to take
9564 * care of mismatches between target ulong and host ulong sizes.
9566 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9567 return -TARGET_EINVAL
;
9569 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9571 mask
= alloca(mask_size
);
9572 memset(mask
, 0, mask_size
);
9573 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9575 if (!is_error(ret
)) {
9577 /* More data returned than the caller's buffer will fit.
9578 * This only happens if sizeof(abi_long) < sizeof(long)
9579 * and the caller passed us a buffer holding an odd number
9580 * of abi_longs. If the host kernel is actually using the
9581 * extra 4 bytes then fail EINVAL; otherwise we can just
9582 * ignore them and only copy the interesting part.
9584 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9585 if (numcpus
> arg2
* 8) {
9586 return -TARGET_EINVAL
;
9591 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9592 return -TARGET_EFAULT
;
9597 case TARGET_NR_sched_setaffinity
:
9599 unsigned int mask_size
;
9600 unsigned long *mask
;
9603 * sched_setaffinity needs multiples of ulong, so need to take
9604 * care of mismatches between target ulong and host ulong sizes.
9606 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9607 return -TARGET_EINVAL
;
9609 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9610 mask
= alloca(mask_size
);
9612 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9617 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9619 case TARGET_NR_getcpu
:
9622 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9623 arg2
? &node
: NULL
,
9625 if (is_error(ret
)) {
9628 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9629 return -TARGET_EFAULT
;
9631 if (arg2
&& put_user_u32(node
, arg2
)) {
9632 return -TARGET_EFAULT
;
9636 case TARGET_NR_sched_setparam
:
9638 struct sched_param
*target_schp
;
9639 struct sched_param schp
;
9642 return -TARGET_EINVAL
;
9644 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9645 return -TARGET_EFAULT
;
9646 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9647 unlock_user_struct(target_schp
, arg2
, 0);
9648 return get_errno(sched_setparam(arg1
, &schp
));
9650 case TARGET_NR_sched_getparam
:
9652 struct sched_param
*target_schp
;
9653 struct sched_param schp
;
9656 return -TARGET_EINVAL
;
9658 ret
= get_errno(sched_getparam(arg1
, &schp
));
9659 if (!is_error(ret
)) {
9660 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9661 return -TARGET_EFAULT
;
9662 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9663 unlock_user_struct(target_schp
, arg2
, 1);
9667 case TARGET_NR_sched_setscheduler
:
9669 struct sched_param
*target_schp
;
9670 struct sched_param schp
;
9672 return -TARGET_EINVAL
;
9674 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9675 return -TARGET_EFAULT
;
9676 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9677 unlock_user_struct(target_schp
, arg3
, 0);
9678 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9680 case TARGET_NR_sched_getscheduler
:
9681 return get_errno(sched_getscheduler(arg1
));
9682 case TARGET_NR_sched_yield
:
9683 return get_errno(sched_yield());
9684 case TARGET_NR_sched_get_priority_max
:
9685 return get_errno(sched_get_priority_max(arg1
));
9686 case TARGET_NR_sched_get_priority_min
:
9687 return get_errno(sched_get_priority_min(arg1
));
9688 case TARGET_NR_sched_rr_get_interval
:
9691 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9692 if (!is_error(ret
)) {
9693 ret
= host_to_target_timespec(arg2
, &ts
);
9697 case TARGET_NR_nanosleep
:
9699 struct timespec req
, rem
;
9700 target_to_host_timespec(&req
, arg1
);
9701 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9702 if (is_error(ret
) && arg2
) {
9703 host_to_target_timespec(arg2
, &rem
);
9707 case TARGET_NR_prctl
:
9709 case PR_GET_PDEATHSIG
:
9712 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9713 if (!is_error(ret
) && arg2
9714 && put_user_ual(deathsig
, arg2
)) {
9715 return -TARGET_EFAULT
;
9722 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9724 return -TARGET_EFAULT
;
9726 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9728 unlock_user(name
, arg2
, 16);
9733 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9735 return -TARGET_EFAULT
;
9737 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9739 unlock_user(name
, arg2
, 0);
9744 case TARGET_PR_GET_FP_MODE
:
9746 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9748 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
9749 ret
|= TARGET_PR_FP_MODE_FR
;
9751 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
9752 ret
|= TARGET_PR_FP_MODE_FRE
;
9756 case TARGET_PR_SET_FP_MODE
:
9758 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9759 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
9760 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
9761 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
9762 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
9764 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
9765 TARGET_PR_FP_MODE_FRE
;
9767 /* If nothing to change, return right away, successfully. */
9768 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
9771 /* Check the value is valid */
9772 if (arg2
& ~known_bits
) {
9773 return -TARGET_EOPNOTSUPP
;
9775 /* Setting FRE without FR is not supported. */
9776 if (new_fre
&& !new_fr
) {
9777 return -TARGET_EOPNOTSUPP
;
9779 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
9780 /* FR1 is not supported */
9781 return -TARGET_EOPNOTSUPP
;
9783 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
9784 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
9785 /* cannot set FR=0 */
9786 return -TARGET_EOPNOTSUPP
;
9788 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
9789 /* Cannot set FRE=1 */
9790 return -TARGET_EOPNOTSUPP
;
9794 fpr_t
*fpr
= env
->active_fpu
.fpr
;
9795 for (i
= 0; i
< 32 ; i
+= 2) {
9796 if (!old_fr
&& new_fr
) {
9797 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
9798 } else if (old_fr
&& !new_fr
) {
9799 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
9804 env
->CP0_Status
|= (1 << CP0St_FR
);
9805 env
->hflags
|= MIPS_HFLAG_F64
;
9807 env
->CP0_Status
&= ~(1 << CP0St_FR
);
9808 env
->hflags
&= ~MIPS_HFLAG_F64
;
9811 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
9812 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
9813 env
->hflags
|= MIPS_HFLAG_FRE
;
9816 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
9817 env
->hflags
&= ~MIPS_HFLAG_FRE
;
9823 #ifdef TARGET_AARCH64
9824 case TARGET_PR_SVE_SET_VL
:
9826 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9827 * PR_SVE_VL_INHERIT. Note the kernel definition
9828 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9829 * even though the current architectural maximum is VQ=16.
9831 ret
= -TARGET_EINVAL
;
9832 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
9833 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
9834 CPUARMState
*env
= cpu_env
;
9835 ARMCPU
*cpu
= env_archcpu(env
);
9836 uint32_t vq
, old_vq
;
9838 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
9839 vq
= MAX(arg2
/ 16, 1);
9840 vq
= MIN(vq
, cpu
->sve_max_vq
);
9843 aarch64_sve_narrow_vq(env
, vq
);
9845 env
->vfp
.zcr_el
[1] = vq
- 1;
9849 case TARGET_PR_SVE_GET_VL
:
9850 ret
= -TARGET_EINVAL
;
9852 ARMCPU
*cpu
= env_archcpu(cpu_env
);
9853 if (cpu_isar_feature(aa64_sve
, cpu
)) {
9854 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
9858 case TARGET_PR_PAC_RESET_KEYS
:
9860 CPUARMState
*env
= cpu_env
;
9861 ARMCPU
*cpu
= env_archcpu(env
);
9863 if (arg3
|| arg4
|| arg5
) {
9864 return -TARGET_EINVAL
;
9866 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
9867 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
9868 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
9869 TARGET_PR_PAC_APGAKEY
);
9875 } else if (arg2
& ~all
) {
9876 return -TARGET_EINVAL
;
9878 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
9879 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
9880 sizeof(ARMPACKey
), &err
);
9882 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
9883 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
9884 sizeof(ARMPACKey
), &err
);
9886 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
9887 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
9888 sizeof(ARMPACKey
), &err
);
9890 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
9891 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
9892 sizeof(ARMPACKey
), &err
);
9894 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
9895 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
9896 sizeof(ARMPACKey
), &err
);
9900 * Some unknown failure in the crypto. The best
9901 * we can do is log it and fail the syscall.
9902 * The real syscall cannot fail this way.
9904 qemu_log_mask(LOG_UNIMP
,
9905 "PR_PAC_RESET_KEYS: Crypto failure: %s",
9906 error_get_pretty(err
));
9913 return -TARGET_EINVAL
;
9914 #endif /* AARCH64 */
9915 case PR_GET_SECCOMP
:
9916 case PR_SET_SECCOMP
:
9917 /* Disable seccomp to prevent the target disabling syscalls we
9919 return -TARGET_EINVAL
;
9921 /* Most prctl options have no pointer arguments */
9922 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9925 #ifdef TARGET_NR_arch_prctl
9926 case TARGET_NR_arch_prctl
:
9927 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9928 return do_arch_prctl(cpu_env
, arg1
, arg2
);
9933 #ifdef TARGET_NR_pread64
9934 case TARGET_NR_pread64
:
9935 if (regpairs_aligned(cpu_env
, num
)) {
9939 if (arg2
== 0 && arg3
== 0) {
9940 /* Special-case NULL buffer and zero length, which should succeed */
9943 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9945 return -TARGET_EFAULT
;
9948 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9949 unlock_user(p
, arg2
, ret
);
9951 case TARGET_NR_pwrite64
:
9952 if (regpairs_aligned(cpu_env
, num
)) {
9956 if (arg2
== 0 && arg3
== 0) {
9957 /* Special-case NULL buffer and zero length, which should succeed */
9960 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
9962 return -TARGET_EFAULT
;
9965 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9966 unlock_user(p
, arg2
, 0);
9969 case TARGET_NR_getcwd
:
9970 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9971 return -TARGET_EFAULT
;
9972 ret
= get_errno(sys_getcwd1(p
, arg2
));
9973 unlock_user(p
, arg1
, ret
);
9975 case TARGET_NR_capget
:
9976 case TARGET_NR_capset
:
9978 struct target_user_cap_header
*target_header
;
9979 struct target_user_cap_data
*target_data
= NULL
;
9980 struct __user_cap_header_struct header
;
9981 struct __user_cap_data_struct data
[2];
9982 struct __user_cap_data_struct
*dataptr
= NULL
;
9983 int i
, target_datalen
;
9986 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9987 return -TARGET_EFAULT
;
9989 header
.version
= tswap32(target_header
->version
);
9990 header
.pid
= tswap32(target_header
->pid
);
9992 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9993 /* Version 2 and up takes pointer to two user_data structs */
9997 target_datalen
= sizeof(*target_data
) * data_items
;
10000 if (num
== TARGET_NR_capget
) {
10001 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10003 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10005 if (!target_data
) {
10006 unlock_user_struct(target_header
, arg1
, 0);
10007 return -TARGET_EFAULT
;
10010 if (num
== TARGET_NR_capset
) {
10011 for (i
= 0; i
< data_items
; i
++) {
10012 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10013 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10014 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10021 if (num
== TARGET_NR_capget
) {
10022 ret
= get_errno(capget(&header
, dataptr
));
10024 ret
= get_errno(capset(&header
, dataptr
));
10027 /* The kernel always updates version for both capget and capset */
10028 target_header
->version
= tswap32(header
.version
);
10029 unlock_user_struct(target_header
, arg1
, 1);
10032 if (num
== TARGET_NR_capget
) {
10033 for (i
= 0; i
< data_items
; i
++) {
10034 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10035 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10036 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10038 unlock_user(target_data
, arg2
, target_datalen
);
10040 unlock_user(target_data
, arg2
, 0);
10045 case TARGET_NR_sigaltstack
:
10046 return do_sigaltstack(arg1
, arg2
,
10047 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10049 #ifdef CONFIG_SENDFILE
10050 #ifdef TARGET_NR_sendfile
10051 case TARGET_NR_sendfile
:
10053 off_t
*offp
= NULL
;
10056 ret
= get_user_sal(off
, arg3
);
10057 if (is_error(ret
)) {
10062 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10063 if (!is_error(ret
) && arg3
) {
10064 abi_long ret2
= put_user_sal(off
, arg3
);
10065 if (is_error(ret2
)) {
10072 #ifdef TARGET_NR_sendfile64
10073 case TARGET_NR_sendfile64
:
10075 off_t
*offp
= NULL
;
10078 ret
= get_user_s64(off
, arg3
);
10079 if (is_error(ret
)) {
10084 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10085 if (!is_error(ret
) && arg3
) {
10086 abi_long ret2
= put_user_s64(off
, arg3
);
10087 if (is_error(ret2
)) {
10095 #ifdef TARGET_NR_vfork
10096 case TARGET_NR_vfork
:
10097 return get_errno(do_fork(cpu_env
,
10098 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10101 #ifdef TARGET_NR_ugetrlimit
10102 case TARGET_NR_ugetrlimit
:
10104 struct rlimit rlim
;
10105 int resource
= target_to_host_resource(arg1
);
10106 ret
= get_errno(getrlimit(resource
, &rlim
));
10107 if (!is_error(ret
)) {
10108 struct target_rlimit
*target_rlim
;
10109 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10110 return -TARGET_EFAULT
;
10111 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10112 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10113 unlock_user_struct(target_rlim
, arg2
, 1);
10118 #ifdef TARGET_NR_truncate64
10119 case TARGET_NR_truncate64
:
10120 if (!(p
= lock_user_string(arg1
)))
10121 return -TARGET_EFAULT
;
10122 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10123 unlock_user(p
, arg1
, 0);
10126 #ifdef TARGET_NR_ftruncate64
10127 case TARGET_NR_ftruncate64
:
10128 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10130 #ifdef TARGET_NR_stat64
10131 case TARGET_NR_stat64
:
10132 if (!(p
= lock_user_string(arg1
))) {
10133 return -TARGET_EFAULT
;
10135 ret
= get_errno(stat(path(p
), &st
));
10136 unlock_user(p
, arg1
, 0);
10137 if (!is_error(ret
))
10138 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10141 #ifdef TARGET_NR_lstat64
10142 case TARGET_NR_lstat64
:
10143 if (!(p
= lock_user_string(arg1
))) {
10144 return -TARGET_EFAULT
;
10146 ret
= get_errno(lstat(path(p
), &st
));
10147 unlock_user(p
, arg1
, 0);
10148 if (!is_error(ret
))
10149 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10152 #ifdef TARGET_NR_fstat64
10153 case TARGET_NR_fstat64
:
10154 ret
= get_errno(fstat(arg1
, &st
));
10155 if (!is_error(ret
))
10156 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10159 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10160 #ifdef TARGET_NR_fstatat64
10161 case TARGET_NR_fstatat64
:
10163 #ifdef TARGET_NR_newfstatat
10164 case TARGET_NR_newfstatat
:
10166 if (!(p
= lock_user_string(arg2
))) {
10167 return -TARGET_EFAULT
;
10169 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10170 unlock_user(p
, arg2
, 0);
10171 if (!is_error(ret
))
10172 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10175 #ifdef TARGET_NR_lchown
10176 case TARGET_NR_lchown
:
10177 if (!(p
= lock_user_string(arg1
)))
10178 return -TARGET_EFAULT
;
10179 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10180 unlock_user(p
, arg1
, 0);
10183 #ifdef TARGET_NR_getuid
10184 case TARGET_NR_getuid
:
10185 return get_errno(high2lowuid(getuid()));
10187 #ifdef TARGET_NR_getgid
10188 case TARGET_NR_getgid
:
10189 return get_errno(high2lowgid(getgid()));
10191 #ifdef TARGET_NR_geteuid
10192 case TARGET_NR_geteuid
:
10193 return get_errno(high2lowuid(geteuid()));
10195 #ifdef TARGET_NR_getegid
10196 case TARGET_NR_getegid
:
10197 return get_errno(high2lowgid(getegid()));
10199 case TARGET_NR_setreuid
:
10200 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10201 case TARGET_NR_setregid
:
10202 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10203 case TARGET_NR_getgroups
:
10205 int gidsetsize
= arg1
;
10206 target_id
*target_grouplist
;
10210 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10211 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10212 if (gidsetsize
== 0)
10214 if (!is_error(ret
)) {
10215 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10216 if (!target_grouplist
)
10217 return -TARGET_EFAULT
;
10218 for(i
= 0;i
< ret
; i
++)
10219 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10220 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10224 case TARGET_NR_setgroups
:
10226 int gidsetsize
= arg1
;
10227 target_id
*target_grouplist
;
10228 gid_t
*grouplist
= NULL
;
10231 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10232 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10233 if (!target_grouplist
) {
10234 return -TARGET_EFAULT
;
10236 for (i
= 0; i
< gidsetsize
; i
++) {
10237 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10239 unlock_user(target_grouplist
, arg2
, 0);
10241 return get_errno(setgroups(gidsetsize
, grouplist
));
10243 case TARGET_NR_fchown
:
10244 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10245 #if defined(TARGET_NR_fchownat)
10246 case TARGET_NR_fchownat
:
10247 if (!(p
= lock_user_string(arg2
)))
10248 return -TARGET_EFAULT
;
10249 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10250 low2highgid(arg4
), arg5
));
10251 unlock_user(p
, arg2
, 0);
10254 #ifdef TARGET_NR_setresuid
10255 case TARGET_NR_setresuid
:
10256 return get_errno(sys_setresuid(low2highuid(arg1
),
10258 low2highuid(arg3
)));
10260 #ifdef TARGET_NR_getresuid
10261 case TARGET_NR_getresuid
:
10263 uid_t ruid
, euid
, suid
;
10264 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10265 if (!is_error(ret
)) {
10266 if (put_user_id(high2lowuid(ruid
), arg1
)
10267 || put_user_id(high2lowuid(euid
), arg2
)
10268 || put_user_id(high2lowuid(suid
), arg3
))
10269 return -TARGET_EFAULT
;
10274 #ifdef TARGET_NR_getresgid
10275 case TARGET_NR_setresgid
:
10276 return get_errno(sys_setresgid(low2highgid(arg1
),
10278 low2highgid(arg3
)));
10280 #ifdef TARGET_NR_getresgid
10281 case TARGET_NR_getresgid
:
10283 gid_t rgid
, egid
, sgid
;
10284 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10285 if (!is_error(ret
)) {
10286 if (put_user_id(high2lowgid(rgid
), arg1
)
10287 || put_user_id(high2lowgid(egid
), arg2
)
10288 || put_user_id(high2lowgid(sgid
), arg3
))
10289 return -TARGET_EFAULT
;
10294 #ifdef TARGET_NR_chown
10295 case TARGET_NR_chown
:
10296 if (!(p
= lock_user_string(arg1
)))
10297 return -TARGET_EFAULT
;
10298 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10299 unlock_user(p
, arg1
, 0);
10302 case TARGET_NR_setuid
:
10303 return get_errno(sys_setuid(low2highuid(arg1
)));
10304 case TARGET_NR_setgid
:
10305 return get_errno(sys_setgid(low2highgid(arg1
)));
10306 case TARGET_NR_setfsuid
:
10307 return get_errno(setfsuid(arg1
));
10308 case TARGET_NR_setfsgid
:
10309 return get_errno(setfsgid(arg1
));
10311 #ifdef TARGET_NR_lchown32
10312 case TARGET_NR_lchown32
:
10313 if (!(p
= lock_user_string(arg1
)))
10314 return -TARGET_EFAULT
;
10315 ret
= get_errno(lchown(p
, arg2
, arg3
));
10316 unlock_user(p
, arg1
, 0);
10319 #ifdef TARGET_NR_getuid32
10320 case TARGET_NR_getuid32
:
10321 return get_errno(getuid());
10324 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10325 /* Alpha specific */
10326 case TARGET_NR_getxuid
:
10330 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10332 return get_errno(getuid());
10334 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10335 /* Alpha specific */
10336 case TARGET_NR_getxgid
:
10340 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10342 return get_errno(getgid());
10344 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10345 /* Alpha specific */
10346 case TARGET_NR_osf_getsysinfo
:
10347 ret
= -TARGET_EOPNOTSUPP
;
10349 case TARGET_GSI_IEEE_FP_CONTROL
:
10351 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10352 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
10354 swcr
&= ~SWCR_STATUS_MASK
;
10355 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10357 if (put_user_u64 (swcr
, arg2
))
10358 return -TARGET_EFAULT
;
10363 /* case GSI_IEEE_STATE_AT_SIGNAL:
10364 -- Not implemented in linux kernel.
10366 -- Retrieves current unaligned access state; not much used.
10367 case GSI_PROC_TYPE:
10368 -- Retrieves implver information; surely not used.
10369 case GSI_GET_HWRPB:
10370 -- Grabs a copy of the HWRPB; surely not used.
10375 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10376 /* Alpha specific */
10377 case TARGET_NR_osf_setsysinfo
:
10378 ret
= -TARGET_EOPNOTSUPP
;
10380 case TARGET_SSI_IEEE_FP_CONTROL
:
10382 uint64_t swcr
, fpcr
;
10384 if (get_user_u64 (swcr
, arg2
)) {
10385 return -TARGET_EFAULT
;
10389 * The kernel calls swcr_update_status to update the
10390 * status bits from the fpcr at every point that it
10391 * could be queried. Therefore, we store the status
10392 * bits only in FPCR.
10394 ((CPUAlphaState
*)cpu_env
)->swcr
10395 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
10397 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10398 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
10399 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
10400 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10405 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10407 uint64_t exc
, fpcr
, fex
;
10409 if (get_user_u64(exc
, arg2
)) {
10410 return -TARGET_EFAULT
;
10412 exc
&= SWCR_STATUS_MASK
;
10413 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10415 /* Old exceptions are not signaled. */
10416 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
10418 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
10419 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
10421 /* Update the hardware fpcr. */
10422 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
10423 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10426 int si_code
= TARGET_FPE_FLTUNK
;
10427 target_siginfo_t info
;
10429 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
10430 si_code
= TARGET_FPE_FLTUND
;
10432 if (fex
& SWCR_TRAP_ENABLE_INE
) {
10433 si_code
= TARGET_FPE_FLTRES
;
10435 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
10436 si_code
= TARGET_FPE_FLTUND
;
10438 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
10439 si_code
= TARGET_FPE_FLTOVF
;
10441 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
10442 si_code
= TARGET_FPE_FLTDIV
;
10444 if (fex
& SWCR_TRAP_ENABLE_INV
) {
10445 si_code
= TARGET_FPE_FLTINV
;
10448 info
.si_signo
= SIGFPE
;
10450 info
.si_code
= si_code
;
10451 info
._sifields
._sigfault
._addr
10452 = ((CPUArchState
*)cpu_env
)->pc
;
10453 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10454 QEMU_SI_FAULT
, &info
);
10460 /* case SSI_NVPAIRS:
10461 -- Used with SSIN_UACPROC to enable unaligned accesses.
10462 case SSI_IEEE_STATE_AT_SIGNAL:
10463 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10464 -- Not implemented in linux kernel
10469 #ifdef TARGET_NR_osf_sigprocmask
10470 /* Alpha specific. */
10471 case TARGET_NR_osf_sigprocmask
:
10475 sigset_t set
, oldset
;
10478 case TARGET_SIG_BLOCK
:
10481 case TARGET_SIG_UNBLOCK
:
10484 case TARGET_SIG_SETMASK
:
10488 return -TARGET_EINVAL
;
10491 target_to_host_old_sigset(&set
, &mask
);
10492 ret
= do_sigprocmask(how
, &set
, &oldset
);
10494 host_to_target_old_sigset(&mask
, &oldset
);
10501 #ifdef TARGET_NR_getgid32
10502 case TARGET_NR_getgid32
:
10503 return get_errno(getgid());
10505 #ifdef TARGET_NR_geteuid32
10506 case TARGET_NR_geteuid32
:
10507 return get_errno(geteuid());
10509 #ifdef TARGET_NR_getegid32
10510 case TARGET_NR_getegid32
:
10511 return get_errno(getegid());
10513 #ifdef TARGET_NR_setreuid32
10514 case TARGET_NR_setreuid32
:
10515 return get_errno(setreuid(arg1
, arg2
));
10517 #ifdef TARGET_NR_setregid32
10518 case TARGET_NR_setregid32
:
10519 return get_errno(setregid(arg1
, arg2
));
10521 #ifdef TARGET_NR_getgroups32
10522 case TARGET_NR_getgroups32
:
10524 int gidsetsize
= arg1
;
10525 uint32_t *target_grouplist
;
10529 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10530 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10531 if (gidsetsize
== 0)
10533 if (!is_error(ret
)) {
10534 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10535 if (!target_grouplist
) {
10536 return -TARGET_EFAULT
;
10538 for(i
= 0;i
< ret
; i
++)
10539 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10540 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10545 #ifdef TARGET_NR_setgroups32
10546 case TARGET_NR_setgroups32
:
10548 int gidsetsize
= arg1
;
10549 uint32_t *target_grouplist
;
10553 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10554 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10555 if (!target_grouplist
) {
10556 return -TARGET_EFAULT
;
10558 for(i
= 0;i
< gidsetsize
; i
++)
10559 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10560 unlock_user(target_grouplist
, arg2
, 0);
10561 return get_errno(setgroups(gidsetsize
, grouplist
));
10564 #ifdef TARGET_NR_fchown32
10565 case TARGET_NR_fchown32
:
10566 return get_errno(fchown(arg1
, arg2
, arg3
));
10568 #ifdef TARGET_NR_setresuid32
10569 case TARGET_NR_setresuid32
:
10570 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10572 #ifdef TARGET_NR_getresuid32
10573 case TARGET_NR_getresuid32
:
10575 uid_t ruid
, euid
, suid
;
10576 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10577 if (!is_error(ret
)) {
10578 if (put_user_u32(ruid
, arg1
)
10579 || put_user_u32(euid
, arg2
)
10580 || put_user_u32(suid
, arg3
))
10581 return -TARGET_EFAULT
;
10586 #ifdef TARGET_NR_setresgid32
10587 case TARGET_NR_setresgid32
:
10588 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10590 #ifdef TARGET_NR_getresgid32
10591 case TARGET_NR_getresgid32
:
10593 gid_t rgid
, egid
, sgid
;
10594 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10595 if (!is_error(ret
)) {
10596 if (put_user_u32(rgid
, arg1
)
10597 || put_user_u32(egid
, arg2
)
10598 || put_user_u32(sgid
, arg3
))
10599 return -TARGET_EFAULT
;
10604 #ifdef TARGET_NR_chown32
10605 case TARGET_NR_chown32
:
10606 if (!(p
= lock_user_string(arg1
)))
10607 return -TARGET_EFAULT
;
10608 ret
= get_errno(chown(p
, arg2
, arg3
));
10609 unlock_user(p
, arg1
, 0);
10612 #ifdef TARGET_NR_setuid32
10613 case TARGET_NR_setuid32
:
10614 return get_errno(sys_setuid(arg1
));
10616 #ifdef TARGET_NR_setgid32
10617 case TARGET_NR_setgid32
:
10618 return get_errno(sys_setgid(arg1
));
10620 #ifdef TARGET_NR_setfsuid32
10621 case TARGET_NR_setfsuid32
:
10622 return get_errno(setfsuid(arg1
));
10624 #ifdef TARGET_NR_setfsgid32
10625 case TARGET_NR_setfsgid32
:
10626 return get_errno(setfsgid(arg1
));
10628 #ifdef TARGET_NR_mincore
10629 case TARGET_NR_mincore
:
10631 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10633 return -TARGET_ENOMEM
;
10635 p
= lock_user_string(arg3
);
10637 ret
= -TARGET_EFAULT
;
10639 ret
= get_errno(mincore(a
, arg2
, p
));
10640 unlock_user(p
, arg3
, ret
);
10642 unlock_user(a
, arg1
, 0);
10646 #ifdef TARGET_NR_arm_fadvise64_64
10647 case TARGET_NR_arm_fadvise64_64
:
10648 /* arm_fadvise64_64 looks like fadvise64_64 but
10649 * with different argument order: fd, advice, offset, len
10650 * rather than the usual fd, offset, len, advice.
10651 * Note that offset and len are both 64-bit so appear as
10652 * pairs of 32-bit registers.
10654 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10655 target_offset64(arg5
, arg6
), arg2
);
10656 return -host_to_target_errno(ret
);
10659 #if TARGET_ABI_BITS == 32
10661 #ifdef TARGET_NR_fadvise64_64
10662 case TARGET_NR_fadvise64_64
:
10663 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10664 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10672 /* 6 args: fd, offset (high, low), len (high, low), advice */
10673 if (regpairs_aligned(cpu_env
, num
)) {
10674 /* offset is in (3,4), len in (5,6) and advice in 7 */
10682 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10683 target_offset64(arg4
, arg5
), arg6
);
10684 return -host_to_target_errno(ret
);
10687 #ifdef TARGET_NR_fadvise64
10688 case TARGET_NR_fadvise64
:
10689 /* 5 args: fd, offset (high, low), len, advice */
10690 if (regpairs_aligned(cpu_env
, num
)) {
10691 /* offset is in (3,4), len in 5 and advice in 6 */
10697 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
10698 return -host_to_target_errno(ret
);
10701 #else /* not a 32-bit ABI */
10702 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10703 #ifdef TARGET_NR_fadvise64_64
10704 case TARGET_NR_fadvise64_64
:
10706 #ifdef TARGET_NR_fadvise64
10707 case TARGET_NR_fadvise64
:
10709 #ifdef TARGET_S390X
10711 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10712 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10713 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10714 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10718 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10720 #endif /* end of 64-bit ABI fadvise handling */
10722 #ifdef TARGET_NR_madvise
10723 case TARGET_NR_madvise
:
10724 /* A straight passthrough may not be safe because qemu sometimes
10725 turns private file-backed mappings into anonymous mappings.
10726 This will break MADV_DONTNEED.
10727 This is a hint, so ignoring and returning success is ok. */
10730 #if TARGET_ABI_BITS == 32
10731 case TARGET_NR_fcntl64
:
10735 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10736 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10739 if (!((CPUARMState
*)cpu_env
)->eabi
) {
10740 copyfrom
= copy_from_user_oabi_flock64
;
10741 copyto
= copy_to_user_oabi_flock64
;
10745 cmd
= target_to_host_fcntl_cmd(arg2
);
10746 if (cmd
== -TARGET_EINVAL
) {
10751 case TARGET_F_GETLK64
:
10752 ret
= copyfrom(&fl
, arg3
);
10756 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10758 ret
= copyto(arg3
, &fl
);
10762 case TARGET_F_SETLK64
:
10763 case TARGET_F_SETLKW64
:
10764 ret
= copyfrom(&fl
, arg3
);
10768 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10771 ret
= do_fcntl(arg1
, arg2
, arg3
);
10777 #ifdef TARGET_NR_cacheflush
10778 case TARGET_NR_cacheflush
:
10779 /* self-modifying code is handled automatically, so nothing needed */
10782 #ifdef TARGET_NR_getpagesize
10783 case TARGET_NR_getpagesize
:
10784 return TARGET_PAGE_SIZE
;
10786 case TARGET_NR_gettid
:
10787 return get_errno(sys_gettid());
10788 #ifdef TARGET_NR_readahead
10789 case TARGET_NR_readahead
:
10790 #if TARGET_ABI_BITS == 32
10791 if (regpairs_aligned(cpu_env
, num
)) {
10796 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
10798 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10803 #ifdef TARGET_NR_setxattr
10804 case TARGET_NR_listxattr
:
10805 case TARGET_NR_llistxattr
:
10809 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10811 return -TARGET_EFAULT
;
10814 p
= lock_user_string(arg1
);
10816 if (num
== TARGET_NR_listxattr
) {
10817 ret
= get_errno(listxattr(p
, b
, arg3
));
10819 ret
= get_errno(llistxattr(p
, b
, arg3
));
10822 ret
= -TARGET_EFAULT
;
10824 unlock_user(p
, arg1
, 0);
10825 unlock_user(b
, arg2
, arg3
);
10828 case TARGET_NR_flistxattr
:
10832 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10834 return -TARGET_EFAULT
;
10837 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10838 unlock_user(b
, arg2
, arg3
);
10841 case TARGET_NR_setxattr
:
10842 case TARGET_NR_lsetxattr
:
10844 void *p
, *n
, *v
= 0;
10846 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10848 return -TARGET_EFAULT
;
10851 p
= lock_user_string(arg1
);
10852 n
= lock_user_string(arg2
);
10854 if (num
== TARGET_NR_setxattr
) {
10855 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10857 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10860 ret
= -TARGET_EFAULT
;
10862 unlock_user(p
, arg1
, 0);
10863 unlock_user(n
, arg2
, 0);
10864 unlock_user(v
, arg3
, 0);
10867 case TARGET_NR_fsetxattr
:
10871 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10873 return -TARGET_EFAULT
;
10876 n
= lock_user_string(arg2
);
10878 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10880 ret
= -TARGET_EFAULT
;
10882 unlock_user(n
, arg2
, 0);
10883 unlock_user(v
, arg3
, 0);
10886 case TARGET_NR_getxattr
:
10887 case TARGET_NR_lgetxattr
:
10889 void *p
, *n
, *v
= 0;
10891 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10893 return -TARGET_EFAULT
;
10896 p
= lock_user_string(arg1
);
10897 n
= lock_user_string(arg2
);
10899 if (num
== TARGET_NR_getxattr
) {
10900 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10902 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10905 ret
= -TARGET_EFAULT
;
10907 unlock_user(p
, arg1
, 0);
10908 unlock_user(n
, arg2
, 0);
10909 unlock_user(v
, arg3
, arg4
);
10912 case TARGET_NR_fgetxattr
:
10916 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10918 return -TARGET_EFAULT
;
10921 n
= lock_user_string(arg2
);
10923 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10925 ret
= -TARGET_EFAULT
;
10927 unlock_user(n
, arg2
, 0);
10928 unlock_user(v
, arg3
, arg4
);
10931 case TARGET_NR_removexattr
:
10932 case TARGET_NR_lremovexattr
:
10935 p
= lock_user_string(arg1
);
10936 n
= lock_user_string(arg2
);
10938 if (num
== TARGET_NR_removexattr
) {
10939 ret
= get_errno(removexattr(p
, n
));
10941 ret
= get_errno(lremovexattr(p
, n
));
10944 ret
= -TARGET_EFAULT
;
10946 unlock_user(p
, arg1
, 0);
10947 unlock_user(n
, arg2
, 0);
10950 case TARGET_NR_fremovexattr
:
10953 n
= lock_user_string(arg2
);
10955 ret
= get_errno(fremovexattr(arg1
, n
));
10957 ret
= -TARGET_EFAULT
;
10959 unlock_user(n
, arg2
, 0);
10963 #endif /* CONFIG_ATTR */
10964 #ifdef TARGET_NR_set_thread_area
10965 case TARGET_NR_set_thread_area
:
10966 #if defined(TARGET_MIPS)
10967 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10969 #elif defined(TARGET_CRIS)
10971 ret
= -TARGET_EINVAL
;
10973 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10977 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10978 return do_set_thread_area(cpu_env
, arg1
);
10979 #elif defined(TARGET_M68K)
10981 TaskState
*ts
= cpu
->opaque
;
10982 ts
->tp_value
= arg1
;
10986 return -TARGET_ENOSYS
;
10989 #ifdef TARGET_NR_get_thread_area
10990 case TARGET_NR_get_thread_area
:
10991 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10992 return do_get_thread_area(cpu_env
, arg1
);
10993 #elif defined(TARGET_M68K)
10995 TaskState
*ts
= cpu
->opaque
;
10996 return ts
->tp_value
;
10999 return -TARGET_ENOSYS
;
11002 #ifdef TARGET_NR_getdomainname
11003 case TARGET_NR_getdomainname
:
11004 return -TARGET_ENOSYS
;
11007 #ifdef TARGET_NR_clock_settime
11008 case TARGET_NR_clock_settime
:
11010 struct timespec ts
;
11012 ret
= target_to_host_timespec(&ts
, arg2
);
11013 if (!is_error(ret
)) {
11014 ret
= get_errno(clock_settime(arg1
, &ts
));
11019 #ifdef TARGET_NR_clock_gettime
11020 case TARGET_NR_clock_gettime
:
11022 struct timespec ts
;
11023 ret
= get_errno(clock_gettime(arg1
, &ts
));
11024 if (!is_error(ret
)) {
11025 ret
= host_to_target_timespec(arg2
, &ts
);
11030 #ifdef TARGET_NR_clock_getres
11031 case TARGET_NR_clock_getres
:
11033 struct timespec ts
;
11034 ret
= get_errno(clock_getres(arg1
, &ts
));
11035 if (!is_error(ret
)) {
11036 host_to_target_timespec(arg2
, &ts
);
11041 #ifdef TARGET_NR_clock_nanosleep
11042 case TARGET_NR_clock_nanosleep
:
11044 struct timespec ts
;
11045 target_to_host_timespec(&ts
, arg3
);
11046 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11047 &ts
, arg4
? &ts
: NULL
));
11049 host_to_target_timespec(arg4
, &ts
);
11051 #if defined(TARGET_PPC)
11052 /* clock_nanosleep is odd in that it returns positive errno values.
11053 * On PPC, CR0 bit 3 should be set in such a situation. */
11054 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11055 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11062 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11063 case TARGET_NR_set_tid_address
:
11064 return get_errno(set_tid_address((int *)g2h(arg1
)));
11067 case TARGET_NR_tkill
:
11068 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11070 case TARGET_NR_tgkill
:
11071 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11072 target_to_host_signal(arg3
)));
11074 #ifdef TARGET_NR_set_robust_list
11075 case TARGET_NR_set_robust_list
:
11076 case TARGET_NR_get_robust_list
:
11077 /* The ABI for supporting robust futexes has userspace pass
11078 * the kernel a pointer to a linked list which is updated by
11079 * userspace after the syscall; the list is walked by the kernel
11080 * when the thread exits. Since the linked list in QEMU guest
11081 * memory isn't a valid linked list for the host and we have
11082 * no way to reliably intercept the thread-death event, we can't
11083 * support these. Silently return ENOSYS so that guest userspace
11084 * falls back to a non-robust futex implementation (which should
11085 * be OK except in the corner case of the guest crashing while
11086 * holding a mutex that is shared with another process via
11089 return -TARGET_ENOSYS
;
11092 #if defined(TARGET_NR_utimensat)
11093 case TARGET_NR_utimensat
:
11095 struct timespec
*tsp
, ts
[2];
11099 target_to_host_timespec(ts
, arg3
);
11100 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11104 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11106 if (!(p
= lock_user_string(arg2
))) {
11107 return -TARGET_EFAULT
;
11109 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11110 unlock_user(p
, arg2
, 0);
11115 case TARGET_NR_futex
:
11116 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11117 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11118 case TARGET_NR_inotify_init
:
11119 ret
= get_errno(sys_inotify_init());
11121 fd_trans_register(ret
, &target_inotify_trans
);
11125 #ifdef CONFIG_INOTIFY1
11126 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11127 case TARGET_NR_inotify_init1
:
11128 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11129 fcntl_flags_tbl
)));
11131 fd_trans_register(ret
, &target_inotify_trans
);
11136 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11137 case TARGET_NR_inotify_add_watch
:
11138 p
= lock_user_string(arg2
);
11139 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11140 unlock_user(p
, arg2
, 0);
11143 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11144 case TARGET_NR_inotify_rm_watch
:
11145 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11148 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11149 case TARGET_NR_mq_open
:
11151 struct mq_attr posix_mq_attr
;
11152 struct mq_attr
*pposix_mq_attr
;
11155 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11156 pposix_mq_attr
= NULL
;
11158 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11159 return -TARGET_EFAULT
;
11161 pposix_mq_attr
= &posix_mq_attr
;
11163 p
= lock_user_string(arg1
- 1);
11165 return -TARGET_EFAULT
;
11167 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11168 unlock_user (p
, arg1
, 0);
11172 case TARGET_NR_mq_unlink
:
11173 p
= lock_user_string(arg1
- 1);
11175 return -TARGET_EFAULT
;
11177 ret
= get_errno(mq_unlink(p
));
11178 unlock_user (p
, arg1
, 0);
11181 case TARGET_NR_mq_timedsend
:
11183 struct timespec ts
;
11185 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11187 target_to_host_timespec(&ts
, arg5
);
11188 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11189 host_to_target_timespec(arg5
, &ts
);
11191 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11193 unlock_user (p
, arg2
, arg3
);
11197 case TARGET_NR_mq_timedreceive
:
11199 struct timespec ts
;
11202 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11204 target_to_host_timespec(&ts
, arg5
);
11205 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11207 host_to_target_timespec(arg5
, &ts
);
11209 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11212 unlock_user (p
, arg2
, arg3
);
11214 put_user_u32(prio
, arg4
);
11218 /* Not implemented for now... */
11219 /* case TARGET_NR_mq_notify: */
11222 case TARGET_NR_mq_getsetattr
:
11224 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11227 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11228 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11229 &posix_mq_attr_out
));
11230 } else if (arg3
!= 0) {
11231 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11233 if (ret
== 0 && arg3
!= 0) {
11234 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11240 #ifdef CONFIG_SPLICE
11241 #ifdef TARGET_NR_tee
11242 case TARGET_NR_tee
:
11244 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11248 #ifdef TARGET_NR_splice
11249 case TARGET_NR_splice
:
11251 loff_t loff_in
, loff_out
;
11252 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11254 if (get_user_u64(loff_in
, arg2
)) {
11255 return -TARGET_EFAULT
;
11257 ploff_in
= &loff_in
;
11260 if (get_user_u64(loff_out
, arg4
)) {
11261 return -TARGET_EFAULT
;
11263 ploff_out
= &loff_out
;
11265 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11267 if (put_user_u64(loff_in
, arg2
)) {
11268 return -TARGET_EFAULT
;
11272 if (put_user_u64(loff_out
, arg4
)) {
11273 return -TARGET_EFAULT
;
11279 #ifdef TARGET_NR_vmsplice
11280 case TARGET_NR_vmsplice
:
11282 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11284 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11285 unlock_iovec(vec
, arg2
, arg3
, 0);
11287 ret
= -host_to_target_errno(errno
);
11292 #endif /* CONFIG_SPLICE */
11293 #ifdef CONFIG_EVENTFD
11294 #if defined(TARGET_NR_eventfd)
11295 case TARGET_NR_eventfd
:
11296 ret
= get_errno(eventfd(arg1
, 0));
11298 fd_trans_register(ret
, &target_eventfd_trans
);
11302 #if defined(TARGET_NR_eventfd2)
11303 case TARGET_NR_eventfd2
:
11305 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11306 if (arg2
& TARGET_O_NONBLOCK
) {
11307 host_flags
|= O_NONBLOCK
;
11309 if (arg2
& TARGET_O_CLOEXEC
) {
11310 host_flags
|= O_CLOEXEC
;
11312 ret
= get_errno(eventfd(arg1
, host_flags
));
11314 fd_trans_register(ret
, &target_eventfd_trans
);
11319 #endif /* CONFIG_EVENTFD */
11320 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11321 case TARGET_NR_fallocate
:
11322 #if TARGET_ABI_BITS == 32
11323 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11324 target_offset64(arg5
, arg6
)));
11326 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11330 #if defined(CONFIG_SYNC_FILE_RANGE)
11331 #if defined(TARGET_NR_sync_file_range)
11332 case TARGET_NR_sync_file_range
:
11333 #if TARGET_ABI_BITS == 32
11334 #if defined(TARGET_MIPS)
11335 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11336 target_offset64(arg5
, arg6
), arg7
));
11338 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11339 target_offset64(arg4
, arg5
), arg6
));
11340 #endif /* !TARGET_MIPS */
11342 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11346 #if defined(TARGET_NR_sync_file_range2)
11347 case TARGET_NR_sync_file_range2
:
11348 /* This is like sync_file_range but the arguments are reordered */
11349 #if TARGET_ABI_BITS == 32
11350 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11351 target_offset64(arg5
, arg6
), arg2
));
11353 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11358 #if defined(TARGET_NR_signalfd4)
11359 case TARGET_NR_signalfd4
:
11360 return do_signalfd4(arg1
, arg2
, arg4
);
11362 #if defined(TARGET_NR_signalfd)
11363 case TARGET_NR_signalfd
:
11364 return do_signalfd4(arg1
, arg2
, 0);
11366 #if defined(CONFIG_EPOLL)
11367 #if defined(TARGET_NR_epoll_create)
11368 case TARGET_NR_epoll_create
:
11369 return get_errno(epoll_create(arg1
));
11371 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11372 case TARGET_NR_epoll_create1
:
11373 return get_errno(epoll_create1(arg1
));
11375 #if defined(TARGET_NR_epoll_ctl)
11376 case TARGET_NR_epoll_ctl
:
11378 struct epoll_event ep
;
11379 struct epoll_event
*epp
= 0;
11381 struct target_epoll_event
*target_ep
;
11382 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11383 return -TARGET_EFAULT
;
11385 ep
.events
= tswap32(target_ep
->events
);
11386 /* The epoll_data_t union is just opaque data to the kernel,
11387 * so we transfer all 64 bits across and need not worry what
11388 * actual data type it is.
11390 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11391 unlock_user_struct(target_ep
, arg4
, 0);
11394 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11398 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11399 #if defined(TARGET_NR_epoll_wait)
11400 case TARGET_NR_epoll_wait
:
11402 #if defined(TARGET_NR_epoll_pwait)
11403 case TARGET_NR_epoll_pwait
:
11406 struct target_epoll_event
*target_ep
;
11407 struct epoll_event
*ep
;
11409 int maxevents
= arg3
;
11410 int timeout
= arg4
;
11412 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11413 return -TARGET_EINVAL
;
11416 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11417 maxevents
* sizeof(struct target_epoll_event
), 1);
11419 return -TARGET_EFAULT
;
11422 ep
= g_try_new(struct epoll_event
, maxevents
);
11424 unlock_user(target_ep
, arg2
, 0);
11425 return -TARGET_ENOMEM
;
11429 #if defined(TARGET_NR_epoll_pwait)
11430 case TARGET_NR_epoll_pwait
:
11432 target_sigset_t
*target_set
;
11433 sigset_t _set
, *set
= &_set
;
11436 if (arg6
!= sizeof(target_sigset_t
)) {
11437 ret
= -TARGET_EINVAL
;
11441 target_set
= lock_user(VERIFY_READ
, arg5
,
11442 sizeof(target_sigset_t
), 1);
11444 ret
= -TARGET_EFAULT
;
11447 target_to_host_sigset(set
, target_set
);
11448 unlock_user(target_set
, arg5
, 0);
11453 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11454 set
, SIGSET_T_SIZE
));
11458 #if defined(TARGET_NR_epoll_wait)
11459 case TARGET_NR_epoll_wait
:
11460 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11465 ret
= -TARGET_ENOSYS
;
11467 if (!is_error(ret
)) {
11469 for (i
= 0; i
< ret
; i
++) {
11470 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11471 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11473 unlock_user(target_ep
, arg2
,
11474 ret
* sizeof(struct target_epoll_event
));
11476 unlock_user(target_ep
, arg2
, 0);
11483 #ifdef TARGET_NR_prlimit64
11484 case TARGET_NR_prlimit64
:
11486 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11487 struct target_rlimit64
*target_rnew
, *target_rold
;
11488 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11489 int resource
= target_to_host_resource(arg2
);
11491 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11492 return -TARGET_EFAULT
;
11494 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11495 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11496 unlock_user_struct(target_rnew
, arg3
, 0);
11500 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11501 if (!is_error(ret
) && arg4
) {
11502 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11503 return -TARGET_EFAULT
;
11505 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11506 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11507 unlock_user_struct(target_rold
, arg4
, 1);
11512 #ifdef TARGET_NR_gethostname
11513 case TARGET_NR_gethostname
:
11515 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11517 ret
= get_errno(gethostname(name
, arg2
));
11518 unlock_user(name
, arg1
, arg2
);
11520 ret
= -TARGET_EFAULT
;
11525 #ifdef TARGET_NR_atomic_cmpxchg_32
11526 case TARGET_NR_atomic_cmpxchg_32
:
11528 /* should use start_exclusive from main.c */
11529 abi_ulong mem_value
;
11530 if (get_user_u32(mem_value
, arg6
)) {
11531 target_siginfo_t info
;
11532 info
.si_signo
= SIGSEGV
;
11534 info
.si_code
= TARGET_SEGV_MAPERR
;
11535 info
._sifields
._sigfault
._addr
= arg6
;
11536 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11537 QEMU_SI_FAULT
, &info
);
11541 if (mem_value
== arg2
)
11542 put_user_u32(arg1
, arg6
);
11546 #ifdef TARGET_NR_atomic_barrier
11547 case TARGET_NR_atomic_barrier
:
11548 /* Like the kernel implementation and the
11549 qemu arm barrier, no-op this? */
11553 #ifdef TARGET_NR_timer_create
11554 case TARGET_NR_timer_create
:
11556 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11558 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11561 int timer_index
= next_free_host_timer();
11563 if (timer_index
< 0) {
11564 ret
= -TARGET_EAGAIN
;
11566 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11569 phost_sevp
= &host_sevp
;
11570 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11576 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11580 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11581 return -TARGET_EFAULT
;
11589 #ifdef TARGET_NR_timer_settime
11590 case TARGET_NR_timer_settime
:
11592 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11593 * struct itimerspec * old_value */
11594 target_timer_t timerid
= get_timer_id(arg1
);
11598 } else if (arg3
== 0) {
11599 ret
= -TARGET_EINVAL
;
11601 timer_t htimer
= g_posix_timers
[timerid
];
11602 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11604 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11605 return -TARGET_EFAULT
;
11608 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11609 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11610 return -TARGET_EFAULT
;
11617 #ifdef TARGET_NR_timer_gettime
11618 case TARGET_NR_timer_gettime
:
11620 /* args: timer_t timerid, struct itimerspec *curr_value */
11621 target_timer_t timerid
= get_timer_id(arg1
);
11625 } else if (!arg2
) {
11626 ret
= -TARGET_EFAULT
;
11628 timer_t htimer
= g_posix_timers
[timerid
];
11629 struct itimerspec hspec
;
11630 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11632 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11633 ret
= -TARGET_EFAULT
;
11640 #ifdef TARGET_NR_timer_getoverrun
11641 case TARGET_NR_timer_getoverrun
:
11643 /* args: timer_t timerid */
11644 target_timer_t timerid
= get_timer_id(arg1
);
11649 timer_t htimer
= g_posix_timers
[timerid
];
11650 ret
= get_errno(timer_getoverrun(htimer
));
11652 fd_trans_unregister(ret
);
11657 #ifdef TARGET_NR_timer_delete
11658 case TARGET_NR_timer_delete
:
11660 /* args: timer_t timerid */
11661 target_timer_t timerid
= get_timer_id(arg1
);
11666 timer_t htimer
= g_posix_timers
[timerid
];
11667 ret
= get_errno(timer_delete(htimer
));
11668 g_posix_timers
[timerid
] = 0;
11674 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11675 case TARGET_NR_timerfd_create
:
11676 return get_errno(timerfd_create(arg1
,
11677 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11680 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11681 case TARGET_NR_timerfd_gettime
:
11683 struct itimerspec its_curr
;
11685 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11687 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11688 return -TARGET_EFAULT
;
11694 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11695 case TARGET_NR_timerfd_settime
:
11697 struct itimerspec its_new
, its_old
, *p_new
;
11700 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11701 return -TARGET_EFAULT
;
11708 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11710 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11711 return -TARGET_EFAULT
;
11717 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11718 case TARGET_NR_ioprio_get
:
11719 return get_errno(ioprio_get(arg1
, arg2
));
11722 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11723 case TARGET_NR_ioprio_set
:
11724 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
11727 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11728 case TARGET_NR_setns
:
11729 return get_errno(setns(arg1
, arg2
));
11731 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11732 case TARGET_NR_unshare
:
11733 return get_errno(unshare(arg1
));
11735 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11736 case TARGET_NR_kcmp
:
11737 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
11739 #ifdef TARGET_NR_swapcontext
11740 case TARGET_NR_swapcontext
:
11741 /* PowerPC specific. */
11742 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
11746 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
11747 return -TARGET_ENOSYS
;
11752 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
11753 abi_long arg2
, abi_long arg3
, abi_long arg4
,
11754 abi_long arg5
, abi_long arg6
, abi_long arg7
,
11757 CPUState
*cpu
= env_cpu(cpu_env
);
11760 #ifdef DEBUG_ERESTARTSYS
11761 /* Debug-only code for exercising the syscall-restart code paths
11762 * in the per-architecture cpu main loops: restart every syscall
11763 * the guest makes once before letting it through.
11769 return -TARGET_ERESTARTSYS
;
11774 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
11775 arg5
, arg6
, arg7
, arg8
);
11777 if (unlikely(do_strace
)) {
11778 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11779 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11780 arg5
, arg6
, arg7
, arg8
);
11781 print_syscall_ret(num
, ret
);
11783 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11784 arg5
, arg6
, arg7
, arg8
);
11787 trace_guest_user_syscall_ret(cpu
, num
, ret
);