/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef CONFIG_KCOV
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS
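
/*
 * Rough illustration of the classification above (not from the
 * original file): glibc's pthread_create() passes a clone flag set
 * that contains all of CLONE_THREAD_FLAGS plus only optional/ignored
 * bits, so it is treated as thread creation; a plain fork(), i.e.
 * clone(SIGCHLD), passes none of them and is treated as a fork; and
 * something like clone(CLONE_NEWNS | SIGCHLD) intersects
 * CLONE_INVALID_FORK_FLAGS and is rejected with EINVAL.
 */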
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
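
/*
 * Illustrative expansion (not part of the original source): given the
 * macro above, a declaration such as
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * produces
 *     static int sys_getcwd1 (char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 * i.e. a direct host syscall wrapper that bypasses any libc wrapper
 * for the same call.
 */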
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
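
/*
 * Worked example of the comment above (illustrative, not from the
 * original file): struct linux_dirent starts with two unsigned longs,
 * so its header is smaller on a 32-bit host than in a 64-bit guest's
 * layout. A host buffer full of small records can therefore expand
 * past the guest buffer once each record is rewritten in the guest
 * format; getdents64 uses fixed-width fields on both sides, which
 * avoids the mismatch.
 */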
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
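
/*
 * Usage sketch for get_errno() (illustrative, not from the original
 * file): syscall emulation code typically wraps the host call site,
 * e.g.
 *     ret = get_errno(openat(dirfd, path, flags, mode));
 * so that a host failure (-1 with errno set) becomes a negative
 * *target* errno in ret, which is the convention the guest expects.
 */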
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
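
/*
 * Illustrative expansion (not part of the original source):
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 * defines
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 * safe_syscall() is the helper (defined elsewhere in linux-user) that
 * lets a blocking host syscall be interrupted and restarted cleanly
 * when a guest signal arrives, which is why these wrappers are used
 * instead of the raw _syscallN() ones for potentially blocking calls.
 */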
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#ifdef TARGET_NR_rt_sigtimedwait
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedsend
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedreceive
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
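
/*
 * Usage sketch (illustrative, not from the original file): per the
 * note above, a caller querying a file lock would do
 *     struct flock64 fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl));
 * rather than passing F_GETLK with a plain struct flock, so the same
 * code path handles 64-bit offsets on both 32-bit and 64-bit hosts.
 */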
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
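
/*
 * Worked example for the fd_set converters above (illustrative, not
 * from the original file): with TARGET_ABI_BITS == 32, guest fd 35 is
 * stored as bit 3 of abi_ulong word 1 (35 = 1 * 32 + 3), so
 * copy_from_user_fdset() calls FD_SET(35, fds) when it finds that bit
 * set, and copy_to_user_fdset() writes it back to the same word/bit
 * position, with __get_user()/__put_user() handling any byte-order
 * difference between guest and host.
 */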
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
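
/*
 * Worked example (illustrative, not from the original file): on an
 * Alpha host (HOST_HZ == 1024) emulating a 100 Hz target, 2048 host
 * clock ticks convert to (2048 * 100) / 1024 = 200 target ticks; when
 * the two rates match, the value passes through unchanged.
 */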
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop)
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
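
/*
 * Example of the sun_path fixup above (illustrative, not from the
 * original file): a guest calling bind() on "/tmp/sock" but passing
 * len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
 * leaves out the trailing '\0'. If the byte just past the supplied
 * length is already zero, len is bumped by one so the host kernel
 * receives a properly terminated path, mirroring the kernel's own
 * leniency here.
 */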
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
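
/*
 * Concrete case for the conversion above (illustrative, not from the
 * original file): a guest sendmsg() passing one file descriptor as
 * SCM_RIGHTS ancillary data arrives here as a 4-byte payload; the fd
 * value is read out with __get_user() (which handles any byte-order
 * difference) into the host cmsg, and the host sendmsg() then ships a
 * real host file descriptor to the peer.
 */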
1698 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1699 struct msghdr *msgh)
1701 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1702 abi_long msg_controllen;
1703 abi_ulong target_cmsg_addr;
1704 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1705 socklen_t space = 0;
1707 msg_controllen = tswapal(target_msgh->msg_controllen);
1708 if (msg_controllen < sizeof (struct target_cmsghdr))
1709 goto the_end;
1710 target_cmsg_addr = tswapal(target_msgh->msg_control);
1711 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1712 target_cmsg_start = target_cmsg;
1713 if (!target_cmsg)
1714 return -TARGET_EFAULT;
1716 while (cmsg && target_cmsg) {
1717 void *data = CMSG_DATA(cmsg);
1718 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1720 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1721 int tgt_len, tgt_space;
1723 /* We never copy a half-header but may copy half-data;
1724 * this is Linux's behaviour in put_cmsg(). Note that
1725 * truncation here is a guest problem (which we report
1726 * to the guest via the CTRUNC bit), unlike truncation
1727 * in target_to_host_cmsg, which is a QEMU bug.
1729 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1730 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1731 break;
1734 if (cmsg->cmsg_level == SOL_SOCKET) {
1735 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1736 } else {
1737 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1739 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1741 /* Payload types which need a different size of payload on
1742 * the target must adjust tgt_len here.
1744 tgt_len = len;
1745 switch (cmsg->cmsg_level) {
1746 case SOL_SOCKET:
1747 switch (cmsg->cmsg_type) {
1748 case SO_TIMESTAMP:
1749 tgt_len = sizeof(struct target_timeval);
1750 break;
1751 default:
1752 break;
1754 break;
1755 default:
1756 break;
1759 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1760 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1761 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1764 /* We must now copy-and-convert len bytes of payload
1765 * into tgt_len bytes of destination space. Bear in mind
1766 * that in both source and destination we may be dealing
1767 * with a truncated value!
1769 switch (cmsg->cmsg_level) {
1770 case SOL_SOCKET:
1771 switch (cmsg->cmsg_type) {
1772 case SCM_RIGHTS:
1774 int *fd = (int *)data;
1775 int *target_fd = (int *)target_data;
1776 int i, numfds = tgt_len / sizeof(int);
1778 for (i = 0; i < numfds; i++) {
1779 __put_user(fd[i], target_fd + i);
1781 break;
1783 case SO_TIMESTAMP:
1785 struct timeval *tv = (struct timeval *)data;
1786 struct target_timeval *target_tv =
1787 (struct target_timeval *)target_data;
1789 if (len != sizeof(struct timeval) ||
1790 tgt_len != sizeof(struct target_timeval)) {
1791 goto unimplemented;
1794 /* copy struct timeval to target */
1795 __put_user(tv->tv_sec, &target_tv->tv_sec);
1796 __put_user(tv->tv_usec, &target_tv->tv_usec);
1797 break;
1799 case SCM_CREDENTIALS:
1801 struct ucred *cred = (struct ucred *)data;
1802 struct target_ucred *target_cred =
1803 (struct target_ucred *)target_data;
1805 __put_user(cred->pid, &target_cred->pid);
1806 __put_user(cred->uid, &target_cred->uid);
1807 __put_user(cred->gid, &target_cred->gid);
1808 break;
1810 default:
1811 goto unimplemented;
1813 break;
1815 case SOL_IP:
1816 switch (cmsg->cmsg_type) {
1817 case IP_TTL:
1819 uint32_t *v = (uint32_t *)data;
1820 uint32_t *t_int = (uint32_t *)target_data;
1822 if (len != sizeof(uint32_t) ||
1823 tgt_len != sizeof(uint32_t)) {
1824 goto unimplemented;
1826 __put_user(*v, t_int);
1827 break;
1829 case IP_RECVERR:
1831 struct errhdr_t {
1832 struct sock_extended_err ee;
1833 struct sockaddr_in offender;
1834 };
1835 struct errhdr_t *errh = (struct errhdr_t *)data;
1836 struct errhdr_t *target_errh =
1837 (struct errhdr_t *)target_data;
1839 if (len != sizeof(struct errhdr_t) ||
1840 tgt_len != sizeof(struct errhdr_t)) {
1841 goto unimplemented;
1843 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1844 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1845 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1846 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1847 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1848 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1849 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1850 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1851 (void *) &errh->offender, sizeof(errh->offender));
1852 break;
1854 default:
1855 goto unimplemented;
1857 break;
1859 case SOL_IPV6:
1860 switch (cmsg->cmsg_type) {
1861 case IPV6_HOPLIMIT:
1863 uint32_t *v = (uint32_t *)data;
1864 uint32_t *t_int = (uint32_t *)target_data;
1866 if (len != sizeof(uint32_t) ||
1867 tgt_len != sizeof(uint32_t)) {
1868 goto unimplemented;
1870 __put_user(*v, t_int);
1871 break;
1873 case IPV6_RECVERR:
1875 struct errhdr6_t {
1876 struct sock_extended_err ee;
1877 struct sockaddr_in6 offender;
1878 };
1879 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1880 struct errhdr6_t *target_errh =
1881 (struct errhdr6_t *)target_data;
1883 if (len != sizeof(struct errhdr6_t) ||
1884 tgt_len != sizeof(struct errhdr6_t)) {
1885 goto unimplemented;
1887 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1888 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1889 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1890 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1891 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1892 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1893 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1894 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1895 (void *) &errh->offender, sizeof(errh->offender));
1896 break;
1898 default:
1899 goto unimplemented;
1901 break;
1903 default:
1904 unimplemented:
1905 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1906 cmsg->cmsg_level, cmsg->cmsg_type);
1907 memcpy(target_data, data, MIN(len, tgt_len));
1908 if (tgt_len > len) {
1909 memset(target_data + len, 0, tgt_len - len);
1913 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1914 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1915 if (msg_controllen < tgt_space) {
1916 tgt_space = msg_controllen;
1918 msg_controllen -= tgt_space;
1919 space += tgt_space;
1920 cmsg = CMSG_NXTHDR(msgh, cmsg);
1921 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1922 target_cmsg_start);
1924 unlock_user(target_cmsg, target_cmsg_addr, space);
1925 the_end:
1926 target_msgh->msg_controllen = tswapal(space);
1927 return 0;
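/*
 * Worked example (illustrative): on a 64-bit host delivering SO_TIMESTAMP
 * to a 32-bit guest, len is sizeof(struct timeval) == 16 while tgt_len is
 * sizeof(struct target_timeval) == 8, so the payload is converted field
 * by field instead of memcpy'd. If the guest supplied too little control
 * space, tgt_len is clamped and MSG_CTRUNC is raised, mirroring the
 * kernel's put_cmsg(). (Sizes assume a typical ILP32 target and are for
 * illustration only.)
 */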
1930 /* do_setsockopt() must return target values and target errnos. */
1931 static abi_long do_setsockopt(int sockfd, int level, int optname,
1932 abi_ulong optval_addr, socklen_t optlen)
1934 abi_long ret;
1935 int val;
1936 struct ip_mreqn *ip_mreq;
1937 struct ip_mreq_source *ip_mreq_source;
1939 switch (level) {
1940 case SOL_TCP:
1941 /* TCP options all take an 'int' value. */
1942 if (optlen < sizeof(uint32_t))
1943 return -TARGET_EINVAL;
1945 if (get_user_u32(val, optval_addr))
1946 return -TARGET_EFAULT;
1947 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1948 break;
1949 case SOL_IP:
1950 switch (optname) {
1951 case IP_TOS:
1952 case IP_TTL:
1953 case IP_HDRINCL:
1954 case IP_ROUTER_ALERT:
1955 case IP_RECVOPTS:
1956 case IP_RETOPTS:
1957 case IP_PKTINFO:
1958 case IP_MTU_DISCOVER:
1959 case IP_RECVERR:
1960 case IP_RECVTTL:
1961 case IP_RECVTOS:
1962 #ifdef IP_FREEBIND
1963 case IP_FREEBIND:
1964 #endif
1965 case IP_MULTICAST_TTL:
1966 case IP_MULTICAST_LOOP:
1967 val = 0;
1968 if (optlen >= sizeof(uint32_t)) {
1969 if (get_user_u32(val, optval_addr))
1970 return -TARGET_EFAULT;
1971 } else if (optlen >= 1) {
1972 if (get_user_u8(val, optval_addr))
1973 return -TARGET_EFAULT;
1975 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1976 break;
1977 case IP_ADD_MEMBERSHIP:
1978 case IP_DROP_MEMBERSHIP:
1979 if (optlen < sizeof (struct target_ip_mreq) ||
1980 optlen > sizeof (struct target_ip_mreqn))
1981 return -TARGET_EINVAL;
1983 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1984 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1985 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1986 break;
1988 case IP_BLOCK_SOURCE:
1989 case IP_UNBLOCK_SOURCE:
1990 case IP_ADD_SOURCE_MEMBERSHIP:
1991 case IP_DROP_SOURCE_MEMBERSHIP:
1992 if (optlen != sizeof (struct target_ip_mreq_source))
1993 return -TARGET_EINVAL;
1995 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1996 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1997 unlock_user(ip_mreq_source, optval_addr, 0);
1998 break;
2000 default:
2001 goto unimplemented;
2003 break;
2004 case SOL_IPV6:
2005 switch (optname) {
2006 case IPV6_MTU_DISCOVER:
2007 case IPV6_MTU:
2008 case IPV6_V6ONLY:
2009 case IPV6_RECVPKTINFO:
2010 case IPV6_UNICAST_HOPS:
2011 case IPV6_MULTICAST_HOPS:
2012 case IPV6_MULTICAST_LOOP:
2013 case IPV6_RECVERR:
2014 case IPV6_RECVHOPLIMIT:
2015 case IPV6_2292HOPLIMIT:
2016 case IPV6_CHECKSUM:
2017 case IPV6_ADDRFORM:
2018 case IPV6_2292PKTINFO:
2019 case IPV6_RECVTCLASS:
2020 case IPV6_RECVRTHDR:
2021 case IPV6_2292RTHDR:
2022 case IPV6_RECVHOPOPTS:
2023 case IPV6_2292HOPOPTS:
2024 case IPV6_RECVDSTOPTS:
2025 case IPV6_2292DSTOPTS:
2026 case IPV6_TCLASS:
2027 #ifdef IPV6_RECVPATHMTU
2028 case IPV6_RECVPATHMTU:
2029 #endif
2030 #ifdef IPV6_TRANSPARENT
2031 case IPV6_TRANSPARENT:
2032 #endif
2033 #ifdef IPV6_FREEBIND
2034 case IPV6_FREEBIND:
2035 #endif
2036 #ifdef IPV6_RECVORIGDSTADDR
2037 case IPV6_RECVORIGDSTADDR:
2038 #endif
2039 val = 0;
2040 if (optlen < sizeof(uint32_t)) {
2041 return -TARGET_EINVAL;
2043 if (get_user_u32(val, optval_addr)) {
2044 return -TARGET_EFAULT;
2046 ret = get_errno(setsockopt(sockfd, level, optname,
2047 &val, sizeof(val)));
2048 break;
2049 case IPV6_PKTINFO:
2051 struct in6_pktinfo pki;
2053 if (optlen < sizeof(pki)) {
2054 return -TARGET_EINVAL;
2057 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2058 return -TARGET_EFAULT;
2061 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2063 ret = get_errno(setsockopt(sockfd, level, optname,
2064 &pki, sizeof(pki)));
2065 break;
2067 case IPV6_ADD_MEMBERSHIP:
2068 case IPV6_DROP_MEMBERSHIP:
2070 struct ipv6_mreq ipv6mreq;
2072 if (optlen < sizeof(ipv6mreq)) {
2073 return -TARGET_EINVAL;
2076 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2077 return -TARGET_EFAULT;
2080 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2082 ret = get_errno(setsockopt(sockfd, level, optname,
2083 &ipv6mreq, sizeof(ipv6mreq)));
2084 break;
2086 default:
2087 goto unimplemented;
2089 break;
2090 case SOL_ICMPV6:
2091 switch (optname) {
2092 case ICMPV6_FILTER:
2094 struct icmp6_filter icmp6f;
2096 if (optlen > sizeof(icmp6f)) {
2097 optlen = sizeof(icmp6f);
2100 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2101 return -TARGET_EFAULT;
2104 for (val = 0; val < 8; val++) {
2105 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2108 ret = get_errno(setsockopt(sockfd, level, optname,
2109 &icmp6f, optlen));
2110 break;
2112 default:
2113 goto unimplemented;
2115 break;
2116 case SOL_RAW:
2117 switch (optname) {
2118 case ICMP_FILTER:
2119 case IPV6_CHECKSUM:
2120 /* these take a u32 value */
2121 if (optlen < sizeof(uint32_t)) {
2122 return -TARGET_EINVAL;
2125 if (get_user_u32(val, optval_addr)) {
2126 return -TARGET_EFAULT;
2128 ret = get_errno(setsockopt(sockfd, level, optname,
2129 &val, sizeof(val)));
2130 break;
2132 default:
2133 goto unimplemented;
2135 break;
2136 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2137 case SOL_ALG:
2138 switch (optname) {
2139 case ALG_SET_KEY:
2141 char *alg_key = g_malloc(optlen);
2143 if (!alg_key) {
2144 return -TARGET_ENOMEM;
2146 if (copy_from_user(alg_key, optval_addr, optlen)) {
2147 g_free(alg_key);
2148 return -TARGET_EFAULT;
2150 ret = get_errno(setsockopt(sockfd, level, optname,
2151 alg_key, optlen));
2152 g_free(alg_key);
2153 break;
2155 case ALG_SET_AEAD_AUTHSIZE:
2157 ret = get_errno(setsockopt(sockfd, level, optname,
2158 NULL, optlen));
2159 break;
2161 default:
2162 goto unimplemented;
2164 break;
2165 #endif
2166 case TARGET_SOL_SOCKET:
2167 switch (optname) {
2168 case TARGET_SO_RCVTIMEO:
2170 struct timeval tv;
2172 optname = SO_RCVTIMEO;
2174 set_timeout:
2175 if (optlen != sizeof(struct target_timeval)) {
2176 return -TARGET_EINVAL;
2179 if (copy_from_user_timeval(&tv, optval_addr)) {
2180 return -TARGET_EFAULT;
2183 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2184 &tv, sizeof(tv)));
2185 return ret;
2187 case TARGET_SO_SNDTIMEO:
2188 optname = SO_SNDTIMEO;
2189 goto set_timeout;
2190 case TARGET_SO_ATTACH_FILTER:
2192 struct target_sock_fprog *tfprog;
2193 struct target_sock_filter *tfilter;
2194 struct sock_fprog fprog;
2195 struct sock_filter *filter;
2196 int i;
2198 if (optlen != sizeof(*tfprog)) {
2199 return -TARGET_EINVAL;
2201 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2202 return -TARGET_EFAULT;
2204 if (!lock_user_struct(VERIFY_READ, tfilter,
2205 tswapal(tfprog->filter), 0)) {
2206 unlock_user_struct(tfprog, optval_addr, 1);
2207 return -TARGET_EFAULT;
2210 fprog.len = tswap16(tfprog->len);
2211 filter = g_try_new(struct sock_filter, fprog.len);
2212 if (filter == NULL) {
2213 unlock_user_struct(tfilter, tfprog->filter, 1);
2214 unlock_user_struct(tfprog, optval_addr, 1);
2215 return -TARGET_ENOMEM;
2217 for (i = 0; i < fprog.len; i++) {
2218 filter[i].code = tswap16(tfilter[i].code);
2219 filter[i].jt = tfilter[i].jt;
2220 filter[i].jf = tfilter[i].jf;
2221 filter[i].k = tswap32(tfilter[i].k);
2223 fprog.filter = filter;
2225 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2226 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2227 g_free(filter);
2229 unlock_user_struct(tfilter, tfprog->filter, 1);
2230 unlock_user_struct(tfprog, optval_addr, 1);
2231 return ret;
2233 case TARGET_SO_BINDTODEVICE:
2235 char *dev_ifname, *addr_ifname;
2237 if (optlen > IFNAMSIZ - 1) {
2238 optlen = IFNAMSIZ - 1;
2240 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2241 if (!dev_ifname) {
2242 return -TARGET_EFAULT;
2244 optname = SO_BINDTODEVICE;
2245 addr_ifname = alloca(IFNAMSIZ);
2246 memcpy(addr_ifname, dev_ifname, optlen);
2247 addr_ifname[optlen] = 0;
2248 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2249 addr_ifname, optlen));
2250 unlock_user(dev_ifname, optval_addr, 0);
2251 return ret;
2253 case TARGET_SO_LINGER:
2255 struct linger lg;
2256 struct target_linger *tlg;
2258 if (optlen != sizeof(struct target_linger)) {
2259 return -TARGET_EINVAL;
2261 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2262 return -TARGET_EFAULT;
2264 __get_user(lg.l_onoff, &tlg->l_onoff);
2265 __get_user(lg.l_linger, &tlg->l_linger);
2266 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2267 &lg, sizeof(lg)));
2268 unlock_user_struct(tlg, optval_addr, 0);
2269 return ret;
2271 /* Options with 'int' argument. */
2272 case TARGET_SO_DEBUG:
2273 optname = SO_DEBUG;
2274 break;
2275 case TARGET_SO_REUSEADDR:
2276 optname = SO_REUSEADDR;
2277 break;
2278 #ifdef SO_REUSEPORT
2279 case TARGET_SO_REUSEPORT:
2280 optname = SO_REUSEPORT;
2281 break;
2282 #endif
2283 case TARGET_SO_TYPE:
2284 optname = SO_TYPE;
2285 break;
2286 case TARGET_SO_ERROR:
2287 optname = SO_ERROR;
2288 break;
2289 case TARGET_SO_DONTROUTE:
2290 optname = SO_DONTROUTE;
2291 break;
2292 case TARGET_SO_BROADCAST:
2293 optname = SO_BROADCAST;
2294 break;
2295 case TARGET_SO_SNDBUF:
2296 optname = SO_SNDBUF;
2297 break;
2298 case TARGET_SO_SNDBUFFORCE:
2299 optname = SO_SNDBUFFORCE;
2300 break;
2301 case TARGET_SO_RCVBUF:
2302 optname = SO_RCVBUF;
2303 break;
2304 case TARGET_SO_RCVBUFFORCE:
2305 optname = SO_RCVBUFFORCE;
2306 break;
2307 case TARGET_SO_KEEPALIVE:
2308 optname = SO_KEEPALIVE;
2309 break;
2310 case TARGET_SO_OOBINLINE:
2311 optname = SO_OOBINLINE;
2312 break;
2313 case TARGET_SO_NO_CHECK:
2314 optname = SO_NO_CHECK;
2315 break;
2316 case TARGET_SO_PRIORITY:
2317 optname = SO_PRIORITY;
2318 break;
2319 #ifdef SO_BSDCOMPAT
2320 case TARGET_SO_BSDCOMPAT:
2321 optname = SO_BSDCOMPAT;
2322 break;
2323 #endif
2324 case TARGET_SO_PASSCRED:
2325 optname = SO_PASSCRED;
2326 break;
2327 case TARGET_SO_PASSSEC:
2328 optname = SO_PASSSEC;
2329 break;
2330 case TARGET_SO_TIMESTAMP:
2331 optname = SO_TIMESTAMP;
2332 break;
2333 case TARGET_SO_RCVLOWAT:
2334 optname = SO_RCVLOWAT;
2335 break;
2336 default:
2337 goto unimplemented;
2339 if (optlen < sizeof(uint32_t))
2340 return -TARGET_EINVAL;
2342 if (get_user_u32(val, optval_addr))
2343 return -TARGET_EFAULT;
2344 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2345 break;
2346 #ifdef SOL_NETLINK
2347 case SOL_NETLINK:
2348 switch (optname) {
2349 case NETLINK_PKTINFO:
2350 case NETLINK_ADD_MEMBERSHIP:
2351 case NETLINK_DROP_MEMBERSHIP:
2352 case NETLINK_BROADCAST_ERROR:
2353 case NETLINK_NO_ENOBUFS:
2354 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2355 case NETLINK_LISTEN_ALL_NSID:
2356 case NETLINK_CAP_ACK:
2357 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2359 case NETLINK_EXT_ACK:
2360 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2361 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2362 case NETLINK_GET_STRICT_CHK:
2363 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2364 break;
2365 default:
2366 goto unimplemented;
2368 val = 0;
2369 if (optlen < sizeof(uint32_t)) {
2370 return -TARGET_EINVAL;
2372 if (get_user_u32(val, optval_addr)) {
2373 return -TARGET_EFAULT;
2375 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2376 sizeof(val)));
2377 break;
2378 #endif /* SOL_NETLINK */
2379 default:
2380 unimplemented:
2381 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2382 level, optname);
2383 ret = -TARGET_ENOPROTOOPT;
2385 return ret;
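/*
 * Illustrative call path (not part of the build): a guest
 * setsockopt(fd, SOL_SOCKET, TARGET_SO_RCVTIMEO, &tv, sizeof(tv)) enters
 * with target constants and a target-format struct; the switch above
 * rewrites optname to the host's SO_RCVTIMEO, byte-swaps the timeval,
 * and issues the host syscall with host-sized arguments:
 *
 *     struct timeval tv;
 *     copy_from_user_timeval(&tv, optval_addr);
 *     setsockopt(sockfd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */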
2388 /* do_getsockopt() must return target values and target errnos. */
2389 static abi_long do_getsockopt(int sockfd, int level, int optname,
2390 abi_ulong optval_addr, abi_ulong optlen)
2392 abi_long ret;
2393 int len, val;
2394 socklen_t lv;
2396 switch (level) {
2397 case TARGET_SOL_SOCKET:
2398 level = SOL_SOCKET;
2399 switch (optname) {
2400 /* These don't just return a single integer */
2401 case TARGET_SO_PEERNAME:
2402 goto unimplemented;
2403 case TARGET_SO_RCVTIMEO: {
2404 struct timeval tv;
2405 socklen_t tvlen;
2407 optname = SO_RCVTIMEO;
2409 get_timeout:
2410 if (get_user_u32(len, optlen)) {
2411 return -TARGET_EFAULT;
2413 if (len < 0) {
2414 return -TARGET_EINVAL;
2417 tvlen = sizeof(tv);
2418 ret = get_errno(getsockopt(sockfd, level, optname,
2419 &tv, &tvlen));
2420 if (ret < 0) {
2421 return ret;
2423 if (len > sizeof(struct target_timeval)) {
2424 len = sizeof(struct target_timeval);
2426 if (copy_to_user_timeval(optval_addr, &tv)) {
2427 return -TARGET_EFAULT;
2429 if (put_user_u32(len, optlen)) {
2430 return -TARGET_EFAULT;
2432 break;
2434 case TARGET_SO_SNDTIMEO:
2435 optname = SO_SNDTIMEO;
2436 goto get_timeout;
2437 case TARGET_SO_PEERCRED: {
2438 struct ucred cr;
2439 socklen_t crlen;
2440 struct target_ucred *tcr;
2442 if (get_user_u32(len, optlen)) {
2443 return -TARGET_EFAULT;
2445 if (len < 0) {
2446 return -TARGET_EINVAL;
2449 crlen = sizeof(cr);
2450 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2451 &cr, &crlen));
2452 if (ret < 0) {
2453 return ret;
2455 if (len > crlen) {
2456 len = crlen;
2458 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2459 return -TARGET_EFAULT;
2461 __put_user(cr.pid, &tcr->pid);
2462 __put_user(cr.uid, &tcr->uid);
2463 __put_user(cr.gid, &tcr->gid);
2464 unlock_user_struct(tcr, optval_addr, 1);
2465 if (put_user_u32(len, optlen)) {
2466 return -TARGET_EFAULT;
2468 break;
2470 case TARGET_SO_PEERSEC: {
2471 char *name;
2473 if (get_user_u32(len, optlen)) {
2474 return -TARGET_EFAULT;
2476 if (len < 0) {
2477 return -TARGET_EINVAL;
2479 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2480 if (!name) {
2481 return -TARGET_EFAULT;
2483 lv = len;
2484 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2485 name, &lv));
2486 if (put_user_u32(lv, optlen)) {
2487 ret = -TARGET_EFAULT;
2489 unlock_user(name, optval_addr, lv);
2490 break;
2492 case TARGET_SO_LINGER:
2494 struct linger lg;
2495 socklen_t lglen;
2496 struct target_linger *tlg;
2498 if (get_user_u32(len, optlen)) {
2499 return -TARGET_EFAULT;
2501 if (len < 0) {
2502 return -TARGET_EINVAL;
2505 lglen = sizeof(lg);
2506 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2507 &lg, &lglen));
2508 if (ret < 0) {
2509 return ret;
2511 if (len > lglen) {
2512 len = lglen;
2514 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2515 return -TARGET_EFAULT;
2517 __put_user(lg.l_onoff, &tlg->l_onoff);
2518 __put_user(lg.l_linger, &tlg->l_linger);
2519 unlock_user_struct(tlg, optval_addr, 1);
2520 if (put_user_u32(len, optlen)) {
2521 return -TARGET_EFAULT;
2523 break;
2525 /* Options with 'int' argument. */
2526 case TARGET_SO_DEBUG:
2527 optname = SO_DEBUG;
2528 goto int_case;
2529 case TARGET_SO_REUSEADDR:
2530 optname = SO_REUSEADDR;
2531 goto int_case;
2532 #ifdef SO_REUSEPORT
2533 case TARGET_SO_REUSEPORT:
2534 optname = SO_REUSEPORT;
2535 goto int_case;
2536 #endif
2537 case TARGET_SO_TYPE:
2538 optname = SO_TYPE;
2539 goto int_case;
2540 case TARGET_SO_ERROR:
2541 optname = SO_ERROR;
2542 goto int_case;
2543 case TARGET_SO_DONTROUTE:
2544 optname = SO_DONTROUTE;
2545 goto int_case;
2546 case TARGET_SO_BROADCAST:
2547 optname = SO_BROADCAST;
2548 goto int_case;
2549 case TARGET_SO_SNDBUF:
2550 optname = SO_SNDBUF;
2551 goto int_case;
2552 case TARGET_SO_RCVBUF:
2553 optname = SO_RCVBUF;
2554 goto int_case;
2555 case TARGET_SO_KEEPALIVE:
2556 optname = SO_KEEPALIVE;
2557 goto int_case;
2558 case TARGET_SO_OOBINLINE:
2559 optname = SO_OOBINLINE;
2560 goto int_case;
2561 case TARGET_SO_NO_CHECK:
2562 optname = SO_NO_CHECK;
2563 goto int_case;
2564 case TARGET_SO_PRIORITY:
2565 optname = SO_PRIORITY;
2566 goto int_case;
2567 #ifdef SO_BSDCOMPAT
2568 case TARGET_SO_BSDCOMPAT:
2569 optname = SO_BSDCOMPAT;
2570 goto int_case;
2571 #endif
2572 case TARGET_SO_PASSCRED:
2573 optname = SO_PASSCRED;
2574 goto int_case;
2575 case TARGET_SO_TIMESTAMP:
2576 optname = SO_TIMESTAMP;
2577 goto int_case;
2578 case TARGET_SO_RCVLOWAT:
2579 optname = SO_RCVLOWAT;
2580 goto int_case;
2581 case TARGET_SO_ACCEPTCONN:
2582 optname = SO_ACCEPTCONN;
2583 goto int_case;
2584 default:
2585 goto int_case;
2587 break;
2588 case SOL_TCP:
2589 /* TCP options all take an 'int' value. */
2590 int_case:
2591 if (get_user_u32(len, optlen))
2592 return -TARGET_EFAULT;
2593 if (len < 0)
2594 return -TARGET_EINVAL;
2595 lv = sizeof(lv);
2596 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2597 if (ret < 0)
2598 return ret;
2599 if (optname == SO_TYPE) {
2600 val = host_to_target_sock_type(val);
2602 if (len > lv)
2603 len = lv;
2604 if (len == 4) {
2605 if (put_user_u32(val, optval_addr))
2606 return -TARGET_EFAULT;
2607 } else {
2608 if (put_user_u8(val, optval_addr))
2609 return -TARGET_EFAULT;
2611 if (put_user_u32(len, optlen))
2612 return -TARGET_EFAULT;
2613 break;
2614 case SOL_IP:
2615 switch (optname) {
2616 case IP_TOS:
2617 case IP_TTL:
2618 case IP_HDRINCL:
2619 case IP_ROUTER_ALERT:
2620 case IP_RECVOPTS:
2621 case IP_RETOPTS:
2622 case IP_PKTINFO:
2623 case IP_MTU_DISCOVER:
2624 case IP_RECVERR:
2625 case IP_RECVTOS:
2626 #ifdef IP_FREEBIND
2627 case IP_FREEBIND:
2628 #endif
2629 case IP_MULTICAST_TTL:
2630 case IP_MULTICAST_LOOP:
2631 if (get_user_u32(len, optlen))
2632 return -TARGET_EFAULT;
2633 if (len < 0)
2634 return -TARGET_EINVAL;
2635 lv = sizeof(lv);
2636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2637 if (ret < 0)
2638 return ret;
2639 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2640 len = 1;
2641 if (put_user_u32(len, optlen)
2642 || put_user_u8(val, optval_addr))
2643 return -TARGET_EFAULT;
2644 } else {
2645 if (len > sizeof(int))
2646 len = sizeof(int);
2647 if (put_user_u32(len, optlen)
2648 || put_user_u32(val, optval_addr))
2649 return -TARGET_EFAULT;
2651 break;
2652 default:
2653 ret = -TARGET_ENOPROTOOPT;
2654 break;
2656 break;
2657 case SOL_IPV6:
2658 switch (optname) {
2659 case IPV6_MTU_DISCOVER:
2660 case IPV6_MTU:
2661 case IPV6_V6ONLY:
2662 case IPV6_RECVPKTINFO:
2663 case IPV6_UNICAST_HOPS:
2664 case IPV6_MULTICAST_HOPS:
2665 case IPV6_MULTICAST_LOOP:
2666 case IPV6_RECVERR:
2667 case IPV6_RECVHOPLIMIT:
2668 case IPV6_2292HOPLIMIT:
2669 case IPV6_CHECKSUM:
2670 case IPV6_ADDRFORM:
2671 case IPV6_2292PKTINFO:
2672 case IPV6_RECVTCLASS:
2673 case IPV6_RECVRTHDR:
2674 case IPV6_2292RTHDR:
2675 case IPV6_RECVHOPOPTS:
2676 case IPV6_2292HOPOPTS:
2677 case IPV6_RECVDSTOPTS:
2678 case IPV6_2292DSTOPTS:
2679 case IPV6_TCLASS:
2680 #ifdef IPV6_RECVPATHMTU
2681 case IPV6_RECVPATHMTU:
2682 #endif
2683 #ifdef IPV6_TRANSPARENT
2684 case IPV6_TRANSPARENT:
2685 #endif
2686 #ifdef IPV6_FREEBIND
2687 case IPV6_FREEBIND:
2688 #endif
2689 #ifdef IPV6_RECVORIGDSTADDR
2690 case IPV6_RECVORIGDSTADDR:
2691 #endif
2692 if (get_user_u32(len, optlen))
2693 return -TARGET_EFAULT;
2694 if (len < 0)
2695 return -TARGET_EINVAL;
2696 lv = sizeof(lv);
2697 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2698 if (ret < 0)
2699 return ret;
2700 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2701 len = 1;
2702 if (put_user_u32(len, optlen)
2703 || put_user_u8(val, optval_addr))
2704 return -TARGET_EFAULT;
2705 } else {
2706 if (len > sizeof(int))
2707 len = sizeof(int);
2708 if (put_user_u32(len, optlen)
2709 || put_user_u32(val, optval_addr))
2710 return -TARGET_EFAULT;
2712 break;
2713 default:
2714 ret = -TARGET_ENOPROTOOPT;
2715 break;
2717 break;
2718 #ifdef SOL_NETLINK
2719 case SOL_NETLINK:
2720 switch (optname) {
2721 case NETLINK_PKTINFO:
2722 case NETLINK_BROADCAST_ERROR:
2723 case NETLINK_NO_ENOBUFS:
2724 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2725 case NETLINK_LISTEN_ALL_NSID:
2726 case NETLINK_CAP_ACK:
2727 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2729 case NETLINK_EXT_ACK:
2730 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2731 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2732 case NETLINK_GET_STRICT_CHK:
2733 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2734 if (get_user_u32(len, optlen)) {
2735 return -TARGET_EFAULT;
2737 if (len != sizeof(val)) {
2738 return -TARGET_EINVAL;
2740 lv = len;
2741 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2742 if (ret < 0) {
2743 return ret;
2745 if (put_user_u32(lv, optlen)
2746 || put_user_u32(val, optval_addr)) {
2747 return -TARGET_EFAULT;
2749 break;
2750 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2751 case NETLINK_LIST_MEMBERSHIPS:
2753 uint32_t *results;
2754 int i;
2755 if (get_user_u32(len, optlen)) {
2756 return -TARGET_EFAULT;
2758 if (len < 0) {
2759 return -TARGET_EINVAL;
2761 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2762 if (!results) {
2763 return -TARGET_EFAULT;
2765 lv = len;
2766 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2767 if (ret < 0) {
2768 unlock_user(results, optval_addr, 0);
2769 return ret;
2771 /* Swap host endianness to target endianness. */
2772 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2773 results[i] = tswap32(results[i]);
2775 unlock_user(results, optval_addr, 0);
2776 if (put_user_u32(lv, optlen)) {
2777 return -TARGET_EFAULT;
2778 }
2779 break;
2781 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2782 default:
2783 goto unimplemented;
2785 break;
2786 #endif /* SOL_NETLINK */
2787 default:
2788 unimplemented:
2789 qemu_log_mask(LOG_UNIMP,
2790 "getsockopt level=%d optname=%d not yet supported\n",
2791 level, optname);
2792 ret = -TARGET_EOPNOTSUPP;
2793 break;
2795 return ret;
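/*
 * Illustrative example: the SOL_IP/SOL_IPV6 paths above honour Linux's
 * historical "byte or int" getsockopt convention. A guest passing
 * optlen < 4 for IP_TTL gets a single byte back (put_user_u8) when the
 * value is small enough to fit, while optlen >= 4 gets a full 32-bit
 * value (put_user_u32); the length actually written is stored back
 * through the optlen pointer in both cases.
 */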
2798 /* Convert target low/high pair representing file offset into the host
2799 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2800 * as the kernel doesn't handle them either.
2801 */
2802 static void target_to_host_low_high(abi_ulong tlow,
2803 abi_ulong thigh,
2804 unsigned long *hlow,
2805 unsigned long *hhigh)
2807 uint64_t off = tlow |
2808 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2809 TARGET_LONG_BITS / 2;
2811 *hlow = off;
2812 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
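/*
 * Worked example (illustrative): for a 32-bit target on a 64-bit host,
 * TARGET_LONG_BITS == 32 and HOST_LONG_BITS == 64. With
 *
 *     tlow  = 0x89abcdef, thigh = 0x01234567
 *
 * the function computes
 *
 *     off   = 0x0123456789abcdef
 *     *hlow = 0x0123456789abcdef, *hhigh = 0
 *
 * Each shift is split into two halves because shifting a value by its
 * full width (e.g. a 64-bit quantity by 64 bits) is undefined behaviour
 * in C.
 */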
2815 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2816 abi_ulong count, int copy)
2818 struct target_iovec *target_vec;
2819 struct iovec *vec;
2820 abi_ulong total_len, max_len;
2821 int i;
2822 int err = 0;
2823 bool bad_address = false;
2825 if (count == 0) {
2826 errno = 0;
2827 return NULL;
2829 if (count > IOV_MAX) {
2830 errno = EINVAL;
2831 return NULL;
2834 vec = g_try_new0(struct iovec, count);
2835 if (vec == NULL) {
2836 errno = ENOMEM;
2837 return NULL;
2840 target_vec = lock_user(VERIFY_READ, target_addr,
2841 count * sizeof(struct target_iovec), 1);
2842 if (target_vec == NULL) {
2843 err = EFAULT;
2844 goto fail2;
2847 /* ??? If host page size > target page size, this will result in a
2848 value larger than what we can actually support. */
2849 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2850 total_len = 0;
2852 for (i = 0; i < count; i++) {
2853 abi_ulong base = tswapal(target_vec[i].iov_base);
2854 abi_long len = tswapal(target_vec[i].iov_len);
2856 if (len < 0) {
2857 err = EINVAL;
2858 goto fail;
2859 } else if (len == 0) {
2860 /* Zero length pointer is ignored. */
2861 vec[i].iov_base = 0;
2862 } else {
2863 vec[i].iov_base = lock_user(type, base, len, copy);
2864 /* If the first buffer pointer is bad, this is a fault. But
2865 * subsequent bad buffers will result in a partial write; this
2866 * is realized by filling the vector with null pointers and
2867 * zero lengths. */
2868 if (!vec[i].iov_base) {
2869 if (i == 0) {
2870 err = EFAULT;
2871 goto fail;
2872 } else {
2873 bad_address = true;
2876 if (bad_address) {
2877 len = 0;
2879 if (len > max_len - total_len) {
2880 len = max_len - total_len;
2883 vec[i].iov_len = len;
2884 total_len += len;
2887 unlock_user(target_vec, target_addr, 0);
2888 return vec;
2890 fail:
2891 while (--i >= 0) {
2892 if (tswapal(target_vec[i].iov_len) > 0) {
2893 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2896 unlock_user(target_vec, target_addr, 0);
2897 fail2:
2898 g_free(vec);
2899 errno = err;
2900 return NULL;
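/*
 * Illustrative consequence of the partial-write rule above: for a guest
 * writev() with iov[0] valid and iov[1] pointing at an unmapped page,
 * lock_iovec() succeeds but records iov[1] as { NULL, 0 }, so the host
 * writev() performs a short write, matching what Linux itself reports
 * for a partially bad vector. Only a bad iov[0] faults the whole call.
 */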
2903 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2904 abi_ulong count, int copy)
2906 struct target_iovec *target_vec;
2907 int i;
2909 target_vec = lock_user(VERIFY_READ, target_addr,
2910 count * sizeof(struct target_iovec), 1);
2911 if (target_vec) {
2912 for (i = 0; i < count; i++) {
2913 abi_ulong base = tswapal(target_vec[i].iov_base);
2914 abi_long len = tswapal(target_vec[i].iov_len);
2915 if (len < 0) {
2916 break;
2918 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2920 unlock_user(target_vec, target_addr, 0);
2923 g_free(vec);
2926 static inline int target_to_host_sock_type(int *type)
2928 int host_type = 0;
2929 int target_type = *type;
2931 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2932 case TARGET_SOCK_DGRAM:
2933 host_type = SOCK_DGRAM;
2934 break;
2935 case TARGET_SOCK_STREAM:
2936 host_type = SOCK_STREAM;
2937 break;
2938 default:
2939 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2940 break;
2942 if (target_type & TARGET_SOCK_CLOEXEC) {
2943 #if defined(SOCK_CLOEXEC)
2944 host_type |= SOCK_CLOEXEC;
2945 #else
2946 return -TARGET_EINVAL;
2947 #endif
2949 if (target_type & TARGET_SOCK_NONBLOCK) {
2950 #if defined(SOCK_NONBLOCK)
2951 host_type |= SOCK_NONBLOCK;
2952 #elif !defined(O_NONBLOCK)
2953 return -TARGET_EINVAL;
2954 #endif
2956 *type = host_type;
2957 return 0;
2960 /* Try to emulate socket type flags after socket creation. */
2961 static int sock_flags_fixup(int fd, int target_type)
2963 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2964 if (target_type & TARGET_SOCK_NONBLOCK) {
2965 int flags = fcntl(fd, F_GETFL);
2966 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2967 close(fd);
2968 return -TARGET_EINVAL;
2971 #endif
2972 return fd;
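/*
 * Illustrative fallback (only relevant on hosts lacking SOCK_NONBLOCK):
 * a guest socket(AF_INET, TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK, 0)
 * is created blocking and then flipped to non-blocking with fcntl(),
 * which is observably equivalent from the guest's point of view:
 *
 *     int flags = fcntl(fd, F_GETFL);
 *     fcntl(fd, F_SETFL, O_NONBLOCK | flags);
 */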
2975 /* do_socket() must return target values and target errnos. */
2976 static abi_long do_socket(int domain, int type, int protocol)
2978 int target_type = type;
2979 int ret;
2981 ret = target_to_host_sock_type(&type);
2982 if (ret) {
2983 return ret;
2986 if (domain == PF_NETLINK && !(
2987 #ifdef CONFIG_RTNETLINK
2988 protocol == NETLINK_ROUTE ||
2989 #endif
2990 protocol == NETLINK_KOBJECT_UEVENT ||
2991 protocol == NETLINK_AUDIT)) {
2992 return -TARGET_EPROTONOSUPPORT;
2995 if (domain == AF_PACKET ||
2996 (domain == AF_INET && type == SOCK_PACKET)) {
2997 protocol = tswap16(protocol);
3000 ret = get_errno(socket(domain, type, protocol));
3001 if (ret >= 0) {
3002 ret = sock_flags_fixup(ret, target_type);
3003 if (type == SOCK_PACKET) {
3004 /* Handle an obsolete case:
3005 * if socket type is SOCK_PACKET, bind by name
3006 */
3007 fd_trans_register(ret, &target_packet_trans);
3008 } else if (domain == PF_NETLINK) {
3009 switch (protocol) {
3010 #ifdef CONFIG_RTNETLINK
3011 case NETLINK_ROUTE:
3012 fd_trans_register(ret, &target_netlink_route_trans);
3013 break;
3014 #endif
3015 case NETLINK_KOBJECT_UEVENT:
3016 /* nothing to do: messages are strings */
3017 break;
3018 case NETLINK_AUDIT:
3019 fd_trans_register(ret, &target_netlink_audit_trans);
3020 break;
3021 default:
3022 g_assert_not_reached();
3026 return ret;
3029 /* do_bind() must return target values and target errnos. */
3030 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3031 socklen_t addrlen)
3033 void *addr;
3034 abi_long ret;
3036 if ((int)addrlen < 0) {
3037 return -TARGET_EINVAL;
3040 addr = alloca(addrlen+1);
3042 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3043 if (ret)
3044 return ret;
3046 return get_errno(bind(sockfd, addr, addrlen));
3049 /* do_connect() must return target values and target errnos. */
3050 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3051 socklen_t addrlen)
3053 void *addr;
3054 abi_long ret;
3056 if ((int)addrlen < 0) {
3057 return -TARGET_EINVAL;
3060 addr = alloca(addrlen+1);
3062 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3063 if (ret)
3064 return ret;
3066 return get_errno(safe_connect(sockfd, addr, addrlen));
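/*
 * Illustrative flow shared by do_bind() and do_connect(): the target
 * sockaddr is rewritten into a host-format stack copy before the real
 * syscall runs, e.g. for a connect:
 *
 *     addr = alloca(addrlen + 1);
 *     target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
 *     safe_connect(sockfd, addr, addrlen);
 *
 * The extra byte is presumably scratch headroom for the conversion
 * routine (an assumption from these call sites; the details live in
 * target_to_host_sockaddr()).
 */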
3069 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3070 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3071 int flags, int send)
3073 abi_long ret, len;
3074 struct msghdr msg;
3075 abi_ulong count;
3076 struct iovec *vec;
3077 abi_ulong target_vec;
3079 if (msgp->msg_name) {
3080 msg.msg_namelen = tswap32(msgp->msg_namelen);
3081 msg.msg_name = alloca(msg.msg_namelen+1);
3082 ret = target_to_host_sockaddr(fd, msg.msg_name,
3083 tswapal(msgp->msg_name),
3084 msg.msg_namelen);
3085 if (ret == -TARGET_EFAULT) {
3086 /* For connected sockets msg_name and msg_namelen must
3087 * be ignored, so returning EFAULT immediately is wrong.
3088 * Instead, pass a bad msg_name to the host kernel, and
3089 * let it decide whether to return EFAULT or not.
3090 */
3091 msg.msg_name = (void *)-1;
3092 } else if (ret) {
3093 goto out2;
3095 } else {
3096 msg.msg_name = NULL;
3097 msg.msg_namelen = 0;
3099 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3100 msg.msg_control = alloca(msg.msg_controllen);
3101 memset(msg.msg_control, 0, msg.msg_controllen);
3103 msg.msg_flags = tswap32(msgp->msg_flags);
3105 count = tswapal(msgp->msg_iovlen);
3106 target_vec = tswapal(msgp->msg_iov);
3108 if (count > IOV_MAX) {
3109 /* sendmsg/recvmsg return a different errno for this condition than
3110 * readv/writev, so we must catch it here before lock_iovec() does.
3111 */
3112 ret = -TARGET_EMSGSIZE;
3113 goto out2;
3116 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3117 target_vec, count, send);
3118 if (vec == NULL) {
3119 ret = -host_to_target_errno(errno);
3120 goto out2;
3122 msg.msg_iovlen = count;
3123 msg.msg_iov = vec;
3125 if (send) {
3126 if (fd_trans_target_to_host_data(fd)) {
3127 void *host_msg;
3129 host_msg = g_malloc(msg.msg_iov->iov_len);
3130 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3131 ret = fd_trans_target_to_host_data(fd)(host_msg,
3132 msg.msg_iov->iov_len);
3133 if (ret >= 0) {
3134 msg.msg_iov->iov_base = host_msg;
3135 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3137 g_free(host_msg);
3138 } else {
3139 ret = target_to_host_cmsg(&msg, msgp);
3140 if (ret == 0) {
3141 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3144 } else {
3145 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3146 if (!is_error(ret)) {
3147 len = ret;
3148 if (fd_trans_host_to_target_data(fd)) {
3149 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3150 MIN(msg.msg_iov->iov_len, len));
3151 } else {
3152 ret = host_to_target_cmsg(msgp, &msg);
3154 if (!is_error(ret)) {
3155 msgp->msg_namelen = tswap32(msg.msg_namelen);
3156 msgp->msg_flags = tswap32(msg.msg_flags);
3157 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3158 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3159 msg.msg_name, msg.msg_namelen);
3160 if (ret) {
3161 goto out;
3165 ret = len;
3170 out:
3171 unlock_iovec(vec, target_vec, count, !send);
3172 out2:
3173 return ret;
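/*
 * Note on the msg_controllen handling above: the host control buffer is
 * sized at twice the guest's msg_controllen because host cmsg headers
 * and payloads (e.g. a 16-byte struct timeval versus an 8-byte
 * target_timeval on a 32-bit guest) can be larger than their target
 * counterparts; the factor of two is simple headroom, not an exact
 * computation.
 */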
3176 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3177 int flags, int send)
3179 abi_long ret;
3180 struct target_msghdr *msgp;
3182 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3183 msgp,
3184 target_msg,
3185 send ? 1 : 0)) {
3186 return -TARGET_EFAULT;
3188 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3189 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3190 return ret;
3193 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3194 * so it might not have this *mmsg-specific flag either.
3195 */
3196 #ifndef MSG_WAITFORONE
3197 #define MSG_WAITFORONE 0x10000
3198 #endif
3200 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3201 unsigned int vlen, unsigned int flags,
3202 int send)
3204 struct target_mmsghdr *mmsgp;
3205 abi_long ret = 0;
3206 int i;
3208 if (vlen > UIO_MAXIOV) {
3209 vlen = UIO_MAXIOV;
3212 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3213 if (!mmsgp) {
3214 return -TARGET_EFAULT;
3217 for (i = 0; i < vlen; i++) {
3218 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3219 if (is_error(ret)) {
3220 break;
3222 mmsgp[i].msg_len = tswap32(ret);
3223 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3224 if (flags & MSG_WAITFORONE) {
3225 flags |= MSG_DONTWAIT;
3229 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3231 /* Return number of datagrams sent if we sent any at all;
3232 * otherwise return the error.
3233 */
3234 if (i) {
3235 return i;
3237 return ret;
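/*
 * Worked example (illustrative): for a guest recvmmsg() with vlen == 3
 * where the third receive fails, the loop breaks with i == 2 and the
 * function returns 2; the error itself is dropped, matching the kernel's
 * sendmmsg/recvmmsg contract of reporting partial success.
 */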
3240 /* do_accept4() must return target values and target errnos. */
3241 static abi_long do_accept4(int fd, abi_ulong target_addr,
3242 abi_ulong target_addrlen_addr, int flags)
3244 socklen_t addrlen, ret_addrlen;
3245 void *addr;
3246 abi_long ret;
3247 int host_flags;
3249 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3251 if (target_addr == 0) {
3252 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3255 /* Linux returns EINVAL if the addrlen pointer is invalid */
3256 if (get_user_u32(addrlen, target_addrlen_addr))
3257 return -TARGET_EINVAL;
3259 if ((int)addrlen < 0) {
3260 return -TARGET_EINVAL;
3263 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3264 return -TARGET_EINVAL;
3266 addr = alloca(addrlen);
3268 ret_addrlen = addrlen;
3269 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3270 if (!is_error(ret)) {
3271 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3272 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3273 ret = -TARGET_EFAULT;
3276 return ret;
3279 /* do_getpeername() must return target values and target errnos. */
3280 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3281 abi_ulong target_addrlen_addr)
3283 socklen_t addrlen, ret_addrlen;
3284 void *addr;
3285 abi_long ret;
3287 if (get_user_u32(addrlen, target_addrlen_addr))
3288 return -TARGET_EFAULT;
3290 if ((int)addrlen < 0) {
3291 return -TARGET_EINVAL;
3294 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3295 return -TARGET_EFAULT;
3297 addr = alloca(addrlen);
3299 ret_addrlen = addrlen;
3300 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3301 if (!is_error(ret)) {
3302 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3303 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3304 ret = -TARGET_EFAULT;
3307 return ret;
3310 /* do_getsockname() must return target values and target errnos. */
3311 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3312 abi_ulong target_addrlen_addr)
3314 socklen_t addrlen, ret_addrlen;
3315 void *addr;
3316 abi_long ret;
3318 if (get_user_u32(addrlen, target_addrlen_addr))
3319 return -TARGET_EFAULT;
3321 if ((int)addrlen < 0) {
3322 return -TARGET_EINVAL;
3325 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3326 return -TARGET_EFAULT;
3328 addr = alloca(addrlen);
3330 ret_addrlen = addrlen;
3331 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3332 if (!is_error(ret)) {
3333 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3334 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3335 ret = -TARGET_EFAULT;
3338 return ret;
3341 /* do_socketpair() must return target values and target errnos. */
3342 static abi_long do_socketpair(int domain, int type, int protocol,
3343 abi_ulong target_tab_addr)
3345 int tab[2];
3346 abi_long ret;
3348 target_to_host_sock_type(&type);
3350 ret = get_errno(socketpair(domain, type, protocol, tab));
3351 if (!is_error(ret)) {
3352 if (put_user_s32(tab[0], target_tab_addr)
3353 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3354 ret = -TARGET_EFAULT;
3356 return ret;
3359 /* do_sendto() must return target values and target errnos. */
3360 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3361 abi_ulong target_addr, socklen_t addrlen)
3363 void *addr;
3364 void *host_msg;
3365 void *copy_msg = NULL;
3366 abi_long ret;
3368 if ((int)addrlen < 0) {
3369 return -TARGET_EINVAL;
3372 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3373 if (!host_msg)
3374 return -TARGET_EFAULT;
3375 if (fd_trans_target_to_host_data(fd)) {
3376 copy_msg = host_msg;
3377 host_msg = g_malloc(len);
3378 memcpy(host_msg, copy_msg, len);
3379 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3380 if (ret < 0) {
3381 goto fail;
3384 if (target_addr) {
3385 addr = alloca(addrlen+1);
3386 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3387 if (ret) {
3388 goto fail;
3390 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3391 } else {
3392 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3394 fail:
3395 if (copy_msg) {
3396 g_free(host_msg);
3397 host_msg = copy_msg;
3399 unlock_user(host_msg, msg, 0);
3400 return ret;
3403 /* do_recvfrom() must return target values and target errnos. */
3404 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3405 abi_ulong target_addr,
3406 abi_ulong target_addrlen)
3408 socklen_t addrlen, ret_addrlen;
3409 void *addr;
3410 void *host_msg;
3411 abi_long ret;
3413 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3414 if (!host_msg)
3415 return -TARGET_EFAULT;
3416 if (target_addr) {
3417 if (get_user_u32(addrlen, target_addrlen)) {
3418 ret = -TARGET_EFAULT;
3419 goto fail;
3421 if ((int)addrlen < 0) {
3422 ret = -TARGET_EINVAL;
3423 goto fail;
3425 addr = alloca(addrlen);
3426 ret_addrlen = addrlen;
3427 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3428 addr, &ret_addrlen));
3429 } else {
3430 addr = NULL; /* To keep compiler quiet. */
3431 addrlen = 0; /* To keep compiler quiet. */
3432 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3434 if (!is_error(ret)) {
3435 if (fd_trans_host_to_target_data(fd)) {
3436 abi_long trans;
3437 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3438 if (is_error(trans)) {
3439 ret = trans;
3440 goto fail;
3443 if (target_addr) {
3444 host_to_target_sockaddr(target_addr, addr,
3445 MIN(addrlen, ret_addrlen));
3446 if (put_user_u32(ret_addrlen, target_addrlen)) {
3447 ret = -TARGET_EFAULT;
3448 goto fail;
3451 unlock_user(host_msg, msg, len);
3452 } else {
3453 fail:
3454 unlock_user(host_msg, msg, 0);
3456 return ret;
3459 #ifdef TARGET_NR_socketcall
3460 /* do_socketcall() must return target values and target errnos. */
3461 static abi_long do_socketcall(int num, abi_ulong vptr)
3463 static const unsigned nargs[] = { /* number of arguments per operation */
3464 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3465 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3466 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3467 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3468 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3469 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3470 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3471 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3472 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3473 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3474 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3475 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3476 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3477 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3478 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3479 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3480 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3481 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3482 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3483 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3485 abi_long a[6]; /* max 6 args */
3486 unsigned i;
3488 /* check the range of the first argument num */
3489 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3490 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3491 return -TARGET_EINVAL;
3493 /* ensure we have space for args */
3494 if (nargs[num] > ARRAY_SIZE(a)) {
3495 return -TARGET_EINVAL;
3497 /* collect the arguments in a[] according to nargs[] */
3498 for (i = 0; i < nargs[num]; ++i) {
3499 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3500 return -TARGET_EFAULT;
3503 /* now when we have the args, invoke the appropriate underlying function */
3504 switch (num) {
3505 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3506 return do_socket(a[0], a[1], a[2]);
3507 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3508 return do_bind(a[0], a[1], a[2]);
3509 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3510 return do_connect(a[0], a[1], a[2]);
3511 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3512 return get_errno(listen(a[0], a[1]));
3513 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3514 return do_accept4(a[0], a[1], a[2], 0);
3515 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3516 return do_getsockname(a[0], a[1], a[2]);
3517 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3518 return do_getpeername(a[0], a[1], a[2]);
3519 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3520 return do_socketpair(a[0], a[1], a[2], a[3]);
3521 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3522 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3523 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3524 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3525 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3526 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3527 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3528 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3529 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3530 return get_errno(shutdown(a[0], a[1]));
3531 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3532 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3533 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3534 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3535 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3536 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3537 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3538 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3539 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3540 return do_accept4(a[0], a[1], a[2], a[3]);
3541 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3542 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3543 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3544 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3545 default:
3546 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3547 return -TARGET_EINVAL;
3550 #endif
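/*
 * Illustrative unpacking (not part of the build): an i386-style guest
 * issuing socketcall(TARGET_SYS_SOCKET, vptr) supplies three abi_longs
 * at vptr, which the loop above fetches before dispatching:
 *
 *     get_user_ual(a[0], vptr);                        // domain
 *     get_user_ual(a[1], vptr + sizeof(abi_long));     // type
 *     get_user_ual(a[2], vptr + 2 * sizeof(abi_long)); // protocol
 *     return do_socket(a[0], a[1], a[2]);
 */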
3552 #define N_SHM_REGIONS 32
3554 static struct shm_region {
3555 abi_ulong start;
3556 abi_ulong size;
3557 bool in_use;
3558 } shm_regions[N_SHM_REGIONS];
3560 #ifndef TARGET_SEMID64_DS
3561 /* asm-generic version of this struct */
3562 struct target_semid64_ds
3563 {
3564 struct target_ipc_perm sem_perm;
3565 abi_ulong sem_otime;
3566 #if TARGET_ABI_BITS == 32
3567 abi_ulong __unused1;
3568 #endif
3569 abi_ulong sem_ctime;
3570 #if TARGET_ABI_BITS == 32
3571 abi_ulong __unused2;
3572 #endif
3573 abi_ulong sem_nsems;
3574 abi_ulong __unused3;
3575 abi_ulong __unused4;
3576 };
3577 #endif
3579 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3580 abi_ulong target_addr)
3582 struct target_ipc_perm *target_ip;
3583 struct target_semid64_ds *target_sd;
3585 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3586 return -TARGET_EFAULT;
3587 target_ip = &(target_sd->sem_perm);
3588 host_ip->__key = tswap32(target_ip->__key);
3589 host_ip->uid = tswap32(target_ip->uid);
3590 host_ip->gid = tswap32(target_ip->gid);
3591 host_ip->cuid = tswap32(target_ip->cuid);
3592 host_ip->cgid = tswap32(target_ip->cgid);
3593 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3594 host_ip->mode = tswap32(target_ip->mode);
3595 #else
3596 host_ip->mode = tswap16(target_ip->mode);
3597 #endif
3598 #if defined(TARGET_PPC)
3599 host_ip->__seq = tswap32(target_ip->__seq);
3600 #else
3601 host_ip->__seq = tswap16(target_ip->__seq);
3602 #endif
3603 unlock_user_struct(target_sd, target_addr, 0);
3604 return 0;
3607 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3608 struct ipc_perm *host_ip)
3610 struct target_ipc_perm *target_ip;
3611 struct target_semid64_ds *target_sd;
3613 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3614 return -TARGET_EFAULT;
3615 target_ip = &(target_sd->sem_perm);
3616 target_ip->__key = tswap32(host_ip->__key);
3617 target_ip->uid = tswap32(host_ip->uid);
3618 target_ip->gid = tswap32(host_ip->gid);
3619 target_ip->cuid = tswap32(host_ip->cuid);
3620 target_ip->cgid = tswap32(host_ip->cgid);
3621 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3622 target_ip->mode = tswap32(host_ip->mode);
3623 #else
3624 target_ip->mode = tswap16(host_ip->mode);
3625 #endif
3626 #if defined(TARGET_PPC)
3627 target_ip->__seq = tswap32(host_ip->__seq);
3628 #else
3629 target_ip->__seq = tswap16(host_ip->__seq);
3630 #endif
3631 unlock_user_struct(target_sd, target_addr, 1);
3632 return 0;
3635 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3636 abi_ulong target_addr)
3638 struct target_semid64_ds *target_sd;
3640 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3641 return -TARGET_EFAULT;
3642 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3643 return -TARGET_EFAULT;
3644 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3645 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3646 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3647 unlock_user_struct(target_sd, target_addr, 0);
3648 return 0;
3651 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3652 struct semid_ds *host_sd)
3654 struct target_semid64_ds *target_sd;
3656 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3657 return -TARGET_EFAULT;
3658 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3659 return -TARGET_EFAULT;
3660 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3661 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3662 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3663 unlock_user_struct(target_sd, target_addr, 1);
3664 return 0;
3667 struct target_seminfo {
3668 int semmap;
3669 int semmni;
3670 int semmns;
3671 int semmnu;
3672 int semmsl;
3673 int semopm;
3674 int semume;
3675 int semusz;
3676 int semvmx;
3677 int semaem;
3678 };
3680 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3681 struct seminfo *host_seminfo)
3683 struct target_seminfo *target_seminfo;
3684 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3685 return -TARGET_EFAULT;
3686 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3687 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3688 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3689 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3690 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3691 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3692 __put_user(host_seminfo->semume, &target_seminfo->semume);
3693 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3694 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3695 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3696 unlock_user_struct(target_seminfo, target_addr, 1);
3697 return 0;
3700 union semun {
3701 int val;
3702 struct semid_ds *buf;
3703 unsigned short *array;
3704 struct seminfo *__buf;
3705 };
3707 union target_semun {
3708 int val;
3709 abi_ulong buf;
3710 abi_ulong array;
3711 abi_ulong __buf;
3712 };
3714 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3715 abi_ulong target_addr)
3717 int nsems;
3718 unsigned short *array;
3719 union semun semun;
3720 struct semid_ds semid_ds;
3721 int i, ret;
3723 semun.buf = &semid_ds;
3725 ret = semctl(semid, 0, IPC_STAT, semun);
3726 if (ret == -1)
3727 return get_errno(ret);
3729 nsems = semid_ds.sem_nsems;
3731 *host_array = g_try_new(unsigned short, nsems);
3732 if (!*host_array) {
3733 return -TARGET_ENOMEM;
3735 array = lock_user(VERIFY_READ, target_addr,
3736 nsems*sizeof(unsigned short), 1);
3737 if (!array) {
3738 g_free(*host_array);
3739 return -TARGET_EFAULT;
3742 for (i = 0; i < nsems; i++) {
3743 __get_user((*host_array)[i], &array[i]);
3745 unlock_user(array, target_addr, 0);
3747 return 0;
3750 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3751 unsigned short **host_array)
3753 int nsems;
3754 unsigned short *array;
3755 union semun semun;
3756 struct semid_ds semid_ds;
3757 int i, ret;
3759 semun.buf = &semid_ds;
3761 ret = semctl(semid, 0, IPC_STAT, semun);
3762 if (ret == -1)
3763 return get_errno(ret);
3765 nsems = semid_ds.sem_nsems;
3767 array = lock_user(VERIFY_WRITE, target_addr,
3768 nsems*sizeof(unsigned short), 0);
3769 if (!array)
3770 return -TARGET_EFAULT;
3772 for (i = 0; i < nsems; i++) {
3773 __put_user((*host_array)[i], &array[i]);
3775 g_free(*host_array);
3776 unlock_user(array, target_addr, 1);
3778 return 0;
3781 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3782 abi_ulong target_arg)
3784 union target_semun target_su = { .buf = target_arg };
3785 union semun arg;
3786 struct semid_ds dsarg;
3787 unsigned short *array = NULL;
3788 struct seminfo seminfo;
3789 abi_long ret = -TARGET_EINVAL;
3790 abi_long err;
3791 cmd &= 0xff;
3793 switch (cmd) {
3794 case GETVAL:
3795 case SETVAL:
3796 /* In 64 bit cross-endian situations, we will erroneously pick up
3797 * the wrong half of the union for the "val" element. To rectify
3798 * this, the entire 8-byte structure is byteswapped, followed by
3799 * a swap of the 4 byte val field. In other cases, the data is
3800 * already in proper host byte order. */
3801 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3802 target_su.buf = tswapal(target_su.buf);
3803 arg.val = tswap32(target_su.val);
3804 } else {
3805 arg.val = target_su.val;
3807 ret = get_errno(semctl(semid, semnum, cmd, arg));
3808 break;
3809 case GETALL:
3810 case SETALL:
3811 err = target_to_host_semarray(semid, &array, target_su.array);
3812 if (err)
3813 return err;
3814 arg.array = array;
3815 ret = get_errno(semctl(semid, semnum, cmd, arg));
3816 err = host_to_target_semarray(semid, target_su.array, &array);
3817 if (err)
3818 return err;
3819 break;
3820 case IPC_STAT:
3821 case IPC_SET:
3822 case SEM_STAT:
3823 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3824 if (err)
3825 return err;
3826 arg.buf = &dsarg;
3827 ret = get_errno(semctl(semid, semnum, cmd, arg));
3828 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3829 if (err)
3830 return err;
3831 break;
3832 case IPC_INFO:
3833 case SEM_INFO:
3834 arg.__buf = &seminfo;
3835 ret = get_errno(semctl(semid, semnum, cmd, arg));
3836 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3837 if (err)
3838 return err;
3839 break;
3840 case IPC_RMID:
3841 case GETPID:
3842 case GETNCNT:
3843 case GETZCNT:
3844 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3845 break;
3848 return ret;
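/*
 * Worked example for the GETVAL/SETVAL path above (illustrative): with a
 * 64-bit cross-endian guest, abi_ulong is 8 bytes, so a raw load of the
 * union leaves the 4-byte "val" member in the opposite half from where
 * the host expects it. tswapal() byte-swaps the whole 8-byte union,
 * moving val into the host's half, and tswap32() then restores val's own
 * byte order. For 32-bit targets sizeof(val) == sizeof(buf), so the
 * value is already in place and is used as-is.
 */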
3851 struct target_sembuf {
3852 unsigned short sem_num;
3853 short sem_op;
3854 short sem_flg;
3855 };
3857 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3858 abi_ulong target_addr,
3859 unsigned nsops)
3861 struct target_sembuf *target_sembuf;
3862 int i;
3864 target_sembuf = lock_user(VERIFY_READ, target_addr,
3865 nsops*sizeof(struct target_sembuf), 1);
3866 if (!target_sembuf)
3867 return -TARGET_EFAULT;
3869 for (i = 0; i < nsops; i++) {
3870 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3871 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3872 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3875 unlock_user(target_sembuf, target_addr, 0);
3877 return 0;
3880 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3881 defined(TARGET_NR_semtimedop)
3883 /*
3884 * This macro is required to handle the s390 variants, which pass the
3885 * arguments in a different order than default.
3886 */
3887 #ifdef __s390x__
3888 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3889 (__nsops), (__timeout), (__sops)
3890 #else
3891 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3892 (__nsops), 0, (__sops), (__timeout)
3893 #endif
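/*
 * Illustrative expansion of the macro above:
 *
 *     default:   SEMTIMEDOP_IPC_ARGS(n, sops, t)  ->  n, 0, sops, t
 *     __s390x__: SEMTIMEDOP_IPC_ARGS(n, sops, t)  ->  n, t, sops
 *
 * i.e. s390 passes the timeout where other architectures pass an unused
 * placeholder, so the generic safe_ipc() call below works on both.
 */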
3895 static inline abi_long do_semtimedop(int semid,
3896 abi_long ptr,
3897 unsigned nsops,
3898 abi_long timeout)
3900 struct sembuf sops[nsops];
3901 struct timespec ts, *pts = NULL;
3902 abi_long ret;
3904 if (timeout) {
3905 pts = &ts;
3906 if (target_to_host_timespec(pts, timeout)) {
3907 return -TARGET_EFAULT;
3911 if (target_to_host_sembuf(sops, ptr, nsops))
3912 return -TARGET_EFAULT;
3914 ret = -TARGET_ENOSYS;
3915 #ifdef __NR_semtimedop
3916 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3917 #endif
3918 #ifdef __NR_ipc
3919 if (ret == -TARGET_ENOSYS) {
3920 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3921 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3922 }
3923 #endif
3924 return ret;
3925 }
3926 #endif
3928 struct target_msqid_ds
3930 struct target_ipc_perm msg_perm;
3931 abi_ulong msg_stime;
3932 #if TARGET_ABI_BITS == 32
3933 abi_ulong __unused1;
3934 #endif
3935 abi_ulong msg_rtime;
3936 #if TARGET_ABI_BITS == 32
3937 abi_ulong __unused2;
3938 #endif
3939 abi_ulong msg_ctime;
3940 #if TARGET_ABI_BITS == 32
3941 abi_ulong __unused3;
3942 #endif
3943 abi_ulong __msg_cbytes;
3944 abi_ulong msg_qnum;
3945 abi_ulong msg_qbytes;
3946 abi_ulong msg_lspid;
3947 abi_ulong msg_lrpid;
3948 abi_ulong __unused4;
3949 abi_ulong __unused5;
3950 };
3952 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3953 abi_ulong target_addr)
3955 struct target_msqid_ds *target_md;
3957 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3958 return -TARGET_EFAULT;
3959 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3960 return -TARGET_EFAULT;
3961 host_md->msg_stime = tswapal(target_md->msg_stime);
3962 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3963 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3964 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3965 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3966 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3967 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3968 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3969 unlock_user_struct(target_md, target_addr, 0);
3970 return 0;
3973 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3974 struct msqid_ds *host_md)
3976 struct target_msqid_ds *target_md;
3978 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3979 return -TARGET_EFAULT;
3980 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3981 return -TARGET_EFAULT;
3982 target_md->msg_stime = tswapal(host_md->msg_stime);
3983 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3984 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3985 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3986 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3987 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3988 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3989 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3990 unlock_user_struct(target_md, target_addr, 1);
3991 return 0;
3994 struct target_msginfo {
3995 int msgpool;
3996 int msgmap;
3997 int msgmax;
3998 int msgmnb;
3999 int msgmni;
4000 int msgssz;
4001 int msgtql;
4002 unsigned short int msgseg;
4003 };
4005 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4006 struct msginfo *host_msginfo)
4008 struct target_msginfo *target_msginfo;
4009 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4010 return -TARGET_EFAULT;
4011 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4012 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4013 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4014 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4015 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4016 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4017 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4018 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4019 unlock_user_struct(target_msginfo, target_addr, 1);
4020 return 0;
4023 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4025 struct msqid_ds dsarg;
4026 struct msginfo msginfo;
4027 abi_long ret = -TARGET_EINVAL;
4029 cmd &= 0xff;
4031 switch (cmd) {
4032 case IPC_STAT:
4033 case IPC_SET:
4034 case MSG_STAT:
4035 if (target_to_host_msqid_ds(&dsarg,ptr))
4036 return -TARGET_EFAULT;
4037 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4038 if (host_to_target_msqid_ds(ptr,&dsarg))
4039 return -TARGET_EFAULT;
4040 break;
4041 case IPC_RMID:
4042 ret = get_errno(msgctl(msgid, cmd, NULL));
4043 break;
4044 case IPC_INFO:
4045 case MSG_INFO:
4046 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4047 if (host_to_target_msginfo(ptr, &msginfo))
4048 return -TARGET_EFAULT;
4049 break;
4052 return ret;
4055 struct target_msgbuf {
4056 abi_long mtype;
4057 char mtext[1];
4058 };
4060 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4061 ssize_t msgsz, int msgflg)
4063 struct target_msgbuf *target_mb;
4064 struct msgbuf *host_mb;
4065 abi_long ret = 0;
4067 if (msgsz < 0) {
4068 return -TARGET_EINVAL;
4071 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4072 return -TARGET_EFAULT;
4073 host_mb = g_try_malloc(msgsz + sizeof(long));
4074 if (!host_mb) {
4075 unlock_user_struct(target_mb, msgp, 0);
4076 return -TARGET_ENOMEM;
4078 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4079 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4080 ret = -TARGET_ENOSYS;
4081 #ifdef __NR_msgsnd
4082 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4083 #endif
4084 #ifdef __NR_ipc
4085 if (ret == -TARGET_ENOSYS) {
4086 #ifdef __s390x__
4087 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4088 host_mb));
4089 #else
4090 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4091 host_mb, 0));
4092 #endif
4094 #endif
4095 g_free(host_mb);
4096 unlock_user_struct(target_mb, msgp, 0);
4098 return ret;
4101 #ifdef __NR_ipc
4102 #if defined(__sparc__)
4103 /* For msgrcv, SPARC does not use the kludge on the final 2 arguments. */
4104 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4105 #elif defined(__s390x__)
4106 /* The s390 sys_ipc variant has only five parameters. */
4107 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4108 ((long int[]){(long int)__msgp, __msgtyp})
4109 #else
4110 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4111 ((long int[]){(long int)__msgp, __msgtyp}), 0
4112 #endif
4113 #endif
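/*
 * Editor's note: on the generic sys_ipc path the kernel expects the
 * msgp/msgtyp pair packed into a temporary array (the historical
 * "ipc kludge"), which the macro builds inline, so the fallback call
 * below is equivalent to (sketch):
 *
 *   safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *            (long int[]){ (long int)host_mb, msgtyp }, 0);
 */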
4115 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4116 ssize_t msgsz, abi_long msgtyp,
4117 int msgflg)
4119 struct target_msgbuf *target_mb;
4120 char *target_mtext;
4121 struct msgbuf *host_mb;
4122 abi_long ret = 0;
4124 if (msgsz < 0) {
4125 return -TARGET_EINVAL;
4128 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4129 return -TARGET_EFAULT;
4131 host_mb = g_try_malloc(msgsz + sizeof(long));
4132 if (!host_mb) {
4133 ret = -TARGET_ENOMEM;
4134 goto end;
4136 ret = -TARGET_ENOSYS;
4137 #ifdef __NR_msgrcv
4138 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4139 #endif
4140 #ifdef __NR_ipc
4141 if (ret == -TARGET_ENOSYS) {
4142 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4143 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4145 #endif
4147 if (ret > 0) {
4148 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4149 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4150 if (!target_mtext) {
4151 ret = -TARGET_EFAULT;
4152 goto end;
4154 memcpy(target_mb->mtext, host_mb->mtext, ret);
4155 unlock_user(target_mtext, target_mtext_addr, ret);
4158 target_mb->mtype = tswapal(host_mb->mtype);
4160 end:
4161 if (target_mb)
4162 unlock_user_struct(target_mb, msgp, 1);
4163 g_free(host_mb);
4164 return ret;
4167 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4168 abi_ulong target_addr)
4170 struct target_shmid_ds *target_sd;
4172 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4173 return -TARGET_EFAULT;
4174 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4175 return -TARGET_EFAULT;
4176 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4177 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4178 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4179 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4180 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4181 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4182 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4183 unlock_user_struct(target_sd, target_addr, 0);
4184 return 0;
4187 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4188 struct shmid_ds *host_sd)
4190 struct target_shmid_ds *target_sd;
4192 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4193 return -TARGET_EFAULT;
4194 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4195 return -TARGET_EFAULT;
4196 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4197 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4198 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4199 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4200 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4201 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4202 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4203 unlock_user_struct(target_sd, target_addr, 1);
4204 return 0;
4207 struct target_shminfo {
4208 abi_ulong shmmax;
4209 abi_ulong shmmin;
4210 abi_ulong shmmni;
4211 abi_ulong shmseg;
4212 abi_ulong shmall;
4213 };
4215 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4216 struct shminfo *host_shminfo)
4218 struct target_shminfo *target_shminfo;
4219 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4220 return -TARGET_EFAULT;
4221 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4222 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4223 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4224 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4225 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4226 unlock_user_struct(target_shminfo, target_addr, 1);
4227 return 0;
4230 struct target_shm_info {
4231 int used_ids;
4232 abi_ulong shm_tot;
4233 abi_ulong shm_rss;
4234 abi_ulong shm_swp;
4235 abi_ulong swap_attempts;
4236 abi_ulong swap_successes;
4237 };
4239 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4240 struct shm_info *host_shm_info)
4242 struct target_shm_info *target_shm_info;
4243 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4244 return -TARGET_EFAULT;
4245 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4246 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4247 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4248 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4249 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4250 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4251 unlock_user_struct(target_shm_info, target_addr, 1);
4252 return 0;
4255 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4257 struct shmid_ds dsarg;
4258 struct shminfo shminfo;
4259 struct shm_info shm_info;
4260 abi_long ret = -TARGET_EINVAL;
4262 cmd &= 0xff;
4264 switch(cmd) {
4265 case IPC_STAT:
4266 case IPC_SET:
4267 case SHM_STAT:
4268 if (target_to_host_shmid_ds(&dsarg, buf))
4269 return -TARGET_EFAULT;
4270 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4271 if (host_to_target_shmid_ds(buf, &dsarg))
4272 return -TARGET_EFAULT;
4273 break;
4274 case IPC_INFO:
4275 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4276 if (host_to_target_shminfo(buf, &shminfo))
4277 return -TARGET_EFAULT;
4278 break;
4279 case SHM_INFO:
4280 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4281 if (host_to_target_shm_info(buf, &shm_info))
4282 return -TARGET_EFAULT;
4283 break;
4284 case IPC_RMID:
4285 case SHM_LOCK:
4286 case SHM_UNLOCK:
4287 ret = get_errno(shmctl(shmid, cmd, NULL));
4288 break;
4291 return ret;
4294 #ifndef TARGET_FORCE_SHMLBA
4295 /* For most architectures, SHMLBA is the same as the page size;
4296 * some architectures have larger values, in which case they should
4297 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4298 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4299 * and defining its own value for SHMLBA.
4301 * The kernel also permits SHMLBA to be set by the architecture to a
4302 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4303 * this means that addresses are rounded to the large size if
4304 * SHM_RND is set but addresses not aligned to that size are not rejected
4305 * as long as they are at least page-aligned. Since the only architecture
4306 * which uses this is ia64, this code doesn't provide for that oddity.
4307 */
4308 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4310 return TARGET_PAGE_SIZE;
4312 #endif
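/*
 * Editor's sketch (hypothetical, not from this file): a target with,
 * say, a 16K SHMLBA would instead define TARGET_FORCE_SHMLBA in its
 * target headers and supply its own version:
 *
 * static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 * {
 *     return 0x4000;
 * }
 */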
4314 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4315 int shmid, abi_ulong shmaddr, int shmflg)
4317 abi_long raddr;
4318 void *host_raddr;
4319 struct shmid_ds shm_info;
4320 int i,ret;
4321 abi_ulong shmlba;
4323 /* find out the length of the shared memory segment */
4324 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4325 if (is_error(ret)) {
4326 /* can't get length, bail out */
4327 return ret;
4330 shmlba = target_shmlba(cpu_env);
4332 if (shmaddr & (shmlba - 1)) {
4333 if (shmflg & SHM_RND) {
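/* e.g. shmlba = 0x1000, shmaddr = 0x12345 -> rounded down to 0x12000 */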
4334 shmaddr &= ~(shmlba - 1);
4335 } else {
4336 return -TARGET_EINVAL;
4339 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4340 return -TARGET_EINVAL;
4343 mmap_lock();
4345 if (shmaddr)
4346 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4347 else {
4348 abi_ulong mmap_start;
4350 /* In order to use the host shmat, we need to honor host SHMLBA. */
4351 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4353 if (mmap_start == -1) {
4354 errno = ENOMEM;
4355 host_raddr = (void *)-1;
4356 } else
4357 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4360 if (host_raddr == (void *)-1) {
4361 mmap_unlock();
4362 return get_errno((long)host_raddr);
4364 raddr=h2g((unsigned long)host_raddr);
4366 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4367 PAGE_VALID | PAGE_READ |
4368 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4370 for (i = 0; i < N_SHM_REGIONS; i++) {
4371 if (!shm_regions[i].in_use) {
4372 shm_regions[i].in_use = true;
4373 shm_regions[i].start = raddr;
4374 shm_regions[i].size = shm_info.shm_segsz;
4375 break;
4379 mmap_unlock();
4380 return raddr;
4384 static inline abi_long do_shmdt(abi_ulong shmaddr)
4386 int i;
4387 abi_long rv;
4389 mmap_lock();
4391 for (i = 0; i < N_SHM_REGIONS; ++i) {
4392 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4393 shm_regions[i].in_use = false;
4394 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4395 break;
4398 rv = get_errno(shmdt(g2h(shmaddr)));
4400 mmap_unlock();
4402 return rv;
4405 #ifdef TARGET_NR_ipc
4406 /* ??? This only works with linear mappings. */
4407 /* do_ipc() must return target values and target errnos. */
4408 static abi_long do_ipc(CPUArchState *cpu_env,
4409 unsigned int call, abi_long first,
4410 abi_long second, abi_long third,
4411 abi_long ptr, abi_long fifth)
4413 int version;
4414 abi_long ret = 0;
4416 version = call >> 16;
4417 call &= 0xffff;
4419 switch (call) {
4420 case IPCOP_semop:
4421 ret = do_semtimedop(first, ptr, second, 0);
4422 break;
4423 case IPCOP_semtimedop:
4424 /*
4425 * The s390 sys_ipc variant has only five parameters instead of six
4426 * (as in the default variant); the only difference is the handling of
4427 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4428 * to a struct timespec while the generic variant uses the fifth parameter.
4429 */
4430 #if defined(TARGET_S390X)
4431 ret = do_semtimedop(first, ptr, second, third);
4432 #else
4433 ret = do_semtimedop(first, ptr, second, fifth);
4434 #endif
4435 break;
4437 case IPCOP_semget:
4438 ret = get_errno(semget(first, second, third));
4439 break;
4441 case IPCOP_semctl: {
4442 /* The semun argument to semctl is passed by value, so dereference the
4443 * ptr argument. */
4444 abi_ulong atptr;
4445 get_user_ual(atptr, ptr);
4446 ret = do_semctl(first, second, third, atptr);
4447 break;
4450 case IPCOP_msgget:
4451 ret = get_errno(msgget(first, second));
4452 break;
4454 case IPCOP_msgsnd:
4455 ret = do_msgsnd(first, ptr, second, third);
4456 break;
4458 case IPCOP_msgctl:
4459 ret = do_msgctl(first, second, ptr);
4460 break;
4462 case IPCOP_msgrcv:
4463 switch (version) {
4464 case 0:
4466 struct target_ipc_kludge {
4467 abi_long msgp;
4468 abi_long msgtyp;
4469 } *tmp;
4471 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4472 ret = -TARGET_EFAULT;
4473 break;
4476 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4478 unlock_user_struct(tmp, ptr, 0);
4479 break;
4481 default:
4482 ret = do_msgrcv(first, ptr, second, fifth, third);
4484 break;
4486 case IPCOP_shmat:
4487 switch (version) {
4488 default:
4490 abi_ulong raddr;
4491 raddr = do_shmat(cpu_env, first, ptr, second);
4492 if (is_error(raddr))
4493 return get_errno(raddr);
4494 if (put_user_ual(raddr, third))
4495 return -TARGET_EFAULT;
4496 break;
4498 case 1:
4499 ret = -TARGET_EINVAL;
4500 break;
4502 break;
4503 case IPCOP_shmdt:
4504 ret = do_shmdt(ptr);
4505 break;
4507 case IPCOP_shmget:
4508 /* IPC_* flag values are the same on all linux platforms */
4509 ret = get_errno(shmget(first, second, third));
4510 break;
4512 /* IPC_* and SHM_* command values are the same on all linux platforms */
4513 case IPCOP_shmctl:
4514 ret = do_shmctl(first, second, ptr);
4515 break;
4516 default:
4517 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4518 call, version);
4519 ret = -TARGET_ENOSYS;
4520 break;
4522 return ret;
4524 #endif
4526 /* kernel structure types definitions */
4528 #define STRUCT(name, ...) STRUCT_ ## name,
4529 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4530 enum {
4531 #include "syscall_types.h"
4532 STRUCT_MAX
4533 };
4534 #undef STRUCT
4535 #undef STRUCT_SPECIAL
4537 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4538 #define STRUCT_SPECIAL(name)
4539 #include "syscall_types.h"
4540 #undef STRUCT
4541 #undef STRUCT_SPECIAL
4543 #define MAX_STRUCT_SIZE 4096
4545 #ifdef CONFIG_FIEMAP
4546 /* So fiemap access checks don't overflow on 32 bit systems.
4547 * This is very slightly smaller than the limit imposed by
4548 * the underlying kernel.
4549 */
4550 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4551 / sizeof(struct fiemap_extent))
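/*
 * Editor's note: assuming typical 64-bit layouts (sizeof(struct fiemap)
 * == 32, sizeof(struct fiemap_extent) == 56 -- an assumption, not taken
 * from this file), this caps fm_extent_count at roughly
 * (UINT_MAX - 32) / 56, i.e. about 76 million extents, so the outbufsz
 * computation below cannot wrap around on 32-bit hosts.
 */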
4553 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4554 int fd, int cmd, abi_long arg)
4556 /* The parameter for this ioctl is a struct fiemap followed
4557 * by an array of struct fiemap_extent whose size is set
4558 * in fiemap->fm_extent_count. The array is filled in by the
4559 * ioctl.
4560 */
4561 int target_size_in, target_size_out;
4562 struct fiemap *fm;
4563 const argtype *arg_type = ie->arg_type;
4564 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4565 void *argptr, *p;
4566 abi_long ret;
4567 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4568 uint32_t outbufsz;
4569 int free_fm = 0;
4571 assert(arg_type[0] == TYPE_PTR);
4572 assert(ie->access == IOC_RW);
4573 arg_type++;
4574 target_size_in = thunk_type_size(arg_type, 0);
4575 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4576 if (!argptr) {
4577 return -TARGET_EFAULT;
4579 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4580 unlock_user(argptr, arg, 0);
4581 fm = (struct fiemap *)buf_temp;
4582 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4583 return -TARGET_EINVAL;
4586 outbufsz = sizeof (*fm) +
4587 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4589 if (outbufsz > MAX_STRUCT_SIZE) {
4590 /* We can't fit all the extents into the fixed size buffer.
4591 * Allocate one that is large enough and use it instead.
4592 */
4593 fm = g_try_malloc(outbufsz);
4594 if (!fm) {
4595 return -TARGET_ENOMEM;
4597 memcpy(fm, buf_temp, sizeof(struct fiemap));
4598 free_fm = 1;
4600 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4601 if (!is_error(ret)) {
4602 target_size_out = target_size_in;
4603 /* An extent_count of 0 means we were only counting the extents
4604 * so there are no structs to copy.
4605 */
4606 if (fm->fm_extent_count != 0) {
4607 target_size_out += fm->fm_mapped_extents * extent_size;
4609 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4610 if (!argptr) {
4611 ret = -TARGET_EFAULT;
4612 } else {
4613 /* Convert the struct fiemap */
4614 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4615 if (fm->fm_extent_count != 0) {
4616 p = argptr + target_size_in;
4617 /* ...and then all the struct fiemap_extents */
4618 for (i = 0; i < fm->fm_mapped_extents; i++) {
4619 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4620 THUNK_TARGET);
4621 p += extent_size;
4624 unlock_user(argptr, arg, target_size_out);
4627 if (free_fm) {
4628 g_free(fm);
4630 return ret;
4632 #endif
4634 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4635 int fd, int cmd, abi_long arg)
4637 const argtype *arg_type = ie->arg_type;
4638 int target_size;
4639 void *argptr;
4640 int ret;
4641 struct ifconf *host_ifconf;
4642 uint32_t outbufsz;
4643 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4644 int target_ifreq_size;
4645 int nb_ifreq;
4646 int free_buf = 0;
4647 int i;
4648 int target_ifc_len;
4649 abi_long target_ifc_buf;
4650 int host_ifc_len;
4651 char *host_ifc_buf;
4653 assert(arg_type[0] == TYPE_PTR);
4654 assert(ie->access == IOC_RW);
4656 arg_type++;
4657 target_size = thunk_type_size(arg_type, 0);
4659 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4660 if (!argptr)
4661 return -TARGET_EFAULT;
4662 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4663 unlock_user(argptr, arg, 0);
4665 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4666 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4667 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4669 if (target_ifc_buf != 0) {
4670 target_ifc_len = host_ifconf->ifc_len;
4671 nb_ifreq = target_ifc_len / target_ifreq_size;
4672 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4674 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4675 if (outbufsz > MAX_STRUCT_SIZE) {
4676 /*
4677 * We can't fit all the ifreq entries into the fixed size buffer.
4678 * Allocate one that is large enough and use it instead.
4679 */
4680 host_ifconf = malloc(outbufsz);
4681 if (!host_ifconf) {
4682 return -TARGET_ENOMEM;
4684 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4685 free_buf = 1;
4687 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4689 host_ifconf->ifc_len = host_ifc_len;
4690 } else {
4691 host_ifc_buf = NULL;
4693 host_ifconf->ifc_buf = host_ifc_buf;
4695 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4696 if (!is_error(ret)) {
4697 /* convert host ifc_len to target ifc_len */
4699 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4700 target_ifc_len = nb_ifreq * target_ifreq_size;
4701 host_ifconf->ifc_len = target_ifc_len;
4703 /* restore target ifc_buf */
4705 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4707 /* copy struct ifconf to target user */
4709 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4710 if (!argptr)
4711 return -TARGET_EFAULT;
4712 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4713 unlock_user(argptr, arg, target_size);
4715 if (target_ifc_buf != 0) {
4716 /* copy ifreq[] to target user */
4717 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4718 for (i = 0; i < nb_ifreq ; i++) {
4719 thunk_convert(argptr + i * target_ifreq_size,
4720 host_ifc_buf + i * sizeof(struct ifreq),
4721 ifreq_arg_type, THUNK_TARGET);
4723 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4727 if (free_buf) {
4728 free(host_ifconf);
4731 return ret;
4734 #if defined(CONFIG_USBFS)
4735 #if HOST_LONG_BITS > 64
4736 #error USBDEVFS thunks do not support >64 bit hosts yet.
4737 #endif
4738 struct live_urb {
4739 uint64_t target_urb_adr;
4740 uint64_t target_buf_adr;
4741 char *target_buf_ptr;
4742 struct usbdevfs_urb host_urb;
4743 };
4745 static GHashTable *usbdevfs_urb_hashtable(void)
4747 static GHashTable *urb_hashtable;
4749 if (!urb_hashtable) {
4750 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4752 return urb_hashtable;
4755 static void urb_hashtable_insert(struct live_urb *urb)
4757 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4758 g_hash_table_insert(urb_hashtable, urb, urb);
4761 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4763 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4764 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4767 static void urb_hashtable_remove(struct live_urb *urb)
4769 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4770 g_hash_table_remove(urb_hashtable, urb);
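/*
 * Editor's note: the table is keyed with g_int64_hash/g_int64_equal on
 * the struct pointer itself. That works because target_urb_adr is the
 * first member of struct live_urb, so a pointer to the struct is also
 * a valid pointer to its 64-bit key, matching the &target_urb_adr
 * lookups above.
 */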
4773 static abi_long
4774 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4775 int fd, int cmd, abi_long arg)
4777 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4778 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4779 struct live_urb *lurb;
4780 void *argptr;
4781 uint64_t hurb;
4782 int target_size;
4783 uintptr_t target_urb_adr;
4784 abi_long ret;
4786 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4788 memset(buf_temp, 0, sizeof(uint64_t));
4789 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4790 if (is_error(ret)) {
4791 return ret;
4794 memcpy(&hurb, buf_temp, sizeof(uint64_t));
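/* The ioctl wrote back the pointer we submitted, i.e. &lurb->host_urb;
 * subtracting the member offset recovers the containing live_urb
 * (container_of done by hand). */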
4795 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4796 if (!lurb->target_urb_adr) {
4797 return -TARGET_EFAULT;
4799 urb_hashtable_remove(lurb);
4800 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4801 lurb->host_urb.buffer_length);
4802 lurb->target_buf_ptr = NULL;
4804 /* restore the guest buffer pointer */
4805 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4807 /* update the guest urb struct */
4808 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4809 if (!argptr) {
4810 g_free(lurb);
4811 return -TARGET_EFAULT;
4813 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4814 unlock_user(argptr, lurb->target_urb_adr, target_size);
4816 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4817 /* write back the urb handle */
4818 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4819 if (!argptr) {
4820 g_free(lurb);
4821 return -TARGET_EFAULT;
4824 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4825 target_urb_adr = lurb->target_urb_adr;
4826 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4827 unlock_user(argptr, arg, target_size);
4829 g_free(lurb);
4830 return ret;
4833 static abi_long
4834 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4835 uint8_t *buf_temp __attribute__((unused)),
4836 int fd, int cmd, abi_long arg)
4838 struct live_urb *lurb;
4840 /* Map the target address back to the host URB with its metadata. */
4841 lurb = urb_hashtable_lookup(arg);
4842 if (!lurb) {
4843 return -TARGET_EFAULT;
4845 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4848 static abi_long
4849 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4850 int fd, int cmd, abi_long arg)
4852 const argtype *arg_type = ie->arg_type;
4853 int target_size;
4854 abi_long ret;
4855 void *argptr;
4856 int rw_dir;
4857 struct live_urb *lurb;
4859 /*
4860 * Each submitted URB needs to map to a unique ID for the
4861 * kernel, and that unique ID needs to be a pointer to
4862 * host memory. Hence, we need to malloc for each URB.
4863 * Isochronous transfers have a variable length struct.
4864 */
4865 arg_type++;
4866 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4868 /* construct host copy of urb and metadata */
4869 lurb = g_try_malloc0(sizeof(struct live_urb));
4870 if (!lurb) {
4871 return -TARGET_ENOMEM;
4874 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4875 if (!argptr) {
4876 g_free(lurb);
4877 return -TARGET_EFAULT;
4879 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4880 unlock_user(argptr, arg, 0);
4882 lurb->target_urb_adr = arg;
4883 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4885 /* buffer space used depends on endpoint type so lock the entire buffer */
4886 /* control type urbs should check the buffer contents for true direction */
4887 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4888 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4889 lurb->host_urb.buffer_length, 1);
4890 if (lurb->target_buf_ptr == NULL) {
4891 g_free(lurb);
4892 return -TARGET_EFAULT;
4895 /* update buffer pointer in host copy */
4896 lurb->host_urb.buffer = lurb->target_buf_ptr;
4898 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4899 if (is_error(ret)) {
4900 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4901 g_free(lurb);
4902 } else {
4903 urb_hashtable_insert(lurb);
4906 return ret;
4908 #endif /* CONFIG_USBFS */
4910 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4911 int cmd, abi_long arg)
4913 void *argptr;
4914 struct dm_ioctl *host_dm;
4915 abi_long guest_data;
4916 uint32_t guest_data_size;
4917 int target_size;
4918 const argtype *arg_type = ie->arg_type;
4919 abi_long ret;
4920 void *big_buf = NULL;
4921 char *host_data;
4923 arg_type++;
4924 target_size = thunk_type_size(arg_type, 0);
4925 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4926 if (!argptr) {
4927 ret = -TARGET_EFAULT;
4928 goto out;
4930 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4931 unlock_user(argptr, arg, 0);
4933 /* buf_temp is too small, so fetch things into a bigger buffer */
4934 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4935 memcpy(big_buf, buf_temp, target_size);
4936 buf_temp = big_buf;
4937 host_dm = big_buf;
4939 guest_data = arg + host_dm->data_start;
4940 if ((guest_data - arg) < 0) {
4941 ret = -TARGET_EINVAL;
4942 goto out;
4944 guest_data_size = host_dm->data_size - host_dm->data_start;
4945 host_data = (char*)host_dm + host_dm->data_start;
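/*
 * Editor's note: a dm_ioctl buffer is a fixed header followed by a
 * variable payload in the same allocation; data_start is the payload's
 * offset from the start of the header and data_size the size of the
 * whole buffer, so the payload occupies [data_start, data_size) both
 * in the guest copy and in our host copy.
 */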
4947 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4948 if (!argptr) {
4949 ret = -TARGET_EFAULT;
4950 goto out;
4953 switch (ie->host_cmd) {
4954 case DM_REMOVE_ALL:
4955 case DM_LIST_DEVICES:
4956 case DM_DEV_CREATE:
4957 case DM_DEV_REMOVE:
4958 case DM_DEV_SUSPEND:
4959 case DM_DEV_STATUS:
4960 case DM_DEV_WAIT:
4961 case DM_TABLE_STATUS:
4962 case DM_TABLE_CLEAR:
4963 case DM_TABLE_DEPS:
4964 case DM_LIST_VERSIONS:
4965 /* no input data */
4966 break;
4967 case DM_DEV_RENAME:
4968 case DM_DEV_SET_GEOMETRY:
4969 /* data contains only strings */
4970 memcpy(host_data, argptr, guest_data_size);
4971 break;
4972 case DM_TARGET_MSG:
4973 memcpy(host_data, argptr, guest_data_size);
4974 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4975 break;
4976 case DM_TABLE_LOAD:
4978 void *gspec = argptr;
4979 void *cur_data = host_data;
4980 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4981 int spec_size = thunk_type_size(arg_type, 0);
4982 int i;
4984 for (i = 0; i < host_dm->target_count; i++) {
4985 struct dm_target_spec *spec = cur_data;
4986 uint32_t next;
4987 int slen;
4989 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4990 slen = strlen((char*)gspec + spec_size) + 1;
4991 next = spec->next;
4992 spec->next = sizeof(*spec) + slen;
4993 strcpy((char*)&spec[1], gspec + spec_size);
4994 gspec += next;
4995 cur_data += spec->next;
4997 break;
4999 default:
5000 ret = -TARGET_EINVAL;
5001 unlock_user(argptr, guest_data, 0);
5002 goto out;
5004 unlock_user(argptr, guest_data, 0);
5006 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5007 if (!is_error(ret)) {
5008 guest_data = arg + host_dm->data_start;
5009 guest_data_size = host_dm->data_size - host_dm->data_start;
5010 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5011 switch (ie->host_cmd) {
5012 case DM_REMOVE_ALL:
5013 case DM_DEV_CREATE:
5014 case DM_DEV_REMOVE:
5015 case DM_DEV_RENAME:
5016 case DM_DEV_SUSPEND:
5017 case DM_DEV_STATUS:
5018 case DM_TABLE_LOAD:
5019 case DM_TABLE_CLEAR:
5020 case DM_TARGET_MSG:
5021 case DM_DEV_SET_GEOMETRY:
5022 /* no return data */
5023 break;
5024 case DM_LIST_DEVICES:
5026 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5027 uint32_t remaining_data = guest_data_size;
5028 void *cur_data = argptr;
5029 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5030 int nl_size = 12; /* can't use thunk_size due to alignment */
5032 while (1) {
5033 uint32_t next = nl->next;
5034 if (next) {
5035 nl->next = nl_size + (strlen(nl->name) + 1);
5037 if (remaining_data < nl->next) {
5038 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5039 break;
5041 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5042 strcpy(cur_data + nl_size, nl->name);
5043 cur_data += nl->next;
5044 remaining_data -= nl->next;
5045 if (!next) {
5046 break;
5048 nl = (void*)nl + next;
5050 break;
5052 case DM_DEV_WAIT:
5053 case DM_TABLE_STATUS:
5055 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5056 void *cur_data = argptr;
5057 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5058 int spec_size = thunk_type_size(arg_type, 0);
5059 int i;
5061 for (i = 0; i < host_dm->target_count; i++) {
5062 uint32_t next = spec->next;
5063 int slen = strlen((char*)&spec[1]) + 1;
5064 spec->next = (cur_data - argptr) + spec_size + slen;
5065 if (guest_data_size < spec->next) {
5066 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5067 break;
5069 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5070 strcpy(cur_data + spec_size, (char*)&spec[1]);
5071 cur_data = argptr + spec->next;
5072 spec = (void*)host_dm + host_dm->data_start + next;
5074 break;
5076 case DM_TABLE_DEPS:
5078 void *hdata = (void*)host_dm + host_dm->data_start;
5079 int count = *(uint32_t*)hdata;
5080 uint64_t *hdev = hdata + 8;
5081 uint64_t *gdev = argptr + 8;
5082 int i;
5084 *(uint32_t*)argptr = tswap32(count);
5085 for (i = 0; i < count; i++) {
5086 *gdev = tswap64(*hdev);
5087 gdev++;
5088 hdev++;
5090 break;
5092 case DM_LIST_VERSIONS:
5094 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5095 uint32_t remaining_data = guest_data_size;
5096 void *cur_data = argptr;
5097 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5098 int vers_size = thunk_type_size(arg_type, 0);
5100 while (1) {
5101 uint32_t next = vers->next;
5102 if (next) {
5103 vers->next = vers_size + (strlen(vers->name) + 1);
5105 if (remaining_data < vers->next) {
5106 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5107 break;
5109 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5110 strcpy(cur_data + vers_size, vers->name);
5111 cur_data += vers->next;
5112 remaining_data -= vers->next;
5113 if (!next) {
5114 break;
5116 vers = (void*)vers + next;
5118 break;
5120 default:
5121 unlock_user(argptr, guest_data, 0);
5122 ret = -TARGET_EINVAL;
5123 goto out;
5125 unlock_user(argptr, guest_data, guest_data_size);
5127 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5128 if (!argptr) {
5129 ret = -TARGET_EFAULT;
5130 goto out;
5132 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5133 unlock_user(argptr, arg, target_size);
5135 out:
5136 g_free(big_buf);
5137 return ret;
5140 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5141 int cmd, abi_long arg)
5143 void *argptr;
5144 int target_size;
5145 const argtype *arg_type = ie->arg_type;
5146 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5147 abi_long ret;
5149 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5150 struct blkpg_partition host_part;
5152 /* Read and convert blkpg */
5153 arg_type++;
5154 target_size = thunk_type_size(arg_type, 0);
5155 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5156 if (!argptr) {
5157 ret = -TARGET_EFAULT;
5158 goto out;
5160 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5161 unlock_user(argptr, arg, 0);
5163 switch (host_blkpg->op) {
5164 case BLKPG_ADD_PARTITION:
5165 case BLKPG_DEL_PARTITION:
5166 /* payload is struct blkpg_partition */
5167 break;
5168 default:
5169 /* Unknown opcode */
5170 ret = -TARGET_EINVAL;
5171 goto out;
5174 /* Read and convert blkpg->data */
5175 arg = (abi_long)(uintptr_t)host_blkpg->data;
5176 target_size = thunk_type_size(part_arg_type, 0);
5177 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5178 if (!argptr) {
5179 ret = -TARGET_EFAULT;
5180 goto out;
5182 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5183 unlock_user(argptr, arg, 0);
5185 /* Swizzle the data pointer to our local copy and call! */
5186 host_blkpg->data = &host_part;
5187 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5189 out:
5190 return ret;
5193 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5194 int fd, int cmd, abi_long arg)
5196 const argtype *arg_type = ie->arg_type;
5197 const StructEntry *se;
5198 const argtype *field_types;
5199 const int *dst_offsets, *src_offsets;
5200 int target_size;
5201 void *argptr;
5202 abi_ulong *target_rt_dev_ptr = NULL;
5203 unsigned long *host_rt_dev_ptr = NULL;
5204 abi_long ret;
5205 int i;
5207 assert(ie->access == IOC_W);
5208 assert(*arg_type == TYPE_PTR);
5209 arg_type++;
5210 assert(*arg_type == TYPE_STRUCT);
5211 target_size = thunk_type_size(arg_type, 0);
5212 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5213 if (!argptr) {
5214 return -TARGET_EFAULT;
5216 arg_type++;
5217 assert(*arg_type == (int)STRUCT_rtentry);
5218 se = struct_entries + *arg_type++;
5219 assert(se->convert[0] == NULL);
5220 /* convert struct here to be able to catch rt_dev string */
5221 field_types = se->field_types;
5222 dst_offsets = se->field_offsets[THUNK_HOST];
5223 src_offsets = se->field_offsets[THUNK_TARGET];
5224 for (i = 0; i < se->nb_fields; i++) {
5225 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5226 assert(*field_types == TYPE_PTRVOID);
5227 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5228 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5229 if (*target_rt_dev_ptr != 0) {
5230 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5231 tswapal(*target_rt_dev_ptr));
5232 if (!*host_rt_dev_ptr) {
5233 unlock_user(argptr, arg, 0);
5234 return -TARGET_EFAULT;
5236 } else {
5237 *host_rt_dev_ptr = 0;
5239 field_types++;
5240 continue;
5242 field_types = thunk_convert(buf_temp + dst_offsets[i],
5243 argptr + src_offsets[i],
5244 field_types, THUNK_HOST);
5246 unlock_user(argptr, arg, 0);
5248 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5250 assert(host_rt_dev_ptr != NULL);
5251 assert(target_rt_dev_ptr != NULL);
5252 if (*host_rt_dev_ptr != 0) {
5253 unlock_user((void *)*host_rt_dev_ptr,
5254 *target_rt_dev_ptr, 0);
5256 return ret;
5259 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5260 int fd, int cmd, abi_long arg)
5262 int sig = target_to_host_signal(arg);
5263 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5266 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5267 int fd, int cmd, abi_long arg)
5269 struct timeval tv;
5270 abi_long ret;
5272 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5273 if (is_error(ret)) {
5274 return ret;
5277 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5278 if (copy_to_user_timeval(arg, &tv)) {
5279 return -TARGET_EFAULT;
5281 } else {
5282 if (copy_to_user_timeval64(arg, &tv)) {
5283 return -TARGET_EFAULT;
5287 return ret;
5290 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5291 int fd, int cmd, abi_long arg)
5293 struct timespec ts;
5294 abi_long ret;
5296 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5297 if (is_error(ret)) {
5298 return ret;
5301 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5302 if (host_to_target_timespec(arg, &ts)) {
5303 return -TARGET_EFAULT;
5305 } else{
5306 if (host_to_target_timespec64(arg, &ts)) {
5307 return -TARGET_EFAULT;
5311 return ret;
5314 #ifdef TIOCGPTPEER
5315 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5316 int fd, int cmd, abi_long arg)
5318 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5319 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5321 #endif
5323 #ifdef HAVE_DRM_H
5325 static void unlock_drm_version(struct drm_version *host_ver,
5326 struct target_drm_version *target_ver,
5327 bool copy)
5329 unlock_user(host_ver->name, target_ver->name,
5330 copy ? host_ver->name_len : 0);
5331 unlock_user(host_ver->date, target_ver->date,
5332 copy ? host_ver->date_len : 0);
5333 unlock_user(host_ver->desc, target_ver->desc,
5334 copy ? host_ver->desc_len : 0);
5337 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5338 struct target_drm_version *target_ver)
5340 memset(host_ver, 0, sizeof(*host_ver));
5342 __get_user(host_ver->name_len, &target_ver->name_len);
5343 if (host_ver->name_len) {
5344 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5345 target_ver->name_len, 0);
5346 if (!host_ver->name) {
5347 return -EFAULT;
5351 __get_user(host_ver->date_len, &target_ver->date_len);
5352 if (host_ver->date_len) {
5353 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5354 target_ver->date_len, 0);
5355 if (!host_ver->date) {
5356 goto err;
5360 __get_user(host_ver->desc_len, &target_ver->desc_len);
5361 if (host_ver->desc_len) {
5362 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5363 target_ver->desc_len, 0);
5364 if (!host_ver->desc) {
5365 goto err;
5369 return 0;
5370 err:
5371 unlock_drm_version(host_ver, target_ver, false);
5372 return -EFAULT;
5375 static inline void host_to_target_drmversion(
5376 struct target_drm_version *target_ver,
5377 struct drm_version *host_ver)
5379 __put_user(host_ver->version_major, &target_ver->version_major);
5380 __put_user(host_ver->version_minor, &target_ver->version_minor);
5381 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5382 __put_user(host_ver->name_len, &target_ver->name_len);
5383 __put_user(host_ver->date_len, &target_ver->date_len);
5384 __put_user(host_ver->desc_len, &target_ver->desc_len);
5385 unlock_drm_version(host_ver, target_ver, true);
5388 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5389 int fd, int cmd, abi_long arg)
5391 struct drm_version *ver;
5392 struct target_drm_version *target_ver;
5393 abi_long ret;
5395 switch (ie->host_cmd) {
5396 case DRM_IOCTL_VERSION:
5397 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5398 return -TARGET_EFAULT;
5400 ver = (struct drm_version *)buf_temp;
5401 ret = target_to_host_drmversion(ver, target_ver);
5402 if (!is_error(ret)) {
5403 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5404 if (is_error(ret)) {
5405 unlock_drm_version(ver, target_ver, false);
5406 } else {
5407 host_to_target_drmversion(target_ver, ver);
5410 unlock_user_struct(target_ver, arg, 0);
5411 return ret;
5413 return -TARGET_ENOSYS;
5416 #endif
5418 IOCTLEntry ioctl_entries[] = {
5419 #define IOCTL(cmd, access, ...) \
5420 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5421 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5422 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5423 #define IOCTL_IGNORE(cmd) \
5424 { TARGET_ ## cmd, 0, #cmd },
5425 #include "ioctls.h"
5426 { 0, 0, },
5427 };
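/*
 * Editor's note: an IOCTL() line in "ioctls.h" such as (assumed
 * example, the real list lives in that header)
 *
 *   IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * expands to the table entry
 *
 *   { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * while IOCTL_IGNORE() leaves host_cmd 0 so do_ioctl() below reports
 * ENOSYS for it.
 */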
5429 /* ??? Implement proper locking for ioctls. */
5430 /* do_ioctl() must return target values and target errnos. */
5431 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5433 const IOCTLEntry *ie;
5434 const argtype *arg_type;
5435 abi_long ret;
5436 uint8_t buf_temp[MAX_STRUCT_SIZE];
5437 int target_size;
5438 void *argptr;
5440 ie = ioctl_entries;
5441 for(;;) {
5442 if (ie->target_cmd == 0) {
5443 qemu_log_mask(
5444 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5445 return -TARGET_ENOSYS;
5447 if (ie->target_cmd == cmd)
5448 break;
5449 ie++;
5451 arg_type = ie->arg_type;
5452 if (ie->do_ioctl) {
5453 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5454 } else if (!ie->host_cmd) {
5455 /* Some architectures define BSD ioctls in their headers
5456 that are not implemented in Linux. */
5457 return -TARGET_ENOSYS;
5460 switch(arg_type[0]) {
5461 case TYPE_NULL:
5462 /* no argument */
5463 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5464 break;
5465 case TYPE_PTRVOID:
5466 case TYPE_INT:
5467 case TYPE_LONG:
5468 case TYPE_ULONG:
5469 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5470 break;
5471 case TYPE_PTR:
5472 arg_type++;
5473 target_size = thunk_type_size(arg_type, 0);
5474 switch(ie->access) {
5475 case IOC_R:
5476 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5477 if (!is_error(ret)) {
5478 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5479 if (!argptr)
5480 return -TARGET_EFAULT;
5481 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5482 unlock_user(argptr, arg, target_size);
5484 break;
5485 case IOC_W:
5486 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5487 if (!argptr)
5488 return -TARGET_EFAULT;
5489 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5490 unlock_user(argptr, arg, 0);
5491 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5492 break;
5493 default:
5494 case IOC_RW:
5495 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5496 if (!argptr)
5497 return -TARGET_EFAULT;
5498 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5499 unlock_user(argptr, arg, 0);
5500 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5501 if (!is_error(ret)) {
5502 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5503 if (!argptr)
5504 return -TARGET_EFAULT;
5505 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5506 unlock_user(argptr, arg, target_size);
5508 break;
5510 break;
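/* Editor's note: the three access modes above mirror the _IOR/_IOW/
 * _IOWR conventions -- IOC_R converts host->target after the call,
 * IOC_W converts target->host before it, and IOC_RW (also the
 * default) does both. */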
5511 default:
5512 qemu_log_mask(LOG_UNIMP,
5513 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5514 (long)cmd, arg_type[0]);
5515 ret = -TARGET_ENOSYS;
5516 break;
5518 return ret;
5521 static const bitmask_transtbl iflag_tbl[] = {
5522 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5523 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5524 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5525 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5526 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5527 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5528 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5529 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5530 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5531 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5532 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5533 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5534 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5535 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5536 { 0, 0, 0, 0 }
5537 };
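/*
 * Editor's note: each bitmask_transtbl row reads
 * { target_mask, target_bits, host_mask, host_bits }: where the value
 * masked by the first field equals the second, the translated value
 * gets the fourth field within the third. Single flags repeat the
 * same bit in both columns, while multi-valued fields such as NLDLY
 * or CBAUD below need one row per possible value.
 */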
5539 static const bitmask_transtbl oflag_tbl[] = {
5540 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5541 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5542 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5543 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5544 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5545 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5546 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5547 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5548 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5549 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5550 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5551 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5552 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5553 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5554 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5555 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5556 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5557 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5558 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5559 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5560 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5561 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5562 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5563 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5564 { 0, 0, 0, 0 }
5565 };
5567 static const bitmask_transtbl cflag_tbl[] = {
5568 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5569 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5570 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5571 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5572 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5573 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5574 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5575 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5576 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5577 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5578 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5579 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5580 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5581 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5582 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5583 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5584 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5585 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5586 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5587 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5588 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5589 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5590 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5591 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5592 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5593 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5594 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5595 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5596 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5597 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5598 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5599 { 0, 0, 0, 0 }
5600 };
5602 static const bitmask_transtbl lflag_tbl[] = {
5603 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5604 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5605 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5606 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5607 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5608 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5609 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5610 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5611 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5612 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5613 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5614 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5615 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5616 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5617 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5618 { 0, 0, 0, 0 }
5619 };
5621 static void target_to_host_termios (void *dst, const void *src)
5623 struct host_termios *host = dst;
5624 const struct target_termios *target = src;
5626 host->c_iflag =
5627 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5628 host->c_oflag =
5629 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5630 host->c_cflag =
5631 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5632 host->c_lflag =
5633 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5634 host->c_line = target->c_line;
5636 memset(host->c_cc, 0, sizeof(host->c_cc));
5637 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5638 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5639 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5640 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5641 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5642 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5643 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5644 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5645 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5646 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5647 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5648 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5649 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5650 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5651 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5652 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5653 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5656 static void host_to_target_termios (void *dst, const void *src)
5658 struct target_termios *target = dst;
5659 const struct host_termios *host = src;
5661 target->c_iflag =
5662 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5663 target->c_oflag =
5664 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5665 target->c_cflag =
5666 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5667 target->c_lflag =
5668 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5669 target->c_line = host->c_line;
5671 memset(target->c_cc, 0, sizeof(target->c_cc));
5672 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5673 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5674 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5675 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5676 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5677 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5678 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5679 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5680 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5681 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5682 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5683 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5684 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5685 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5686 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5687 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5688 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5691 static const StructEntry struct_termios_def = {
5692 .convert = { host_to_target_termios, target_to_host_termios },
5693 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5694 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5695 };
5697 static bitmask_transtbl mmap_flags_tbl[] = {
5698 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5699 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5700 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5701 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5702 MAP_ANONYMOUS, MAP_ANONYMOUS },
5703 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5704 MAP_GROWSDOWN, MAP_GROWSDOWN },
5705 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5706 MAP_DENYWRITE, MAP_DENYWRITE },
5707 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5708 MAP_EXECUTABLE, MAP_EXECUTABLE },
5709 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5710 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5711 MAP_NORESERVE, MAP_NORESERVE },
5712 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5713 /* MAP_STACK has been ignored by the kernel for quite some time.
5714 Recognize it for the target insofar as we do not want to pass
5715 it through to the host. */
5716 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5717 { 0, 0, 0, 0 }
5718 };
5720 /*
5721 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5722 * TARGET_I386 is defined if TARGET_X86_64 is defined.
5723 */
5724 #if defined(TARGET_I386)
5726 /* NOTE: there is really one LDT for all the threads */
5727 static uint8_t *ldt_table;
5729 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5731 int size;
5732 void *p;
5734 if (!ldt_table)
5735 return 0;
5736 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5737 if (size > bytecount)
5738 size = bytecount;
5739 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5740 if (!p)
5741 return -TARGET_EFAULT;
5742 /* ??? Should this be byteswapped? */
5743 memcpy(p, ldt_table, size);
5744 unlock_user(p, ptr, size);
5745 return size;
5748 /* XXX: add locking support */
5749 static abi_long write_ldt(CPUX86State *env,
5750 abi_ulong ptr, unsigned long bytecount, int oldmode)
5752 struct target_modify_ldt_ldt_s ldt_info;
5753 struct target_modify_ldt_ldt_s *target_ldt_info;
5754 int seg_32bit, contents, read_exec_only, limit_in_pages;
5755 int seg_not_present, useable, lm;
5756 uint32_t *lp, entry_1, entry_2;
5758 if (bytecount != sizeof(ldt_info))
5759 return -TARGET_EINVAL;
5760 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5761 return -TARGET_EFAULT;
5762 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5763 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5764 ldt_info.limit = tswap32(target_ldt_info->limit);
5765 ldt_info.flags = tswap32(target_ldt_info->flags);
5766 unlock_user_struct(target_ldt_info, ptr, 0);
5768 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5769 return -TARGET_EINVAL;
5770 seg_32bit = ldt_info.flags & 1;
5771 contents = (ldt_info.flags >> 1) & 3;
5772 read_exec_only = (ldt_info.flags >> 3) & 1;
5773 limit_in_pages = (ldt_info.flags >> 4) & 1;
5774 seg_not_present = (ldt_info.flags >> 5) & 1;
5775 useable = (ldt_info.flags >> 6) & 1;
5776 #ifdef TARGET_ABI32
5777 lm = 0;
5778 #else
5779 lm = (ldt_info.flags >> 7) & 1;
5780 #endif
5781 if (contents == 3) {
5782 if (oldmode)
5783 return -TARGET_EINVAL;
5784 if (seg_not_present == 0)
5785 return -TARGET_EINVAL;
5787 /* allocate the LDT */
5788 if (!ldt_table) {
5789 env->ldt.base = target_mmap(0,
5790 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5791 PROT_READ|PROT_WRITE,
5792 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5793 if (env->ldt.base == -1)
5794 return -TARGET_ENOMEM;
5795 memset(g2h(env->ldt.base), 0,
5796 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5797 env->ldt.limit = 0xffff;
5798 ldt_table = g2h(env->ldt.base);
5801 /* NOTE: same code as Linux kernel */
5802 /* Allow LDTs to be cleared by the user. */
5803 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5804 if (oldmode ||
5805 (contents == 0 &&
5806 read_exec_only == 1 &&
5807 seg_32bit == 0 &&
5808 limit_in_pages == 0 &&
5809 seg_not_present == 1 &&
5810 useable == 0 )) {
5811 entry_1 = 0;
5812 entry_2 = 0;
5813 goto install;
5817 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5818 (ldt_info.limit & 0x0ffff);
5819 entry_2 = (ldt_info.base_addr & 0xff000000) |
5820 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5821 (ldt_info.limit & 0xf0000) |
5822 ((read_exec_only ^ 1) << 9) |
5823 (contents << 10) |
5824 ((seg_not_present ^ 1) << 15) |
5825 (seg_32bit << 22) |
5826 (limit_in_pages << 23) |
5827 (lm << 21) |
5828 0x7000;
5829 if (!oldmode)
5830 entry_2 |= (useable << 20);
5832 /* Install the new entry ... */
5833 install:
5834 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5835 lp[0] = tswap32(entry_1);
5836 lp[1] = tswap32(entry_2);
5837 return 0;
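/*
 * Editor's sketch (not in the original source): the ldt_info.flags word
 * unpacked in write_ldt() above follows the kernel's user_desc bit layout;
 * this helper is simply the inverse of that unpacking, and matches the
 * packing done later in do_get_thread_area().
 */
static unsigned int sketch_pack_user_desc_flags(int seg_32bit, int contents,
                                                int read_exec_only,
                                                int limit_in_pages,
                                                int seg_not_present,
                                                int useable, int lm)
{
    return (seg_32bit & 1) |
           ((contents & 3) << 1) |
           ((read_exec_only & 1) << 3) |
           ((limit_in_pages & 1) << 4) |
           ((seg_not_present & 1) << 5) |
           ((useable & 1) << 6) |
           ((lm & 1) << 7);    /* lm is forced to 0 under TARGET_ABI32 */
}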
5840 /* specific and weird i386 syscalls */
5841 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5842 unsigned long bytecount)
5844 abi_long ret;
5846 switch (func) {
5847 case 0:
5848 ret = read_ldt(ptr, bytecount);
5849 break;
5850 case 1:
5851 ret = write_ldt(env, ptr, bytecount, 1);
5852 break;
5853 case 0x11:
5854 ret = write_ldt(env, ptr, bytecount, 0);
5855 break;
5856 default:
5857 ret = -TARGET_ENOSYS;
5858 break;
5860 return ret;
5863 #if defined(TARGET_ABI32)
5864 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5866 uint64_t *gdt_table = g2h(env->gdt.base);
5867 struct target_modify_ldt_ldt_s ldt_info;
5868 struct target_modify_ldt_ldt_s *target_ldt_info;
5869 int seg_32bit, contents, read_exec_only, limit_in_pages;
5870 int seg_not_present, useable, lm;
5871 uint32_t *lp, entry_1, entry_2;
5872 int i;
5874 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5875 if (!target_ldt_info)
5876 return -TARGET_EFAULT;
5877 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5878 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5879 ldt_info.limit = tswap32(target_ldt_info->limit);
5880 ldt_info.flags = tswap32(target_ldt_info->flags);
5881 if (ldt_info.entry_number == -1) {
5882 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5883 if (gdt_table[i] == 0) {
5884 ldt_info.entry_number = i;
5885 target_ldt_info->entry_number = tswap32(i);
5886 break;
5890 unlock_user_struct(target_ldt_info, ptr, 1);
5892 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5893 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5894 return -TARGET_EINVAL;
5895 seg_32bit = ldt_info.flags & 1;
5896 contents = (ldt_info.flags >> 1) & 3;
5897 read_exec_only = (ldt_info.flags >> 3) & 1;
5898 limit_in_pages = (ldt_info.flags >> 4) & 1;
5899 seg_not_present = (ldt_info.flags >> 5) & 1;
5900 useable = (ldt_info.flags >> 6) & 1;
5901 #ifdef TARGET_ABI32
5902 lm = 0;
5903 #else
5904 lm = (ldt_info.flags >> 7) & 1;
5905 #endif
5907 if (contents == 3) {
5908 if (seg_not_present == 0)
5909 return -TARGET_EINVAL;
5912 /* NOTE: same code as Linux kernel */
5913 /* Allow LDTs to be cleared by the user. */
5914 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5915 if ((contents == 0 &&
5916 read_exec_only == 1 &&
5917 seg_32bit == 0 &&
5918 limit_in_pages == 0 &&
5919 seg_not_present == 1 &&
5920 useable == 0 )) {
5921 entry_1 = 0;
5922 entry_2 = 0;
5923 goto install;
5927 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5928 (ldt_info.limit & 0x0ffff);
5929 entry_2 = (ldt_info.base_addr & 0xff000000) |
5930 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5931 (ldt_info.limit & 0xf0000) |
5932 ((read_exec_only ^ 1) << 9) |
5933 (contents << 10) |
5934 ((seg_not_present ^ 1) << 15) |
5935 (seg_32bit << 22) |
5936 (limit_in_pages << 23) |
5937 (useable << 20) |
5938 (lm << 21) |
5939 0x7000;
5941 /* Install the new entry ... */
5942 install:
5943 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5944 lp[0] = tswap32(entry_1);
5945 lp[1] = tswap32(entry_2);
5946 return 0;
5949 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5951 struct target_modify_ldt_ldt_s *target_ldt_info;
5952 uint64_t *gdt_table = g2h(env->gdt.base);
5953 uint32_t base_addr, limit, flags;
5954 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5955 int seg_not_present, useable, lm;
5956 uint32_t *lp, entry_1, entry_2;
5958 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5959 if (!target_ldt_info)
5960 return -TARGET_EFAULT;
5961 idx = tswap32(target_ldt_info->entry_number);
5962 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5963 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5964 unlock_user_struct(target_ldt_info, ptr, 1);
5965 return -TARGET_EINVAL;
5967 lp = (uint32_t *)(gdt_table + idx);
5968 entry_1 = tswap32(lp[0]);
5969 entry_2 = tswap32(lp[1]);
5971 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5972 contents = (entry_2 >> 10) & 3;
5973 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5974 seg_32bit = (entry_2 >> 22) & 1;
5975 limit_in_pages = (entry_2 >> 23) & 1;
5976 useable = (entry_2 >> 20) & 1;
5977 #ifdef TARGET_ABI32
5978 lm = 0;
5979 #else
5980 lm = (entry_2 >> 21) & 1;
5981 #endif
5982 flags = (seg_32bit << 0) | (contents << 1) |
5983 (read_exec_only << 3) | (limit_in_pages << 4) |
5984 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5985 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5986 base_addr = (entry_1 >> 16) |
5987 (entry_2 & 0xff000000) |
5988 ((entry_2 & 0xff) << 16);
5989 target_ldt_info->base_addr = tswapal(base_addr);
5990 target_ldt_info->limit = tswap32(limit);
5991 target_ldt_info->flags = tswap32(flags);
5992 unlock_user_struct(target_ldt_info, ptr, 1);
5993 return 0;
5996 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5998 return -TARGET_ENOSYS;
6000 #else
6001 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6003 abi_long ret = 0;
6004 abi_ulong val;
6005 int idx;
6007 switch(code) {
6008 case TARGET_ARCH_SET_GS:
6009 case TARGET_ARCH_SET_FS:
6010 if (code == TARGET_ARCH_SET_GS)
6011 idx = R_GS;
6012 else
6013 idx = R_FS;
6014 cpu_x86_load_seg(env, idx, 0);
6015 env->segs[idx].base = addr;
6016 break;
6017 case TARGET_ARCH_GET_GS:
6018 case TARGET_ARCH_GET_FS:
6019 if (code == TARGET_ARCH_GET_GS)
6020 idx = R_GS;
6021 else
6022 idx = R_FS;
6023 val = env->segs[idx].base;
6024 if (put_user(val, addr, abi_ulong))
6025 ret = -TARGET_EFAULT;
6026 break;
6027 default:
6028 ret = -TARGET_EINVAL;
6029 break;
6031 return ret;
6033 #endif /* defined(TARGET_ABI32) */
6035 #endif /* defined(TARGET_I386) */
6037 #define NEW_STACK_SIZE 0x40000
6040 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6041 typedef struct {
6042 CPUArchState *env;
6043 pthread_mutex_t mutex;
6044 pthread_cond_t cond;
6045 pthread_t thread;
6046 uint32_t tid;
6047 abi_ulong child_tidptr;
6048 abi_ulong parent_tidptr;
6049 sigset_t sigmask;
6050 } new_thread_info;
6052 static void *clone_func(void *arg)
6054 new_thread_info *info = arg;
6055 CPUArchState *env;
6056 CPUState *cpu;
6057 TaskState *ts;
6059 rcu_register_thread();
6060 tcg_register_thread();
6061 env = info->env;
6062 cpu = env_cpu(env);
6063 thread_cpu = cpu;
6064 ts = (TaskState *)cpu->opaque;
6065 info->tid = sys_gettid();
6066 task_settid(ts);
6067 if (info->child_tidptr)
6068 put_user_u32(info->tid, info->child_tidptr);
6069 if (info->parent_tidptr)
6070 put_user_u32(info->tid, info->parent_tidptr);
6071 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6072 /* Enable signals. */
6073 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6074 /* Signal to the parent that we're ready. */
6075 pthread_mutex_lock(&info->mutex);
6076 pthread_cond_broadcast(&info->cond);
6077 pthread_mutex_unlock(&info->mutex);
6078 /* Wait until the parent has finished initializing the tls state. */
6079 pthread_mutex_lock(&clone_lock);
6080 pthread_mutex_unlock(&clone_lock);
6081 cpu_loop(env);
6082 /* never exits */
6083 return NULL;
6086 /* do_fork() must return host values and target errnos (unlike most
6087 do_*() functions). */
6088 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6089 abi_ulong parent_tidptr, target_ulong newtls,
6090 abi_ulong child_tidptr)
6092 CPUState *cpu = env_cpu(env);
6093 int ret;
6094 TaskState *ts;
6095 CPUState *new_cpu;
6096 CPUArchState *new_env;
6097 sigset_t sigmask;
6099 flags &= ~CLONE_IGNORED_FLAGS;
6101 /* Emulate vfork() with fork() */
6102 if (flags & CLONE_VFORK)
6103 flags &= ~(CLONE_VFORK | CLONE_VM);
6105 if (flags & CLONE_VM) {
6106 TaskState *parent_ts = (TaskState *)cpu->opaque;
6107 new_thread_info info;
6108 pthread_attr_t attr;
6110 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6111 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6112 return -TARGET_EINVAL;
6115 ts = g_new0(TaskState, 1);
6116 init_task_state(ts);
6118 /* Grab a mutex so that thread setup appears atomic. */
6119 pthread_mutex_lock(&clone_lock);
6121 /* we create a new CPU instance. */
6122 new_env = cpu_copy(env);
6123 /* Init regs that differ from the parent. */
6124 cpu_clone_regs_child(new_env, newsp, flags);
6125 cpu_clone_regs_parent(env, flags);
6126 new_cpu = env_cpu(new_env);
6127 new_cpu->opaque = ts;
6128 ts->bprm = parent_ts->bprm;
6129 ts->info = parent_ts->info;
6130 ts->signal_mask = parent_ts->signal_mask;
6132 if (flags & CLONE_CHILD_CLEARTID) {
6133 ts->child_tidptr = child_tidptr;
6136 if (flags & CLONE_SETTLS) {
6137 cpu_set_tls (new_env, newtls);
6140 memset(&info, 0, sizeof(info));
6141 pthread_mutex_init(&info.mutex, NULL);
6142 pthread_mutex_lock(&info.mutex);
6143 pthread_cond_init(&info.cond, NULL);
6144 info.env = new_env;
6145 if (flags & CLONE_CHILD_SETTID) {
6146 info.child_tidptr = child_tidptr;
6148 if (flags & CLONE_PARENT_SETTID) {
6149 info.parent_tidptr = parent_tidptr;
6152 ret = pthread_attr_init(&attr);
6153 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6154 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6155 /* It is not safe to deliver signals until the child has finished
6156 initializing, so temporarily block all signals. */
6157 sigfillset(&sigmask);
6158 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6159 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6161 /* If this is our first additional thread, we need to ensure we
6162 * generate code for parallel execution and flush old translations.
6164 if (!parallel_cpus) {
6165 parallel_cpus = true;
6166 tb_flush(cpu);
6169 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6170 /* TODO: Free new CPU state if thread creation failed. */
6172 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6173 pthread_attr_destroy(&attr);
6174 if (ret == 0) {
6175 /* Wait for the child to initialize. */
6176 pthread_cond_wait(&info.cond, &info.mutex);
6177 ret = info.tid;
6178 } else {
6179 ret = -1;
6181 pthread_mutex_unlock(&info.mutex);
6182 pthread_cond_destroy(&info.cond);
6183 pthread_mutex_destroy(&info.mutex);
6184 pthread_mutex_unlock(&clone_lock);
6185 } else {
6186 /* if CLONE_VM is not set, we consider it a fork */
6187 if (flags & CLONE_INVALID_FORK_FLAGS) {
6188 return -TARGET_EINVAL;
6191 /* We can't support custom termination signals */
6192 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6193 return -TARGET_EINVAL;
6196 if (block_signals()) {
6197 return -TARGET_ERESTARTSYS;
6200 fork_start();
6201 ret = fork();
6202 if (ret == 0) {
6203 /* Child Process. */
6204 cpu_clone_regs_child(env, newsp, flags);
6205 fork_end(1);
6206 /* There is a race condition here. The parent process could
6207 theoretically read the TID in the child process before the child
6208 tid is set. This would require using either ptrace
6209 (not implemented) or having *_tidptr point at a shared memory
6210 mapping. We can't repeat the spinlock hack used above because
6211 the child process gets its own copy of the lock. */
6212 if (flags & CLONE_CHILD_SETTID)
6213 put_user_u32(sys_gettid(), child_tidptr);
6214 if (flags & CLONE_PARENT_SETTID)
6215 put_user_u32(sys_gettid(), parent_tidptr);
6216 ts = (TaskState *)cpu->opaque;
6217 if (flags & CLONE_SETTLS)
6218 cpu_set_tls (env, newtls);
6219 if (flags & CLONE_CHILD_CLEARTID)
6220 ts->child_tidptr = child_tidptr;
6221 } else {
6222 cpu_clone_regs_parent(env, flags);
6223 fork_end(0);
6226 return ret;
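/*
 * Editor's note (illustrative, not part of the original file): the three
 * guest clone() shapes that do_fork() above distinguishes, with CLONE_VM
 * acting as the discriminator:
 *
 *   pthread_create():  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *                      CLONE_THREAD | ...       -> host pthread_create() path
 *   fork():            SIGCHLD                  -> host fork() path
 *   vfork():           CLONE_VFORK | CLONE_VM | SIGCHLD
 *                      -> CLONE_VFORK and CLONE_VM stripped first,
 *                         then the host fork() path
 */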
6229 /* warning: doesn't handle Linux-specific flags... */
6230 static int target_to_host_fcntl_cmd(int cmd)
6232 int ret;
6234 switch(cmd) {
6235 case TARGET_F_DUPFD:
6236 case TARGET_F_GETFD:
6237 case TARGET_F_SETFD:
6238 case TARGET_F_GETFL:
6239 case TARGET_F_SETFL:
6240 case TARGET_F_OFD_GETLK:
6241 case TARGET_F_OFD_SETLK:
6242 case TARGET_F_OFD_SETLKW:
6243 ret = cmd;
6244 break;
6245 case TARGET_F_GETLK:
6246 ret = F_GETLK64;
6247 break;
6248 case TARGET_F_SETLK:
6249 ret = F_SETLK64;
6250 break;
6251 case TARGET_F_SETLKW:
6252 ret = F_SETLKW64;
6253 break;
6254 case TARGET_F_GETOWN:
6255 ret = F_GETOWN;
6256 break;
6257 case TARGET_F_SETOWN:
6258 ret = F_SETOWN;
6259 break;
6260 case TARGET_F_GETSIG:
6261 ret = F_GETSIG;
6262 break;
6263 case TARGET_F_SETSIG:
6264 ret = F_SETSIG;
6265 break;
6266 #if TARGET_ABI_BITS == 32
6267 case TARGET_F_GETLK64:
6268 ret = F_GETLK64;
6269 break;
6270 case TARGET_F_SETLK64:
6271 ret = F_SETLK64;
6272 break;
6273 case TARGET_F_SETLKW64:
6274 ret = F_SETLKW64;
6275 break;
6276 #endif
6277 case TARGET_F_SETLEASE:
6278 ret = F_SETLEASE;
6279 break;
6280 case TARGET_F_GETLEASE:
6281 ret = F_GETLEASE;
6282 break;
6283 #ifdef F_DUPFD_CLOEXEC
6284 case TARGET_F_DUPFD_CLOEXEC:
6285 ret = F_DUPFD_CLOEXEC;
6286 break;
6287 #endif
6288 case TARGET_F_NOTIFY:
6289 ret = F_NOTIFY;
6290 break;
6291 #ifdef F_GETOWN_EX
6292 case TARGET_F_GETOWN_EX:
6293 ret = F_GETOWN_EX;
6294 break;
6295 #endif
6296 #ifdef F_SETOWN_EX
6297 case TARGET_F_SETOWN_EX:
6298 ret = F_SETOWN_EX;
6299 break;
6300 #endif
6301 #ifdef F_SETPIPE_SZ
6302 case TARGET_F_SETPIPE_SZ:
6303 ret = F_SETPIPE_SZ;
6304 break;
6305 case TARGET_F_GETPIPE_SZ:
6306 ret = F_GETPIPE_SZ;
6307 break;
6308 #endif
6309 default:
6310 ret = -TARGET_EINVAL;
6311 break;
6314 #if defined(__powerpc64__)
6315 /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
6316 * values the kernel does not support. The glibc fcntl wrapper adjusts
6317 * them to 5, 6 and 7 before making the syscall(). Since we make the
6318 * syscall directly, adjust to what the kernel supports. */
6320 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6321 ret -= F_GETLK64 - 5;
6323 #endif
6325 return ret;
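/*
 * Editor's sketch (illustrative): the ppc64 rebase above in numbers.
 * glibc's ppc64 headers define F_GETLK64/F_SETLK64/F_SETLKW64 as
 * 12/13/14 while the raw syscall expects 5/6/7, so with F_GETLK64 == 12:
 *
 *   12 - (12 - 5) == 5,   13 - (12 - 5) == 6,   14 - (12 - 5) == 7
 */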
6328 #define FLOCK_TRANSTBL \
6329 switch (type) { \
6330 TRANSTBL_CONVERT(F_RDLCK); \
6331 TRANSTBL_CONVERT(F_WRLCK); \
6332 TRANSTBL_CONVERT(F_UNLCK); \
6333 TRANSTBL_CONVERT(F_EXLCK); \
6334 TRANSTBL_CONVERT(F_SHLCK); \
6337 static int target_to_host_flock(int type)
6339 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6340 FLOCK_TRANSTBL
6341 #undef TRANSTBL_CONVERT
6342 return -TARGET_EINVAL;
6345 static int host_to_target_flock(int type)
6347 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6348 FLOCK_TRANSTBL
6349 #undef TRANSTBL_CONVERT
6350 /* if we don't know how to convert the value coming
6351 * from the host, we copy it to the target field as-is
6353 return type;
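/*
 * Editor's sketch: what FLOCK_TRANSTBL expands to inside
 * target_to_host_flock() above, once TRANSTBL_CONVERT(a) is defined as
 * "case TARGET_##a: return a":
 *
 *   switch (type) {
 *   case TARGET_F_RDLCK: return F_RDLCK;
 *   case TARGET_F_WRLCK: return F_WRLCK;
 *   case TARGET_F_UNLCK: return F_UNLCK;
 *   case TARGET_F_EXLCK: return F_EXLCK;
 *   case TARGET_F_SHLCK: return F_SHLCK;
 *   }
 *
 * host_to_target_flock() gets the mirror-image expansion and, as noted
 * above, passes unknown host values through unchanged.
 */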
6356 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6357 abi_ulong target_flock_addr)
6359 struct target_flock *target_fl;
6360 int l_type;
6362 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6363 return -TARGET_EFAULT;
6366 __get_user(l_type, &target_fl->l_type);
6367 l_type = target_to_host_flock(l_type);
6368 if (l_type < 0) {
6369 return l_type;
6371 fl->l_type = l_type;
6372 __get_user(fl->l_whence, &target_fl->l_whence);
6373 __get_user(fl->l_start, &target_fl->l_start);
6374 __get_user(fl->l_len, &target_fl->l_len);
6375 __get_user(fl->l_pid, &target_fl->l_pid);
6376 unlock_user_struct(target_fl, target_flock_addr, 0);
6377 return 0;
6380 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6381 const struct flock64 *fl)
6383 struct target_flock *target_fl;
6384 short l_type;
6386 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6387 return -TARGET_EFAULT;
6390 l_type = host_to_target_flock(fl->l_type);
6391 __put_user(l_type, &target_fl->l_type);
6392 __put_user(fl->l_whence, &target_fl->l_whence);
6393 __put_user(fl->l_start, &target_fl->l_start);
6394 __put_user(fl->l_len, &target_fl->l_len);
6395 __put_user(fl->l_pid, &target_fl->l_pid);
6396 unlock_user_struct(target_fl, target_flock_addr, 1);
6397 return 0;
6400 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6401 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6403 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6404 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6405 abi_ulong target_flock_addr)
6407 struct target_oabi_flock64 *target_fl;
6408 int l_type;
6410 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6411 return -TARGET_EFAULT;
6414 __get_user(l_type, &target_fl->l_type);
6415 l_type = target_to_host_flock(l_type);
6416 if (l_type < 0) {
6417 return l_type;
6419 fl->l_type = l_type;
6420 __get_user(fl->l_whence, &target_fl->l_whence);
6421 __get_user(fl->l_start, &target_fl->l_start);
6422 __get_user(fl->l_len, &target_fl->l_len);
6423 __get_user(fl->l_pid, &target_fl->l_pid);
6424 unlock_user_struct(target_fl, target_flock_addr, 0);
6425 return 0;
6428 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6429 const struct flock64 *fl)
6431 struct target_oabi_flock64 *target_fl;
6432 short l_type;
6434 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6435 return -TARGET_EFAULT;
6438 l_type = host_to_target_flock(fl->l_type);
6439 __put_user(l_type, &target_fl->l_type);
6440 __put_user(fl->l_whence, &target_fl->l_whence);
6441 __put_user(fl->l_start, &target_fl->l_start);
6442 __put_user(fl->l_len, &target_fl->l_len);
6443 __put_user(fl->l_pid, &target_fl->l_pid);
6444 unlock_user_struct(target_fl, target_flock_addr, 1);
6445 return 0;
6447 #endif
6449 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6450 abi_ulong target_flock_addr)
6452 struct target_flock64 *target_fl;
6453 int l_type;
6455 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6456 return -TARGET_EFAULT;
6459 __get_user(l_type, &target_fl->l_type);
6460 l_type = target_to_host_flock(l_type);
6461 if (l_type < 0) {
6462 return l_type;
6464 fl->l_type = l_type;
6465 __get_user(fl->l_whence, &target_fl->l_whence);
6466 __get_user(fl->l_start, &target_fl->l_start);
6467 __get_user(fl->l_len, &target_fl->l_len);
6468 __get_user(fl->l_pid, &target_fl->l_pid);
6469 unlock_user_struct(target_fl, target_flock_addr, 0);
6470 return 0;
6473 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6474 const struct flock64 *fl)
6476 struct target_flock64 *target_fl;
6477 short l_type;
6479 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6480 return -TARGET_EFAULT;
6483 l_type = host_to_target_flock(fl->l_type);
6484 __put_user(l_type, &target_fl->l_type);
6485 __put_user(fl->l_whence, &target_fl->l_whence);
6486 __put_user(fl->l_start, &target_fl->l_start);
6487 __put_user(fl->l_len, &target_fl->l_len);
6488 __put_user(fl->l_pid, &target_fl->l_pid);
6489 unlock_user_struct(target_fl, target_flock_addr, 1);
6490 return 0;
6493 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6495 struct flock64 fl64;
6496 #ifdef F_GETOWN_EX
6497 struct f_owner_ex fox;
6498 struct target_f_owner_ex *target_fox;
6499 #endif
6500 abi_long ret;
6501 int host_cmd = target_to_host_fcntl_cmd(cmd);
6503 if (host_cmd == -TARGET_EINVAL)
6504 return host_cmd;
6506 switch(cmd) {
6507 case TARGET_F_GETLK:
6508 ret = copy_from_user_flock(&fl64, arg);
6509 if (ret) {
6510 return ret;
6512 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6513 if (ret == 0) {
6514 ret = copy_to_user_flock(arg, &fl64);
6516 break;
6518 case TARGET_F_SETLK:
6519 case TARGET_F_SETLKW:
6520 ret = copy_from_user_flock(&fl64, arg);
6521 if (ret) {
6522 return ret;
6524 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6525 break;
6527 case TARGET_F_GETLK64:
6528 case TARGET_F_OFD_GETLK:
6529 ret = copy_from_user_flock64(&fl64, arg);
6530 if (ret) {
6531 return ret;
6533 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6534 if (ret == 0) {
6535 ret = copy_to_user_flock64(arg, &fl64);
6537 break;
6538 case TARGET_F_SETLK64:
6539 case TARGET_F_SETLKW64:
6540 case TARGET_F_OFD_SETLK:
6541 case TARGET_F_OFD_SETLKW:
6542 ret = copy_from_user_flock64(&fl64, arg);
6543 if (ret) {
6544 return ret;
6546 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6547 break;
6549 case TARGET_F_GETFL:
6550 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6551 if (ret >= 0) {
6552 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6554 break;
6556 case TARGET_F_SETFL:
6557 ret = get_errno(safe_fcntl(fd, host_cmd,
6558 target_to_host_bitmask(arg,
6559 fcntl_flags_tbl)));
6560 break;
6562 #ifdef F_GETOWN_EX
6563 case TARGET_F_GETOWN_EX:
6564 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6565 if (ret >= 0) {
6566 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6567 return -TARGET_EFAULT;
6568 target_fox->type = tswap32(fox.type);
6569 target_fox->pid = tswap32(fox.pid);
6570 unlock_user_struct(target_fox, arg, 1);
6572 break;
6573 #endif
6575 #ifdef F_SETOWN_EX
6576 case TARGET_F_SETOWN_EX:
6577 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6578 return -TARGET_EFAULT;
6579 fox.type = tswap32(target_fox->type);
6580 fox.pid = tswap32(target_fox->pid);
6581 unlock_user_struct(target_fox, arg, 0);
6582 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6583 break;
6584 #endif
6586 case TARGET_F_SETOWN:
6587 case TARGET_F_GETOWN:
6588 case TARGET_F_SETSIG:
6589 case TARGET_F_GETSIG:
6590 case TARGET_F_SETLEASE:
6591 case TARGET_F_GETLEASE:
6592 case TARGET_F_SETPIPE_SZ:
6593 case TARGET_F_GETPIPE_SZ:
6594 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6595 break;
6597 default:
6598 ret = get_errno(safe_fcntl(fd, cmd, arg));
6599 break;
6601 return ret;
6604 #ifdef USE_UID16
6606 static inline int high2lowuid(int uid)
6608 if (uid > 65535)
6609 return 65534;
6610 else
6611 return uid;
6614 static inline int high2lowgid(int gid)
6616 if (gid > 65535)
6617 return 65534;
6618 else
6619 return gid;
6622 static inline int low2highuid(int uid)
6624 if ((int16_t)uid == -1)
6625 return -1;
6626 else
6627 return uid;
6630 static inline int low2highgid(int gid)
6632 if ((int16_t)gid == -1)
6633 return -1;
6634 else
6635 return gid;
6637 static inline int tswapid(int id)
6639 return tswap16(id);
6642 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6644 #else /* !USE_UID16 */
6645 static inline int high2lowuid(int uid)
6647 return uid;
6649 static inline int high2lowgid(int gid)
6651 return gid;
6653 static inline int low2highuid(int uid)
6655 return uid;
6657 static inline int low2highgid(int gid)
6659 return gid;
6661 static inline int tswapid(int id)
6663 return tswap32(id);
6666 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6668 #endif /* USE_UID16 */
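/*
 * Editor's sketch (illustrative): the UID16 helpers in numbers, mirroring
 * the kernel's default overflowuid/overflowgid of 65534.
 *
 *   high2lowuid(1000)   == 1000    fits in 16 bits, passed through
 *   high2lowuid(100000) == 65534   clamped to the overflow UID
 *   low2highuid(0xffff) == -1      16-bit -1 widens to "no change" (-1)
 */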
6670 /* We must do direct syscalls for setting UID/GID, because we want to
6671 * implement the Linux system call semantics of "change only for this thread",
6672 * not the libc/POSIX semantics of "change for all threads in process".
6673 * (See http://ewontfix.com/17/ for more details.)
6674 * We use the 32-bit version of the syscalls if present; if it is not
6675 * then either the host architecture supports 32-bit UIDs natively with
6676 * the standard syscall, or the 16-bit UID is the best we can do. */
6678 #ifdef __NR_setuid32
6679 #define __NR_sys_setuid __NR_setuid32
6680 #else
6681 #define __NR_sys_setuid __NR_setuid
6682 #endif
6683 #ifdef __NR_setgid32
6684 #define __NR_sys_setgid __NR_setgid32
6685 #else
6686 #define __NR_sys_setgid __NR_setgid
6687 #endif
6688 #ifdef __NR_setresuid32
6689 #define __NR_sys_setresuid __NR_setresuid32
6690 #else
6691 #define __NR_sys_setresuid __NR_setresuid
6692 #endif
6693 #ifdef __NR_setresgid32
6694 #define __NR_sys_setresgid __NR_setresgid32
6695 #else
6696 #define __NR_sys_setresgid __NR_setresgid
6697 #endif
6699 _syscall1(int, sys_setuid, uid_t, uid)
6700 _syscall1(int, sys_setgid, gid_t, gid)
6701 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6702 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6704 void syscall_init(void)
6706 IOCTLEntry *ie;
6707 const argtype *arg_type;
6708 int size;
6709 int i;
6711 thunk_init(STRUCT_MAX);
6713 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6714 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6715 #include "syscall_types.h"
6716 #undef STRUCT
6717 #undef STRUCT_SPECIAL
7719 /* Build target_to_host_errno_table[] from
6720 * host_to_target_errno_table[]. */
6721 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6722 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7725 /* We patch the ioctl size if necessary. We rely on the fact that
7726 no ioctl has all bits set to '1' in its size field */
6727 ie = ioctl_entries;
6728 while (ie->target_cmd != 0) {
6729 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6730 TARGET_IOC_SIZEMASK) {
6731 arg_type = ie->arg_type;
6732 if (arg_type[0] != TYPE_PTR) {
6733 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6734 ie->target_cmd);
6735 exit(1);
6737 arg_type++;
6738 size = thunk_type_size(arg_type, 0);
6739 ie->target_cmd = (ie->target_cmd &
6740 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6741 (size << TARGET_IOC_SIZESHIFT);
6744 /* automatic consistency check if same arch */
6745 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6746 (defined(__x86_64__) && defined(TARGET_X86_64))
6747 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6748 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6749 ie->name, ie->target_cmd, ie->host_cmd);
6751 #endif
6752 ie++;
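/*
 * Editor's sketch (assumes the common 8/8/14/2 nr/type/size/dir ioctl
 * encoding, i.e. TARGET_IOC_SIZESHIFT == 16 and TARGET_IOC_SIZEMASK ==
 * 0x3fff): a table entry registered with an all-ones size placeholder,
 * say 0x3fff1234, has those 14 bits rewritten at startup:
 *
 *   cmd &= ~(0x3fff << 16);                          clear the placeholder
 *   cmd |= thunk_type_size(arg_type + 1, 0) << 16;   real struct size
 */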
6756 #ifdef TARGET_NR_truncate64
6757 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6758 abi_long arg2,
6759 abi_long arg3,
6760 abi_long arg4)
6762 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6763 arg2 = arg3;
6764 arg3 = arg4;
6766 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6768 #endif
6770 #ifdef TARGET_NR_ftruncate64
6771 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6772 abi_long arg2,
6773 abi_long arg3,
6774 abi_long arg4)
6776 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6777 arg2 = arg3;
6778 arg3 = arg4;
6780 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6782 #endif
6784 #if defined(TARGET_NR_timer_settime) || \
6785 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6786 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6787 abi_ulong target_addr)
6789 struct target_itimerspec *target_itspec;
6791 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6792 return -TARGET_EFAULT;
6795 host_itspec->it_interval.tv_sec =
6796 tswapal(target_itspec->it_interval.tv_sec);
6797 host_itspec->it_interval.tv_nsec =
6798 tswapal(target_itspec->it_interval.tv_nsec);
6799 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6800 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6802 unlock_user_struct(target_itspec, target_addr, 1);
6803 return 0;
6805 #endif
6807 #if ((defined(TARGET_NR_timerfd_gettime) || \
6808 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6809 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6810 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6811 struct itimerspec *host_its)
6813 struct target_itimerspec *target_itspec;
6815 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6816 return -TARGET_EFAULT;
6819 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6820 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6822 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6823 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6825 unlock_user_struct(target_itspec, target_addr, 0);
6826 return 0;
6828 #endif
6830 #if defined(TARGET_NR_adjtimex) || \
6831 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6832 static inline abi_long target_to_host_timex(struct timex *host_tx,
6833 abi_long target_addr)
6835 struct target_timex *target_tx;
6837 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6838 return -TARGET_EFAULT;
6841 __get_user(host_tx->modes, &target_tx->modes);
6842 __get_user(host_tx->offset, &target_tx->offset);
6843 __get_user(host_tx->freq, &target_tx->freq);
6844 __get_user(host_tx->maxerror, &target_tx->maxerror);
6845 __get_user(host_tx->esterror, &target_tx->esterror);
6846 __get_user(host_tx->status, &target_tx->status);
6847 __get_user(host_tx->constant, &target_tx->constant);
6848 __get_user(host_tx->precision, &target_tx->precision);
6849 __get_user(host_tx->tolerance, &target_tx->tolerance);
6850 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6851 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6852 __get_user(host_tx->tick, &target_tx->tick);
6853 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6854 __get_user(host_tx->jitter, &target_tx->jitter);
6855 __get_user(host_tx->shift, &target_tx->shift);
6856 __get_user(host_tx->stabil, &target_tx->stabil);
6857 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6858 __get_user(host_tx->calcnt, &target_tx->calcnt);
6859 __get_user(host_tx->errcnt, &target_tx->errcnt);
6860 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6861 __get_user(host_tx->tai, &target_tx->tai);
6863 unlock_user_struct(target_tx, target_addr, 0);
6864 return 0;
6867 static inline abi_long host_to_target_timex(abi_long target_addr,
6868 struct timex *host_tx)
6870 struct target_timex *target_tx;
6872 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6873 return -TARGET_EFAULT;
6876 __put_user(host_tx->modes, &target_tx->modes);
6877 __put_user(host_tx->offset, &target_tx->offset);
6878 __put_user(host_tx->freq, &target_tx->freq);
6879 __put_user(host_tx->maxerror, &target_tx->maxerror);
6880 __put_user(host_tx->esterror, &target_tx->esterror);
6881 __put_user(host_tx->status, &target_tx->status);
6882 __put_user(host_tx->constant, &target_tx->constant);
6883 __put_user(host_tx->precision, &target_tx->precision);
6884 __put_user(host_tx->tolerance, &target_tx->tolerance);
6885 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6886 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6887 __put_user(host_tx->tick, &target_tx->tick);
6888 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6889 __put_user(host_tx->jitter, &target_tx->jitter);
6890 __put_user(host_tx->shift, &target_tx->shift);
6891 __put_user(host_tx->stabil, &target_tx->stabil);
6892 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6893 __put_user(host_tx->calcnt, &target_tx->calcnt);
6894 __put_user(host_tx->errcnt, &target_tx->errcnt);
6895 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6896 __put_user(host_tx->tai, &target_tx->tai);
6898 unlock_user_struct(target_tx, target_addr, 1);
6899 return 0;
6901 #endif
6903 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6904 abi_ulong target_addr)
6906 struct target_sigevent *target_sevp;
6908 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6909 return -TARGET_EFAULT;
6912 /* This union is awkward on 64 bit systems because it has a 32 bit
6913 * integer and a pointer in it; we follow the conversion approach
6914 * used for handling sigval types in signal.c so the guest should get
6915 * the correct value back even if we did a 64 bit byteswap and it's
6916 * using the 32 bit integer. */
6918 host_sevp->sigev_value.sival_ptr =
6919 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6920 host_sevp->sigev_signo =
6921 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6922 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6923 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6925 unlock_user_struct(target_sevp, target_addr, 1);
6926 return 0;
6929 #if defined(TARGET_NR_mlockall)
6930 static inline int target_to_host_mlockall_arg(int arg)
6932 int result = 0;
6934 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6935 result |= MCL_CURRENT;
6937 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6938 result |= MCL_FUTURE;
6940 return result;
6942 #endif
6944 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6945 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6946 defined(TARGET_NR_newfstatat))
6947 static inline abi_long host_to_target_stat64(void *cpu_env,
6948 abi_ulong target_addr,
6949 struct stat *host_st)
6951 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6952 if (((CPUARMState *)cpu_env)->eabi) {
6953 struct target_eabi_stat64 *target_st;
6955 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6956 return -TARGET_EFAULT;
6957 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6958 __put_user(host_st->st_dev, &target_st->st_dev);
6959 __put_user(host_st->st_ino, &target_st->st_ino);
6960 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6961 __put_user(host_st->st_ino, &target_st->__st_ino);
6962 #endif
6963 __put_user(host_st->st_mode, &target_st->st_mode);
6964 __put_user(host_st->st_nlink, &target_st->st_nlink);
6965 __put_user(host_st->st_uid, &target_st->st_uid);
6966 __put_user(host_st->st_gid, &target_st->st_gid);
6967 __put_user(host_st->st_rdev, &target_st->st_rdev);
6968 __put_user(host_st->st_size, &target_st->st_size);
6969 __put_user(host_st->st_blksize, &target_st->st_blksize);
6970 __put_user(host_st->st_blocks, &target_st->st_blocks);
6971 __put_user(host_st->st_atime, &target_st->target_st_atime);
6972 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6973 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6974 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6975 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6976 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6977 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6978 #endif
6979 unlock_user_struct(target_st, target_addr, 1);
6980 } else
6981 #endif
6983 #if defined(TARGET_HAS_STRUCT_STAT64)
6984 struct target_stat64 *target_st;
6985 #else
6986 struct target_stat *target_st;
6987 #endif
6989 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6990 return -TARGET_EFAULT;
6991 memset(target_st, 0, sizeof(*target_st));
6992 __put_user(host_st->st_dev, &target_st->st_dev);
6993 __put_user(host_st->st_ino, &target_st->st_ino);
6994 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6995 __put_user(host_st->st_ino, &target_st->__st_ino);
6996 #endif
6997 __put_user(host_st->st_mode, &target_st->st_mode);
6998 __put_user(host_st->st_nlink, &target_st->st_nlink);
6999 __put_user(host_st->st_uid, &target_st->st_uid);
7000 __put_user(host_st->st_gid, &target_st->st_gid);
7001 __put_user(host_st->st_rdev, &target_st->st_rdev);
7002 /* XXX: better use of kernel struct */
7003 __put_user(host_st->st_size, &target_st->st_size);
7004 __put_user(host_st->st_blksize, &target_st->st_blksize);
7005 __put_user(host_st->st_blocks, &target_st->st_blocks);
7006 __put_user(host_st->st_atime, &target_st->target_st_atime);
7007 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7008 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7009 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7010 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7011 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7012 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7013 #endif
7014 unlock_user_struct(target_st, target_addr, 1);
7017 return 0;
7019 #endif
7021 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7022 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7023 abi_ulong target_addr)
7025 struct target_statx *target_stx;
7027 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7028 return -TARGET_EFAULT;
7030 memset(target_stx, 0, sizeof(*target_stx));
7032 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7033 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7034 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7035 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7036 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7037 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7038 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7039 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7040 __put_user(host_stx->stx_size, &target_stx->stx_size);
7041 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7042 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7043 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7044 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7045 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7046 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7047 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7048 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7049 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7050 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7051 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7052 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7053 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7054 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7056 unlock_user_struct(target_stx, target_addr, 1);
7058 return 0;
7060 #endif
7062 static int do_sys_futex(int *uaddr, int op, int val,
7063 const struct timespec *timeout, int *uaddr2,
7064 int val3)
7066 #if HOST_LONG_BITS == 64
7067 #if defined(__NR_futex)
7068 /* time_t is always 64-bit here, so no _time64 variant is defined */
7069 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7071 #endif
7072 #else /* HOST_LONG_BITS == 64 */
7073 #if defined(__NR_futex_time64)
7074 if (sizeof(timeout->tv_sec) == 8) {
7075 /* _time64 function on 32bit arch */
7076 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7078 #endif
7079 #if defined(__NR_futex)
7080 /* old function on 32bit arch */
7081 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7082 #endif
7083 #endif /* HOST_LONG_BITS == 64 */
7084 g_assert_not_reached();
7087 static int do_safe_futex(int *uaddr, int op, int val,
7088 const struct timespec *timeout, int *uaddr2,
7089 int val3)
7091 #if HOST_LONG_BITS == 64
7092 #if defined(__NR_futex)
7093 /* time_t is always 64-bit here, so no _time64 variant is defined */
7094 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7095 #endif
7096 #else /* HOST_LONG_BITS == 64 */
7097 #if defined(__NR_futex_time64)
7098 if (sizeof(timeout->tv_sec) == 8) {
7099 /* _time64 function on 32bit arch */
7100 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7101 val3));
7103 #endif
7104 #if defined(__NR_futex)
7105 /* old function on 32bit arch */
7106 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7107 #endif
7108 #endif /* HOST_LONG_BITS == 64 */
7109 return -TARGET_ENOSYS;
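/*
 * Editor's note (illustrative): the decision table implemented by
 * do_sys_futex() and do_safe_futex() above.
 *
 *   64-bit host:                   __NR_futex        (time_t already 64-bit)
 *   32-bit host, 64-bit tv_sec:    __NR_futex_time64 (when the kernel has it)
 *   32-bit host, 32-bit tv_sec:    __NR_futex        (classic syscall)
 */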
7112 /* ??? Using host futex calls even when target atomic operations
7113 are not really atomic probably breaks things. However, implementing
7114 futexes locally would make futexes shared between multiple processes
7115 tricky; and they would probably be useless anyway, because guest
7116 atomic operations won't work either. */
7117 #if defined(TARGET_NR_futex)
7118 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7119 target_ulong uaddr2, int val3)
7121 struct timespec ts, *pts;
7122 int base_op;
7124 /* ??? We assume FUTEX_* constants are the same on both host
7125 and target. */
7126 #ifdef FUTEX_CMD_MASK
7127 base_op = op & FUTEX_CMD_MASK;
7128 #else
7129 base_op = op;
7130 #endif
7131 switch (base_op) {
7132 case FUTEX_WAIT:
7133 case FUTEX_WAIT_BITSET:
7134 if (timeout) {
7135 pts = &ts;
7136 target_to_host_timespec(pts, timeout);
7137 } else {
7138 pts = NULL;
7140 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7141 case FUTEX_WAKE:
7142 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7143 case FUTEX_FD:
7144 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7145 case FUTEX_REQUEUE:
7146 case FUTEX_CMP_REQUEUE:
7147 case FUTEX_WAKE_OP:
7148 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7149 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7150 But the prototype takes a `struct timespec *'; insert casts
7151 to satisfy the compiler. We do not need to tswap TIMEOUT
7152 since it's not compared to guest memory. */
7153 pts = (struct timespec *)(uintptr_t) timeout;
7154 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7155 (base_op == FUTEX_CMP_REQUEUE
7156 ? tswap32(val3)
7157 : val3));
7158 default:
7159 return -TARGET_ENOSYS;
7162 #endif
7164 #if defined(TARGET_NR_futex_time64)
7165 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7166 target_ulong uaddr2, int val3)
7168 struct timespec ts, *pts;
7169 int base_op;
7171 /* ??? We assume FUTEX_* constants are the same on both host
7172 and target. */
7173 #ifdef FUTEX_CMD_MASK
7174 base_op = op & FUTEX_CMD_MASK;
7175 #else
7176 base_op = op;
7177 #endif
7178 switch (base_op) {
7179 case FUTEX_WAIT:
7180 case FUTEX_WAIT_BITSET:
7181 if (timeout) {
7182 pts = &ts;
7183 target_to_host_timespec64(pts, timeout);
7184 } else {
7185 pts = NULL;
7187 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7188 case FUTEX_WAKE:
7189 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7190 case FUTEX_FD:
7191 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7192 case FUTEX_REQUEUE:
7193 case FUTEX_CMP_REQUEUE:
7194 case FUTEX_WAKE_OP:
7195 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7196 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7197 But the prototype takes a `struct timespec *'; insert casts
7198 to satisfy the compiler. We do not need to tswap TIMEOUT
7199 since it's not compared to guest memory. */
7200 pts = (struct timespec *)(uintptr_t) timeout;
7201 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7202 (base_op == FUTEX_CMP_REQUEUE
7203 ? tswap32(val3)
7204 : val3));
7205 default:
7206 return -TARGET_ENOSYS;
7209 #endif
7211 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7212 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7213 abi_long handle, abi_long mount_id,
7214 abi_long flags)
7216 struct file_handle *target_fh;
7217 struct file_handle *fh;
7218 int mid = 0;
7219 abi_long ret;
7220 char *name;
7221 unsigned int size, total_size;
7223 if (get_user_s32(size, handle)) {
7224 return -TARGET_EFAULT;
7227 name = lock_user_string(pathname);
7228 if (!name) {
7229 return -TARGET_EFAULT;
7232 total_size = sizeof(struct file_handle) + size;
7233 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7234 if (!target_fh) {
7235 unlock_user(name, pathname, 0);
7236 return -TARGET_EFAULT;
7239 fh = g_malloc0(total_size);
7240 fh->handle_bytes = size;
7242 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7243 unlock_user(name, pathname, 0);
7245 /* man name_to_handle_at(2):
7246 * Other than the use of the handle_bytes field, the caller should treat
7247 * the file_handle structure as an opaque data type */
7250 memcpy(target_fh, fh, total_size);
7251 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7252 target_fh->handle_type = tswap32(fh->handle_type);
7253 g_free(fh);
7254 unlock_user(target_fh, handle, total_size);
7256 if (put_user_s32(mid, mount_id)) {
7257 return -TARGET_EFAULT;
7260 return ret;
7263 #endif
7265 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7266 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7267 abi_long flags)
7269 struct file_handle *target_fh;
7270 struct file_handle *fh;
7271 unsigned int size, total_size;
7272 abi_long ret;
7274 if (get_user_s32(size, handle)) {
7275 return -TARGET_EFAULT;
7278 total_size = sizeof(struct file_handle) + size;
7279 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7280 if (!target_fh) {
7281 return -TARGET_EFAULT;
7284 fh = g_memdup(target_fh, total_size);
7285 fh->handle_bytes = size;
7286 fh->handle_type = tswap32(target_fh->handle_type);
7288 ret = get_errno(open_by_handle_at(mount_fd, fh,
7289 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7291 g_free(fh);
7293 unlock_user(target_fh, handle, total_size);
7295 return ret;
7297 #endif
7299 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7301 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7303 int host_flags;
7304 target_sigset_t *target_mask;
7305 sigset_t host_mask;
7306 abi_long ret;
7308 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7309 return -TARGET_EINVAL;
7311 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7312 return -TARGET_EFAULT;
7315 target_to_host_sigset(&host_mask, target_mask);
7317 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7319 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7320 if (ret >= 0) {
7321 fd_trans_register(ret, &target_signalfd_trans);
7324 unlock_user_struct(target_mask, mask, 0);
7326 return ret;
7328 #endif
7330 /* Map host to target signal numbers for the wait family of syscalls.
7331 Assume all other status bits are the same. */
7332 int host_to_target_waitstatus(int status)
7334 if (WIFSIGNALED(status)) {
7335 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7337 if (WIFSTOPPED(status)) {
7338 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7339 | (status & 0xff);
7341 return status;
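/*
 * Editor's sketch (illustrative): the classic Linux wait status encoding
 * relied on above.  For a child stopped by host SIGTSTP (20 on x86):
 *
 *   status == (20 << 8) | 0x7f
 *
 * Only bits 8..15 carry the signal number and need translating; the
 * "(status & 0xff)" term keeps the 0x7f stop marker intact.  The
 * WIFSIGNALED case likewise preserves everything above the low 7 signal
 * bits, including the core-dump flag 0x80.
 */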
7344 static int open_self_cmdline(void *cpu_env, int fd)
7346 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7347 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7348 int i;
7350 for (i = 0; i < bprm->argc; i++) {
7351 size_t len = strlen(bprm->argv[i]) + 1;
7353 if (write(fd, bprm->argv[i], len) != len) {
7354 return -1;
7358 return 0;
7361 static int open_self_maps(void *cpu_env, int fd)
7363 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7364 TaskState *ts = cpu->opaque;
7365 GSList *map_info = read_self_maps();
7366 GSList *s;
7367 int count;
7369 for (s = map_info; s; s = g_slist_next(s)) {
7370 MapInfo *e = (MapInfo *) s->data;
7372 if (h2g_valid(e->start)) {
7373 unsigned long min = e->start;
7374 unsigned long max = e->end;
7375 int flags = page_get_flags(h2g(min));
7376 const char *path;
7378 max = h2g_valid(max - 1) ?
7379 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7381 if (page_check_range(h2g(min), max - min, flags) == -1) {
7382 continue;
7385 if (h2g(min) == ts->info->stack_limit) {
7386 path = "[stack]";
7387 } else {
7388 path = e->path;
7391 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7392 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7393 h2g(min), h2g(max - 1) + 1,
7394 e->is_read ? 'r' : '-',
7395 e->is_write ? 'w' : '-',
7396 e->is_exec ? 'x' : '-',
7397 e->is_priv ? 'p' : '-',
7398 (uint64_t) e->offset, e->dev, e->inode);
7399 if (path) {
7400 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7401 } else {
7402 dprintf(fd, "\n");
7407 free_self_maps(map_info);
7409 #ifdef TARGET_VSYSCALL_PAGE
7411 /* We only support execution from the vsyscall page.
7412 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3. */
7414 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7415 " --xp 00000000 00:00 0",
7416 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7417 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
7418 #endif
7420 return 0;
7423 static int open_self_stat(void *cpu_env, int fd)
7425 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7426 TaskState *ts = cpu->opaque;
7427 g_autoptr(GString) buf = g_string_new(NULL);
7428 int i;
7430 for (i = 0; i < 44; i++) {
7431 if (i == 0) {
7432 /* pid */
7433 g_string_printf(buf, FMT_pid " ", getpid());
7434 } else if (i == 1) {
7435 /* app name */
7436 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7437 bin = bin ? bin + 1 : ts->bprm->argv[0];
7438 g_string_printf(buf, "(%.15s) ", bin);
7439 } else if (i == 27) {
7440 /* stack bottom */
7441 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7442 } else {
7443 /* for the rest, there is MasterCard */
7444 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7447 if (write(fd, buf->str, buf->len) != buf->len) {
7448 return -1;
7452 return 0;
7455 static int open_self_auxv(void *cpu_env, int fd)
7457 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7458 TaskState *ts = cpu->opaque;
7459 abi_ulong auxv = ts->info->saved_auxv;
7460 abi_ulong len = ts->info->auxv_len;
7461 char *ptr;
7464 /* The auxiliary vector is stored on the target process's stack;
7465 * read the whole auxv vector and copy it to the file. */
7467 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7468 if (ptr != NULL) {
7469 while (len > 0) {
7470 ssize_t r;
7471 r = write(fd, ptr, len);
7472 if (r <= 0) {
7473 break;
7475 len -= r;
7476 ptr += r;
7478 lseek(fd, 0, SEEK_SET);
7479 unlock_user(ptr, auxv, len);
7482 return 0;
7485 static int is_proc_myself(const char *filename, const char *entry)
7487 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7488 filename += strlen("/proc/");
7489 if (!strncmp(filename, "self/", strlen("self/"))) {
7490 filename += strlen("self/");
7491 } else if (*filename >= '1' && *filename <= '9') {
7492 char myself[80];
7493 snprintf(myself, sizeof(myself), "%d/", getpid());
7494 if (!strncmp(filename, myself, strlen(myself))) {
7495 filename += strlen(myself);
7496 } else {
7497 return 0;
7499 } else {
7500 return 0;
7502 if (!strcmp(filename, entry)) {
7503 return 1;
7506 return 0;
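/*
 * Editor's sketch: is_proc_myself() in action, assuming getpid() == 1234.
 *
 *   is_proc_myself("/proc/self/maps", "maps")  == 1
 *   is_proc_myself("/proc/1234/maps", "maps")  == 1   pid matches our own
 *   is_proc_myself("/proc/4321/maps", "maps")  == 0   someone else's pid
 *   is_proc_myself("/proc/self/mapsx", "maps") == 0   entry must match fully
 */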
7509 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7510 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7511 static int is_proc(const char *filename, const char *entry)
7513 return strcmp(filename, entry) == 0;
7515 #endif
7517 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7518 static int open_net_route(void *cpu_env, int fd)
7520 FILE *fp;
7521 char *line = NULL;
7522 size_t len = 0;
7523 ssize_t read;
7525 fp = fopen("/proc/net/route", "r");
7526 if (fp == NULL) {
7527 return -1;
7530 /* read header */
7532 read = getline(&line, &len, fp);
7533 dprintf(fd, "%s", line);
7535 /* read routes */
7537 while ((read = getline(&line, &len, fp)) != -1) {
7538 char iface[16];
7539 uint32_t dest, gw, mask;
7540 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7541 int fields;
7543 fields = sscanf(line,
7544 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7545 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7546 &mask, &mtu, &window, &irtt);
7547 if (fields != 11) {
7548 continue;
7550 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7551 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7552 metric, tswap32(mask), mtu, window, irtt);
7555 free(line);
7556 fclose(fp);
7558 return 0;
7560 #endif
7562 #if defined(TARGET_SPARC)
7563 static int open_cpuinfo(void *cpu_env, int fd)
7565 dprintf(fd, "type\t\t: sun4u\n");
7566 return 0;
7568 #endif
7570 #if defined(TARGET_HPPA)
7571 static int open_cpuinfo(void *cpu_env, int fd)
7573 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7574 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7575 dprintf(fd, "capabilities\t: os32\n");
7576 dprintf(fd, "model\t\t: 9000/778/B160L\n");
7577 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7578 return 0;
7580 #endif
7582 #if defined(TARGET_M68K)
7583 static int open_hardware(void *cpu_env, int fd)
7585 dprintf(fd, "Model:\t\tqemu-m68k\n");
7586 return 0;
7588 #endif
7590 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7592 struct fake_open {
7593 const char *filename;
7594 int (*fill)(void *cpu_env, int fd);
7595 int (*cmp)(const char *s1, const char *s2);
7597 const struct fake_open *fake_open;
7598 static const struct fake_open fakes[] = {
7599 { "maps", open_self_maps, is_proc_myself },
7600 { "stat", open_self_stat, is_proc_myself },
7601 { "auxv", open_self_auxv, is_proc_myself },
7602 { "cmdline", open_self_cmdline, is_proc_myself },
7603 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7604 { "/proc/net/route", open_net_route, is_proc },
7605 #endif
7606 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7607 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7608 #endif
7609 #if defined(TARGET_M68K)
7610 { "/proc/hardware", open_hardware, is_proc },
7611 #endif
7612 { NULL, NULL, NULL }
7615 if (is_proc_myself(pathname, "exe")) {
7616 int execfd = qemu_getauxval(AT_EXECFD);
7617 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7620 for (fake_open = fakes; fake_open->filename; fake_open++) {
7621 if (fake_open->cmp(pathname, fake_open->filename)) {
7622 break;
7626 if (fake_open->filename) {
7627 const char *tmpdir;
7628 char filename[PATH_MAX];
7629 int fd, r;
7631 /* create a temporary file to back the synthesized /proc contents */
7632 tmpdir = getenv("TMPDIR");
7633 if (!tmpdir)
7634 tmpdir = "/tmp";
7635 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7636 fd = mkstemp(filename);
7637 if (fd < 0) {
7638 return fd;
7640 unlink(filename);
7642 if ((r = fake_open->fill(cpu_env, fd))) {
7643 int e = errno;
7644 close(fd);
7645 errno = e;
7646 return r;
7648 lseek(fd, 0, SEEK_SET);
7650 return fd;
7653 return safe_openat(dirfd, path(pathname), flags, mode);
7656 #define TIMER_MAGIC 0x0caf0000
7657 #define TIMER_MAGIC_MASK 0xffff0000
7659 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7660 static target_timer_t get_timer_id(abi_long arg)
7662 target_timer_t timerid = arg;
7664 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7665 return -TARGET_EINVAL;
7668 timerid &= 0xffff;
7670 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7671 return -TARGET_EINVAL;
7674 return timerid;
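/*
 * Editor's sketch: the timer ID cookie format decoded above.  IDs handed
 * to the guest are TIMER_MAGIC | index, so:
 *
 *   get_timer_id(0x0caf0003) == 3               valid, index 3
 *   get_timer_id(0x12340003) == -TARGET_EINVAL  wrong magic
 *   get_timer_id(0x0cafffff) == -TARGET_EINVAL  index beyond g_posix_timers[]
 */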
7677 static int target_to_host_cpu_mask(unsigned long *host_mask,
7678 size_t host_size,
7679 abi_ulong target_addr,
7680 size_t target_size)
7682 unsigned target_bits = sizeof(abi_ulong) * 8;
7683 unsigned host_bits = sizeof(*host_mask) * 8;
7684 abi_ulong *target_mask;
7685 unsigned i, j;
7687 assert(host_size >= target_size);
7689 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7690 if (!target_mask) {
7691 return -TARGET_EFAULT;
7693 memset(host_mask, 0, host_size);
7695 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7696 unsigned bit = i * target_bits;
7697 abi_ulong val;
7699 __get_user(val, &target_mask[i]);
7700 for (j = 0; j < target_bits; j++, bit++) {
7701 if (val & (1UL << j)) {
7702 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7707 unlock_user(target_mask, target_addr, 0);
7708 return 0;
7711 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7712 size_t host_size,
7713 abi_ulong target_addr,
7714 size_t target_size)
7716 unsigned target_bits = sizeof(abi_ulong) * 8;
7717 unsigned host_bits = sizeof(*host_mask) * 8;
7718 abi_ulong *target_mask;
7719 unsigned i, j;
7721 assert(host_size >= target_size);
7723 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7724 if (!target_mask) {
7725 return -TARGET_EFAULT;
7728 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7729 unsigned bit = i * target_bits;
7730 abi_ulong val = 0;
7732 for (j = 0; j < target_bits; j++, bit++) {
7733 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7734 val |= 1UL << j;
7737 __put_user(val, &target_mask[i]);
7740 unlock_user(target_mask, target_addr, target_size);
7741 return 0;
7744 /* This is an internal helper for do_syscall so that there is a
7745 * single return point, allowing actions such as logging of
7746 * syscall results to be performed in one place.
7747 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7749 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7750 abi_long arg2, abi_long arg3, abi_long arg4,
7751 abi_long arg5, abi_long arg6, abi_long arg7,
7752 abi_long arg8)
7754 CPUState *cpu = env_cpu(cpu_env);
7755 abi_long ret;
7756 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7757 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7758 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7759 || defined(TARGET_NR_statx)
7760 struct stat st;
7761 #endif
7762 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7763 || defined(TARGET_NR_fstatfs)
7764 struct statfs stfs;
7765 #endif
7766 void *p;
7768 switch(num) {
7769 case TARGET_NR_exit:
7770 /* In old applications this may be used to implement _exit(2).
7771 However in threaded applications it is used for thread termination,
7772 and _exit_group is used for application termination.
7773 Do thread termination if we have more than one thread. */
7775 if (block_signals()) {
7776 return -TARGET_ERESTARTSYS;
7779 pthread_mutex_lock(&clone_lock);
7781 if (CPU_NEXT(first_cpu)) {
7782 TaskState *ts = cpu->opaque;
7784 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7785 object_unref(OBJECT(cpu));
7787 * At this point the CPU should be unrealized and removed
7788 * from cpu lists. We can clean up the rest of the thread
7789 * data without the lock held.
7792 pthread_mutex_unlock(&clone_lock);
7794 if (ts->child_tidptr) {
7795 put_user_u32(0, ts->child_tidptr);
7796 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7797 NULL, NULL, 0);
7799 thread_cpu = NULL;
7800 g_free(ts);
7801 rcu_unregister_thread();
7802 pthread_exit(NULL);
7805 pthread_mutex_unlock(&clone_lock);
7806 preexit_cleanup(cpu_env, arg1);
7807 _exit(arg1);
7808 return 0; /* avoid warning */
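    /*
     * For read and write, the fd_trans_*_data() hooks look up a
     * per-fd translator (registered when the descriptor was created,
     * e.g. for netlink sockets) that can rewrite the transferred
     * data between host and target representations.
     */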
7809 case TARGET_NR_read:
7810 if (arg2 == 0 && arg3 == 0) {
7811 return get_errno(safe_read(arg1, 0, 0));
7812 } else {
7813 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7814 return -TARGET_EFAULT;
7815 ret = get_errno(safe_read(arg1, p, arg3));
7816 if (ret >= 0 &&
7817 fd_trans_host_to_target_data(arg1)) {
7818 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7820 unlock_user(p, arg2, ret);
7822 return ret;
7823 case TARGET_NR_write:
7824 if (arg2 == 0 && arg3 == 0) {
7825 return get_errno(safe_write(arg1, 0, 0));
7827 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7828 return -TARGET_EFAULT;
7829 if (fd_trans_target_to_host_data(arg1)) {
7830 void *copy = g_malloc(arg3);
7831 memcpy(copy, p, arg3);
7832 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7833 if (ret >= 0) {
7834 ret = get_errno(safe_write(arg1, copy, ret));
7836 g_free(copy);
7837 } else {
7838 ret = get_errno(safe_write(arg1, p, arg3));
7840 unlock_user(p, arg2, 0);
7841 return ret;
7843 #ifdef TARGET_NR_open
7844 case TARGET_NR_open:
7845 if (!(p = lock_user_string(arg1)))
7846 return -TARGET_EFAULT;
7847 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7848 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7849 arg3));
7850 fd_trans_unregister(ret);
7851 unlock_user(p, arg1, 0);
7852 return ret;
7853 #endif
7854 case TARGET_NR_openat:
7855 if (!(p = lock_user_string(arg2)))
7856 return -TARGET_EFAULT;
7857 ret = get_errno(do_openat(cpu_env, arg1, p,
7858 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7859 arg4));
7860 fd_trans_unregister(ret);
7861 unlock_user(p, arg2, 0);
7862 return ret;
7863 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7864 case TARGET_NR_name_to_handle_at:
7865 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7866 return ret;
7867 #endif
7868 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7869 case TARGET_NR_open_by_handle_at:
7870 ret = do_open_by_handle_at(arg1, arg2, arg3);
7871 fd_trans_unregister(ret);
7872 return ret;
7873 #endif
7874 case TARGET_NR_close:
7875 fd_trans_unregister(arg1);
7876 return get_errno(close(arg1));
7878 case TARGET_NR_brk:
7879 return do_brk(arg1);
7880 #ifdef TARGET_NR_fork
7881 case TARGET_NR_fork:
7882 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7883 #endif
7884 #ifdef TARGET_NR_waitpid
7885 case TARGET_NR_waitpid:
7887 int status;
7888 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7889 if (!is_error(ret) && arg2 && ret
7890 && put_user_s32(host_to_target_waitstatus(status), arg2))
7891 return -TARGET_EFAULT;
7893 return ret;
7894 #endif
7895 #ifdef TARGET_NR_waitid
7896 case TARGET_NR_waitid:
7898 siginfo_t info;
7899 info.si_pid = 0;
7900 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7901 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7902 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7903 return -TARGET_EFAULT;
7904 host_to_target_siginfo(p, &info);
7905 unlock_user(p, arg3, sizeof(target_siginfo_t));
7908 return ret;
7909 #endif
7910 #ifdef TARGET_NR_creat /* not on alpha */
7911 case TARGET_NR_creat:
7912 if (!(p = lock_user_string(arg1)))
7913 return -TARGET_EFAULT;
7914 ret = get_errno(creat(p, arg2));
7915 fd_trans_unregister(ret);
7916 unlock_user(p, arg1, 0);
7917 return ret;
7918 #endif
7919 #ifdef TARGET_NR_link
7920 case TARGET_NR_link:
7922 void * p2;
7923 p = lock_user_string(arg1);
7924 p2 = lock_user_string(arg2);
7925 if (!p || !p2)
7926 ret = -TARGET_EFAULT;
7927 else
7928 ret = get_errno(link(p, p2));
7929 unlock_user(p2, arg2, 0);
7930 unlock_user(p, arg1, 0);
7932 return ret;
7933 #endif
7934 #if defined(TARGET_NR_linkat)
7935 case TARGET_NR_linkat:
7937 void * p2 = NULL;
7938 if (!arg2 || !arg4)
7939 return -TARGET_EFAULT;
7940 p = lock_user_string(arg2);
7941 p2 = lock_user_string(arg4);
7942 if (!p || !p2)
7943 ret = -TARGET_EFAULT;
7944 else
7945 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7946 unlock_user(p, arg2, 0);
7947 unlock_user(p2, arg4, 0);
7949 return ret;
7950 #endif
7951 #ifdef TARGET_NR_unlink
7952 case TARGET_NR_unlink:
7953 if (!(p = lock_user_string(arg1)))
7954 return -TARGET_EFAULT;
7955 ret = get_errno(unlink(p));
7956 unlock_user(p, arg1, 0);
7957 return ret;
7958 #endif
7959 #if defined(TARGET_NR_unlinkat)
7960 case TARGET_NR_unlinkat:
7961 if (!(p = lock_user_string(arg2)))
7962 return -TARGET_EFAULT;
7963 ret = get_errno(unlinkat(arg1, p, arg3));
7964 unlock_user(p, arg2, 0);
7965 return ret;
7966 #endif
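    /*
     * execve needs its argv and envp vectors rebuilt as host pointer
     * arrays: the guest arrays are walked once to count the entries
     * and then again to lock each string into host memory.
     */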
7967 case TARGET_NR_execve:
7969 char **argp, **envp;
7970 int argc, envc;
7971 abi_ulong gp;
7972 abi_ulong guest_argp;
7973 abi_ulong guest_envp;
7974 abi_ulong addr;
7975 char **q;
7976 int total_size = 0;
7978 argc = 0;
7979 guest_argp = arg2;
7980 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7981 if (get_user_ual(addr, gp))
7982 return -TARGET_EFAULT;
7983 if (!addr)
7984 break;
7985 argc++;
7987 envc = 0;
7988 guest_envp = arg3;
7989 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7990 if (get_user_ual(addr, gp))
7991 return -TARGET_EFAULT;
7992 if (!addr)
7993 break;
7994 envc++;
7997 argp = g_new0(char *, argc + 1);
7998 envp = g_new0(char *, envc + 1);
8000 for (gp = guest_argp, q = argp; gp;
8001 gp += sizeof(abi_ulong), q++) {
8002 if (get_user_ual(addr, gp))
8003 goto execve_efault;
8004 if (!addr)
8005 break;
8006 if (!(*q = lock_user_string(addr)))
8007 goto execve_efault;
8008 total_size += strlen(*q) + 1;
8010 *q = NULL;
8012 for (gp = guest_envp, q = envp; gp;
8013 gp += sizeof(abi_ulong), q++) {
8014 if (get_user_ual(addr, gp))
8015 goto execve_efault;
8016 if (!addr)
8017 break;
8018 if (!(*q = lock_user_string(addr)))
8019 goto execve_efault;
8020 total_size += strlen(*q) + 1;
8022 *q = NULL;
8024 if (!(p = lock_user_string(arg1)))
8025 goto execve_efault;
8026 /* Although execve() is not an interruptible syscall it is
8027 * a special case where we must use the safe_syscall wrapper:
8028 * if we allow a signal to happen before we make the host
8029 * syscall then we will 'lose' it, because at the point of
8030 * execve the process leaves QEMU's control. So we use the
8031 * safe syscall wrapper to ensure that we either take the
8032 * signal as a guest signal, or else it does not happen
8033 * before the execve completes and makes it the other
8034 * program's problem.
8036 ret = get_errno(safe_execve(p, argp, envp));
8037 unlock_user(p, arg1, 0);
8039 goto execve_end;
8041 execve_efault:
8042 ret = -TARGET_EFAULT;
8044 execve_end:
8045 for (gp = guest_argp, q = argp; *q;
8046 gp += sizeof(abi_ulong), q++) {
8047 if (get_user_ual(addr, gp)
8048 || !addr)
8049 break;
8050 unlock_user(*q, addr, 0);
8052 for (gp = guest_envp, q = envp; *q;
8053 gp += sizeof(abi_ulong), q++) {
8054 if (get_user_ual(addr, gp)
8055 || !addr)
8056 break;
8057 unlock_user(*q, addr, 0);
8060 g_free(argp);
8061 g_free(envp);
8063 return ret;
8064 case TARGET_NR_chdir:
8065 if (!(p = lock_user_string(arg1)))
8066 return -TARGET_EFAULT;
8067 ret = get_errno(chdir(p));
8068 unlock_user(p, arg1, 0);
8069 return ret;
8070 #ifdef TARGET_NR_time
8071 case TARGET_NR_time:
8073 time_t host_time;
8074 ret = get_errno(time(&host_time));
8075 if (!is_error(ret)
8076 && arg1
8077 && put_user_sal(host_time, arg1))
8078 return -TARGET_EFAULT;
8080 return ret;
8081 #endif
8082 #ifdef TARGET_NR_mknod
8083 case TARGET_NR_mknod:
8084 if (!(p = lock_user_string(arg1)))
8085 return -TARGET_EFAULT;
8086 ret = get_errno(mknod(p, arg2, arg3));
8087 unlock_user(p, arg1, 0);
8088 return ret;
8089 #endif
8090 #if defined(TARGET_NR_mknodat)
8091 case TARGET_NR_mknodat:
8092 if (!(p = lock_user_string(arg2)))
8093 return -TARGET_EFAULT;
8094 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8095 unlock_user(p, arg2, 0);
8096 return ret;
8097 #endif
8098 #ifdef TARGET_NR_chmod
8099 case TARGET_NR_chmod:
8100 if (!(p = lock_user_string(arg1)))
8101 return -TARGET_EFAULT;
8102 ret = get_errno(chmod(p, arg2));
8103 unlock_user(p, arg1, 0);
8104 return ret;
8105 #endif
8106 #ifdef TARGET_NR_lseek
8107 case TARGET_NR_lseek:
8108 return get_errno(lseek(arg1, arg2, arg3));
8109 #endif
8110 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8111 /* Alpha specific */
8112 case TARGET_NR_getxpid:
8113 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8114 return get_errno(getpid());
8115 #endif
8116 #ifdef TARGET_NR_getpid
8117 case TARGET_NR_getpid:
8118 return get_errno(getpid());
8119 #endif
8120 case TARGET_NR_mount:
8122 /* need to look at the data field */
8123 void *p2, *p3;
8125 if (arg1) {
8126 p = lock_user_string(arg1);
8127 if (!p) {
8128 return -TARGET_EFAULT;
8130 } else {
8131 p = NULL;
8134 p2 = lock_user_string(arg2);
8135 if (!p2) {
8136 if (arg1) {
8137 unlock_user(p, arg1, 0);
8139 return -TARGET_EFAULT;
8142 if (arg3) {
8143 p3 = lock_user_string(arg3);
8144 if (!p3) {
8145 if (arg1) {
8146 unlock_user(p, arg1, 0);
8148 unlock_user(p2, arg2, 0);
8149 return -TARGET_EFAULT;
8151 } else {
8152 p3 = NULL;
8155 /* FIXME - arg5 should be locked, but it isn't clear how to
8156 * do that since it's not guaranteed to be a NULL-terminated
8157 * string.
8159 if (!arg5) {
8160 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8161 } else {
8162 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8164 ret = get_errno(ret);
8166 if (arg1) {
8167 unlock_user(p, arg1, 0);
8169 unlock_user(p2, arg2, 0);
8170 if (arg3) {
8171 unlock_user(p3, arg3, 0);
8174 return ret;
8175 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8176 #if defined(TARGET_NR_umount)
8177 case TARGET_NR_umount:
8178 #endif
8179 #if defined(TARGET_NR_oldumount)
8180 case TARGET_NR_oldumount:
8181 #endif
8182 if (!(p = lock_user_string(arg1)))
8183 return -TARGET_EFAULT;
8184 ret = get_errno(umount(p));
8185 unlock_user(p, arg1, 0);
8186 return ret;
8187 #endif
8188 #ifdef TARGET_NR_stime /* not on alpha */
8189 case TARGET_NR_stime:
8191 struct timespec ts;
8192 ts.tv_nsec = 0;
8193 if (get_user_sal(ts.tv_sec, arg1)) {
8194 return -TARGET_EFAULT;
8196 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8198 #endif
8199 #ifdef TARGET_NR_alarm /* not on alpha */
8200 case TARGET_NR_alarm:
8201 return alarm(arg1);
8202 #endif
8203 #ifdef TARGET_NR_pause /* not on alpha */
8204 case TARGET_NR_pause:
8205 if (!block_signals()) {
8206 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8208 return -TARGET_EINTR;
8209 #endif
8210 #ifdef TARGET_NR_utime
8211 case TARGET_NR_utime:
8213 struct utimbuf tbuf, *host_tbuf;
8214 struct target_utimbuf *target_tbuf;
8215 if (arg2) {
8216 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8217 return -TARGET_EFAULT;
8218 tbuf.actime = tswapal(target_tbuf->actime);
8219 tbuf.modtime = tswapal(target_tbuf->modtime);
8220 unlock_user_struct(target_tbuf, arg2, 0);
8221 host_tbuf = &tbuf;
8222 } else {
8223 host_tbuf = NULL;
8225 if (!(p = lock_user_string(arg1)))
8226 return -TARGET_EFAULT;
8227 ret = get_errno(utime(p, host_tbuf));
8228 unlock_user(p, arg1, 0);
8230 return ret;
8231 #endif
8232 #ifdef TARGET_NR_utimes
8233 case TARGET_NR_utimes:
8235 struct timeval *tvp, tv[2];
8236 if (arg2) {
8237 if (copy_from_user_timeval(&tv[0], arg2)
8238 || copy_from_user_timeval(&tv[1],
8239 arg2 + sizeof(struct target_timeval)))
8240 return -TARGET_EFAULT;
8241 tvp = tv;
8242 } else {
8243 tvp = NULL;
8245 if (!(p = lock_user_string(arg1)))
8246 return -TARGET_EFAULT;
8247 ret = get_errno(utimes(p, tvp));
8248 unlock_user(p, arg1, 0);
8250 return ret;
8251 #endif
8252 #if defined(TARGET_NR_futimesat)
8253 case TARGET_NR_futimesat:
8255 struct timeval *tvp, tv[2];
8256 if (arg3) {
8257 if (copy_from_user_timeval(&tv[0], arg3)
8258 || copy_from_user_timeval(&tv[1],
8259 arg3 + sizeof(struct target_timeval)))
8260 return -TARGET_EFAULT;
8261 tvp = tv;
8262 } else {
8263 tvp = NULL;
8265 if (!(p = lock_user_string(arg2))) {
8266 return -TARGET_EFAULT;
8268 ret = get_errno(futimesat(arg1, path(p), tvp));
8269 unlock_user(p, arg2, 0);
8271 return ret;
8272 #endif
8273 #ifdef TARGET_NR_access
8274 case TARGET_NR_access:
8275 if (!(p = lock_user_string(arg1))) {
8276 return -TARGET_EFAULT;
8278 ret = get_errno(access(path(p), arg2));
8279 unlock_user(p, arg1, 0);
8280 return ret;
8281 #endif
8282 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8283 case TARGET_NR_faccessat:
8284 if (!(p = lock_user_string(arg2))) {
8285 return -TARGET_EFAULT;
8287 ret = get_errno(faccessat(arg1, p, arg3, 0));
8288 unlock_user(p, arg2, 0);
8289 return ret;
8290 #endif
8291 #ifdef TARGET_NR_nice /* not on alpha */
8292 case TARGET_NR_nice:
8293 return get_errno(nice(arg1));
8294 #endif
8295 case TARGET_NR_sync:
8296 sync();
8297 return 0;
8298 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8299 case TARGET_NR_syncfs:
8300 return get_errno(syncfs(arg1));
8301 #endif
8302 case TARGET_NR_kill:
8303 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8304 #ifdef TARGET_NR_rename
8305 case TARGET_NR_rename:
8307 void *p2;
8308 p = lock_user_string(arg1);
8309 p2 = lock_user_string(arg2);
8310 if (!p || !p2)
8311 ret = -TARGET_EFAULT;
8312 else
8313 ret = get_errno(rename(p, p2));
8314 unlock_user(p2, arg2, 0);
8315 unlock_user(p, arg1, 0);
8317 return ret;
8318 #endif
8319 #if defined(TARGET_NR_renameat)
8320 case TARGET_NR_renameat:
8322 void *p2;
8323 p = lock_user_string(arg2);
8324 p2 = lock_user_string(arg4);
8325 if (!p || !p2)
8326 ret = -TARGET_EFAULT;
8327 else
8328 ret = get_errno(renameat(arg1, p, arg3, p2));
8329 unlock_user(p2, arg4, 0);
8330 unlock_user(p, arg2, 0);
8332 return ret;
8333 #endif
8334 #if defined(TARGET_NR_renameat2)
8335 case TARGET_NR_renameat2:
8337 void *p2;
8338 p = lock_user_string(arg2);
8339 p2 = lock_user_string(arg4);
8340 if (!p || !p2) {
8341 ret = -TARGET_EFAULT;
8342 } else {
8343 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8345 unlock_user(p2, arg4, 0);
8346 unlock_user(p, arg2, 0);
8348 return ret;
8349 #endif
8350 #ifdef TARGET_NR_mkdir
8351 case TARGET_NR_mkdir:
8352 if (!(p = lock_user_string(arg1)))
8353 return -TARGET_EFAULT;
8354 ret = get_errno(mkdir(p, arg2));
8355 unlock_user(p, arg1, 0);
8356 return ret;
8357 #endif
8358 #if defined(TARGET_NR_mkdirat)
8359 case TARGET_NR_mkdirat:
8360 if (!(p = lock_user_string(arg2)))
8361 return -TARGET_EFAULT;
8362 ret = get_errno(mkdirat(arg1, p, arg3));
8363 unlock_user(p, arg2, 0);
8364 return ret;
8365 #endif
8366 #ifdef TARGET_NR_rmdir
8367 case TARGET_NR_rmdir:
8368 if (!(p = lock_user_string(arg1)))
8369 return -TARGET_EFAULT;
8370 ret = get_errno(rmdir(p));
8371 unlock_user(p, arg1, 0);
8372 return ret;
8373 #endif
8374 case TARGET_NR_dup:
8375 ret = get_errno(dup(arg1));
8376 if (ret >= 0) {
8377 fd_trans_dup(arg1, ret);
8379 return ret;
8380 #ifdef TARGET_NR_pipe
8381 case TARGET_NR_pipe:
8382 return do_pipe(cpu_env, arg1, 0, 0);
8383 #endif
8384 #ifdef TARGET_NR_pipe2
8385 case TARGET_NR_pipe2:
8386 return do_pipe(cpu_env, arg1,
8387 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8388 #endif
8389 case TARGET_NR_times:
8391 struct target_tms *tmsp;
8392 struct tms tms;
8393 ret = get_errno(times(&tms));
8394 if (arg1) {
8395 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8396 if (!tmsp)
8397 return -TARGET_EFAULT;
8398 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8399 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8400 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8401 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8403 if (!is_error(ret))
8404 ret = host_to_target_clock_t(ret);
8406 return ret;
8407 case TARGET_NR_acct:
8408 if (arg1 == 0) {
8409 ret = get_errno(acct(NULL));
8410 } else {
8411 if (!(p = lock_user_string(arg1))) {
8412 return -TARGET_EFAULT;
8414 ret = get_errno(acct(path(p)));
8415 unlock_user(p, arg1, 0);
8417 return ret;
8418 #ifdef TARGET_NR_umount2
8419 case TARGET_NR_umount2:
8420 if (!(p = lock_user_string(arg1)))
8421 return -TARGET_EFAULT;
8422 ret = get_errno(umount2(p, arg2));
8423 unlock_user(p, arg1, 0);
8424 return ret;
8425 #endif
8426 case TARGET_NR_ioctl:
8427 return do_ioctl(arg1, arg2, arg3);
8428 #ifdef TARGET_NR_fcntl
8429 case TARGET_NR_fcntl:
8430 return do_fcntl(arg1, arg2, arg3);
8431 #endif
8432 case TARGET_NR_setpgid:
8433 return get_errno(setpgid(arg1, arg2));
8434 case TARGET_NR_umask:
8435 return get_errno(umask(arg1));
8436 case TARGET_NR_chroot:
8437 if (!(p = lock_user_string(arg1)))
8438 return -TARGET_EFAULT;
8439 ret = get_errno(chroot(p));
8440 unlock_user(p, arg1, 0);
8441 return ret;
8442 #ifdef TARGET_NR_dup2
8443 case TARGET_NR_dup2:
8444 ret = get_errno(dup2(arg1, arg2));
8445 if (ret >= 0) {
8446 fd_trans_dup(arg1, arg2);
8448 return ret;
8449 #endif
8450 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8451 case TARGET_NR_dup3:
8453 int host_flags;
8455 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8456 return -TARGET_EINVAL;
8458 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8459 ret = get_errno(dup3(arg1, arg2, host_flags));
8460 if (ret >= 0) {
8461 fd_trans_dup(arg1, arg2);
8463 return ret;
8465 #endif
8466 #ifdef TARGET_NR_getppid /* not on alpha */
8467 case TARGET_NR_getppid:
8468 return get_errno(getppid());
8469 #endif
8470 #ifdef TARGET_NR_getpgrp
8471 case TARGET_NR_getpgrp:
8472 return get_errno(getpgrp());
8473 #endif
8474 case TARGET_NR_setsid:
8475 return get_errno(setsid());
8476 #ifdef TARGET_NR_sigaction
8477 case TARGET_NR_sigaction:
8479 #if defined(TARGET_ALPHA)
8480 struct target_sigaction act, oact, *pact = 0;
8481 struct target_old_sigaction *old_act;
8482 if (arg2) {
8483 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8484 return -TARGET_EFAULT;
8485 act._sa_handler = old_act->_sa_handler;
8486 target_siginitset(&act.sa_mask, old_act->sa_mask);
8487 act.sa_flags = old_act->sa_flags;
8488 act.sa_restorer = 0;
8489 unlock_user_struct(old_act, arg2, 0);
8490 pact = &act;
8492 ret = get_errno(do_sigaction(arg1, pact, &oact));
8493 if (!is_error(ret) && arg3) {
8494 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8495 return -TARGET_EFAULT;
8496 old_act->_sa_handler = oact._sa_handler;
8497 old_act->sa_mask = oact.sa_mask.sig[0];
8498 old_act->sa_flags = oact.sa_flags;
8499 unlock_user_struct(old_act, arg3, 1);
8501 #elif defined(TARGET_MIPS)
8502 struct target_sigaction act, oact, *pact, *old_act;
8504 if (arg2) {
8505 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8506 return -TARGET_EFAULT;
8507 act._sa_handler = old_act->_sa_handler;
8508 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8509 act.sa_flags = old_act->sa_flags;
8510 unlock_user_struct(old_act, arg2, 0);
8511 pact = &act;
8512 } else {
8513 pact = NULL;
8516 ret = get_errno(do_sigaction(arg1, pact, &oact));
8518 if (!is_error(ret) && arg3) {
8519 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8520 return -TARGET_EFAULT;
8521 old_act->_sa_handler = oact._sa_handler;
8522 old_act->sa_flags = oact.sa_flags;
8523 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8524 old_act->sa_mask.sig[1] = 0;
8525 old_act->sa_mask.sig[2] = 0;
8526 old_act->sa_mask.sig[3] = 0;
8527 unlock_user_struct(old_act, arg3, 1);
8529 #else
8530 struct target_old_sigaction *old_act;
8531 struct target_sigaction act, oact, *pact;
8532 if (arg2) {
8533 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8534 return -TARGET_EFAULT;
8535 act._sa_handler = old_act->_sa_handler;
8536 target_siginitset(&act.sa_mask, old_act->sa_mask);
8537 act.sa_flags = old_act->sa_flags;
8538 act.sa_restorer = old_act->sa_restorer;
8539 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8540 act.ka_restorer = 0;
8541 #endif
8542 unlock_user_struct(old_act, arg2, 0);
8543 pact = &act;
8544 } else {
8545 pact = NULL;
8547 ret = get_errno(do_sigaction(arg1, pact, &oact));
8548 if (!is_error(ret) && arg3) {
8549 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8550 return -TARGET_EFAULT;
8551 old_act->_sa_handler = oact._sa_handler;
8552 old_act->sa_mask = oact.sa_mask.sig[0];
8553 old_act->sa_flags = oact.sa_flags;
8554 old_act->sa_restorer = oact.sa_restorer;
8555 unlock_user_struct(old_act, arg3, 1);
8557 #endif
8559 return ret;
8560 #endif
8561 case TARGET_NR_rt_sigaction:
8563 #if defined(TARGET_ALPHA)
8564 /* For Alpha and SPARC this is a 5 argument syscall, with
8565 * a 'restorer' parameter which must be copied into the
8566 * sa_restorer field of the sigaction struct.
8567 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8568 * and arg5 is the sigsetsize.
8569 * Alpha also has a separate rt_sigaction struct that it uses
8570 * here; SPARC uses the usual sigaction struct.
8572 struct target_rt_sigaction *rt_act;
8573 struct target_sigaction act, oact, *pact = 0;
8575 if (arg4 != sizeof(target_sigset_t)) {
8576 return -TARGET_EINVAL;
8578 if (arg2) {
8579 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8580 return -TARGET_EFAULT;
8581 act._sa_handler = rt_act->_sa_handler;
8582 act.sa_mask = rt_act->sa_mask;
8583 act.sa_flags = rt_act->sa_flags;
8584 act.sa_restorer = arg5;
8585 unlock_user_struct(rt_act, arg2, 0);
8586 pact = &act;
8588 ret = get_errno(do_sigaction(arg1, pact, &oact));
8589 if (!is_error(ret) && arg3) {
8590 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8591 return -TARGET_EFAULT;
8592 rt_act->_sa_handler = oact._sa_handler;
8593 rt_act->sa_mask = oact.sa_mask;
8594 rt_act->sa_flags = oact.sa_flags;
8595 unlock_user_struct(rt_act, arg3, 1);
8597 #else
8598 #ifdef TARGET_SPARC
8599 target_ulong restorer = arg4;
8600 target_ulong sigsetsize = arg5;
8601 #else
8602 target_ulong sigsetsize = arg4;
8603 #endif
8604 struct target_sigaction *act;
8605 struct target_sigaction *oact;
8607 if (sigsetsize != sizeof(target_sigset_t)) {
8608 return -TARGET_EINVAL;
8610 if (arg2) {
8611 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8612 return -TARGET_EFAULT;
8614 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8615 act->ka_restorer = restorer;
8616 #endif
8617 } else {
8618 act = NULL;
8620 if (arg3) {
8621 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8622 ret = -TARGET_EFAULT;
8623 goto rt_sigaction_fail;
8625 } else
8626 oact = NULL;
8627 ret = get_errno(do_sigaction(arg1, act, oact));
8628 rt_sigaction_fail:
8629 if (act)
8630 unlock_user_struct(act, arg2, 0);
8631 if (oact)
8632 unlock_user_struct(oact, arg3, 1);
8633 #endif
8635 return ret;
8636 #ifdef TARGET_NR_sgetmask /* not on alpha */
8637 case TARGET_NR_sgetmask:
8639 sigset_t cur_set;
8640 abi_ulong target_set;
8641 ret = do_sigprocmask(0, NULL, &cur_set);
8642 if (!ret) {
8643 host_to_target_old_sigset(&target_set, &cur_set);
8644 ret = target_set;
8647 return ret;
8648 #endif
8649 #ifdef TARGET_NR_ssetmask /* not on alpha */
8650 case TARGET_NR_ssetmask:
8652 sigset_t set, oset;
8653 abi_ulong target_set = arg1;
8654 target_to_host_old_sigset(&set, &target_set);
8655 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8656 if (!ret) {
8657 host_to_target_old_sigset(&target_set, &oset);
8658 ret = target_set;
8661 return ret;
8662 #endif
8663 #ifdef TARGET_NR_sigprocmask
8664 case TARGET_NR_sigprocmask:
8666 #if defined(TARGET_ALPHA)
8667 sigset_t set, oldset;
8668 abi_ulong mask;
8669 int how;
8671 switch (arg1) {
8672 case TARGET_SIG_BLOCK:
8673 how = SIG_BLOCK;
8674 break;
8675 case TARGET_SIG_UNBLOCK:
8676 how = SIG_UNBLOCK;
8677 break;
8678 case TARGET_SIG_SETMASK:
8679 how = SIG_SETMASK;
8680 break;
8681 default:
8682 return -TARGET_EINVAL;
8684 mask = arg2;
8685 target_to_host_old_sigset(&set, &mask);
8687 ret = do_sigprocmask(how, &set, &oldset);
8688 if (!is_error(ret)) {
8689 host_to_target_old_sigset(&mask, &oldset);
8690 ret = mask;
8691 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8693 #else
8694 sigset_t set, oldset, *set_ptr;
8695 int how;
8697 if (arg2) {
8698 switch (arg1) {
8699 case TARGET_SIG_BLOCK:
8700 how = SIG_BLOCK;
8701 break;
8702 case TARGET_SIG_UNBLOCK:
8703 how = SIG_UNBLOCK;
8704 break;
8705 case TARGET_SIG_SETMASK:
8706 how = SIG_SETMASK;
8707 break;
8708 default:
8709 return -TARGET_EINVAL;
8711 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8712 return -TARGET_EFAULT;
8713 target_to_host_old_sigset(&set, p);
8714 unlock_user(p, arg2, 0);
8715 set_ptr = &set;
8716 } else {
8717 how = 0;
8718 set_ptr = NULL;
8720 ret = do_sigprocmask(how, set_ptr, &oldset);
8721 if (!is_error(ret) && arg3) {
8722 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8723 return -TARGET_EFAULT;
8724 host_to_target_old_sigset(p, &oldset);
8725 unlock_user(p, arg3, sizeof(target_sigset_t));
8727 #endif
8729 return ret;
8730 #endif
8731 case TARGET_NR_rt_sigprocmask:
8733 int how = arg1;
8734 sigset_t set, oldset, *set_ptr;
8736 if (arg4 != sizeof(target_sigset_t)) {
8737 return -TARGET_EINVAL;
8740 if (arg2) {
8741 switch(how) {
8742 case TARGET_SIG_BLOCK:
8743 how = SIG_BLOCK;
8744 break;
8745 case TARGET_SIG_UNBLOCK:
8746 how = SIG_UNBLOCK;
8747 break;
8748 case TARGET_SIG_SETMASK:
8749 how = SIG_SETMASK;
8750 break;
8751 default:
8752 return -TARGET_EINVAL;
8754 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8755 return -TARGET_EFAULT;
8756 target_to_host_sigset(&set, p);
8757 unlock_user(p, arg2, 0);
8758 set_ptr = &set;
8759 } else {
8760 how = 0;
8761 set_ptr = NULL;
8763 ret = do_sigprocmask(how, set_ptr, &oldset);
8764 if (!is_error(ret) && arg3) {
8765 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8766 return -TARGET_EFAULT;
8767 host_to_target_sigset(p, &oldset);
8768 unlock_user(p, arg3, sizeof(target_sigset_t));
8771 return ret;
8772 #ifdef TARGET_NR_sigpending
8773 case TARGET_NR_sigpending:
8775 sigset_t set;
8776 ret = get_errno(sigpending(&set));
8777 if (!is_error(ret)) {
8778 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8779 return -TARGET_EFAULT;
8780 host_to_target_old_sigset(p, &set);
8781 unlock_user(p, arg1, sizeof(target_sigset_t));
8784 return ret;
8785 #endif
8786 case TARGET_NR_rt_sigpending:
8788 sigset_t set;
8790 /* Yes, this check is >, not != like most. We follow the kernel's
8791 * logic and it does it like this because it implements
8792 * NR_sigpending through the same code path, and in that case
8793 * the old_sigset_t is smaller in size.
8795 if (arg2 > sizeof(target_sigset_t)) {
8796 return -TARGET_EINVAL;
8799 ret = get_errno(sigpending(&set));
8800 if (!is_error(ret)) {
8801 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8802 return -TARGET_EFAULT;
8803 host_to_target_sigset(p, &set);
8804 unlock_user(p, arg1, sizeof(target_sigset_t));
8807 return ret;
8808 #ifdef TARGET_NR_sigsuspend
8809 case TARGET_NR_sigsuspend:
8811 TaskState *ts = cpu->opaque;
8812 #if defined(TARGET_ALPHA)
8813 abi_ulong mask = arg1;
8814 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8815 #else
8816 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8817 return -TARGET_EFAULT;
8818 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8819 unlock_user(p, arg1, 0);
8820 #endif
8821 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8822 SIGSET_T_SIZE));
8823 if (ret != -TARGET_ERESTARTSYS) {
8824 ts->in_sigsuspend = 1;
8827 return ret;
8828 #endif
8829 case TARGET_NR_rt_sigsuspend:
8831 TaskState *ts = cpu->opaque;
8833 if (arg2 != sizeof(target_sigset_t)) {
8834 return -TARGET_EINVAL;
8836 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8837 return -TARGET_EFAULT;
8838 target_to_host_sigset(&ts->sigsuspend_mask, p);
8839 unlock_user(p, arg1, 0);
8840 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8841 SIGSET_T_SIZE));
8842 if (ret != -TARGET_ERESTARTSYS) {
8843 ts->in_sigsuspend = 1;
8846 return ret;
8847 #ifdef TARGET_NR_rt_sigtimedwait
8848 case TARGET_NR_rt_sigtimedwait:
8850 sigset_t set;
8851 struct timespec uts, *puts;
8852 siginfo_t uinfo;
8854 if (arg4 != sizeof(target_sigset_t)) {
8855 return -TARGET_EINVAL;
8858 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8859 return -TARGET_EFAULT;
8860 target_to_host_sigset(&set, p);
8861 unlock_user(p, arg1, 0);
8862 if (arg3) {
8863 puts = &uts;
8864 if (target_to_host_timespec(puts, arg3)) {
8865 return -TARGET_EFAULT;
8867 } else {
8868 puts = NULL;
8870 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8871 SIGSET_T_SIZE));
8872 if (!is_error(ret)) {
8873 if (arg2) {
8874 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8876 if (!p) {
8877 return -TARGET_EFAULT;
8879 host_to_target_siginfo(p, &uinfo);
8880 unlock_user(p, arg2, sizeof(target_siginfo_t));
8882 ret = host_to_target_signal(ret);
8885 return ret;
8886 #endif
8887 case TARGET_NR_rt_sigqueueinfo:
8889 siginfo_t uinfo;
8891 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8892 if (!p) {
8893 return -TARGET_EFAULT;
8895 target_to_host_siginfo(&uinfo, p);
8896 unlock_user(p, arg3, 0);
8897 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8899 return ret;
8900 case TARGET_NR_rt_tgsigqueueinfo:
8902 siginfo_t uinfo;
8904 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8905 if (!p) {
8906 return -TARGET_EFAULT;
8908 target_to_host_siginfo(&uinfo, p);
8909 unlock_user(p, arg4, 0);
8910 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8912 return ret;
8913 #ifdef TARGET_NR_sigreturn
8914 case TARGET_NR_sigreturn:
8915 if (block_signals()) {
8916 return -TARGET_ERESTARTSYS;
8918 return do_sigreturn(cpu_env);
8919 #endif
8920 case TARGET_NR_rt_sigreturn:
8921 if (block_signals()) {
8922 return -TARGET_ERESTARTSYS;
8924 return do_rt_sigreturn(cpu_env);
8925 case TARGET_NR_sethostname:
8926 if (!(p = lock_user_string(arg1)))
8927 return -TARGET_EFAULT;
8928 ret = get_errno(sethostname(p, arg2));
8929 unlock_user(p, arg1, 0);
8930 return ret;
8931 #ifdef TARGET_NR_setrlimit
8932 case TARGET_NR_setrlimit:
8934 int resource = target_to_host_resource(arg1);
8935 struct target_rlimit *target_rlim;
8936 struct rlimit rlim;
8937 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8938 return -TARGET_EFAULT;
8939 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8940 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8941 unlock_user_struct(target_rlim, arg2, 0);
8943 * If we just passed through resource limit settings for memory then
8944 * they would also apply to QEMU's own allocations, and QEMU will
8945 * crash or hang or die if its allocations fail. Ideally we would
8946 * track the guest allocations in QEMU and apply the limits ourselves.
8947 * For now, just tell the guest the call succeeded but don't actually
8948 * limit anything.
8950 if (resource != RLIMIT_AS &&
8951 resource != RLIMIT_DATA &&
8952 resource != RLIMIT_STACK) {
8953 return get_errno(setrlimit(resource, &rlim));
8954 } else {
8955 return 0;
8958 #endif
8959 #ifdef TARGET_NR_getrlimit
8960 case TARGET_NR_getrlimit:
8962 int resource = target_to_host_resource(arg1);
8963 struct target_rlimit *target_rlim;
8964 struct rlimit rlim;
8966 ret = get_errno(getrlimit(resource, &rlim));
8967 if (!is_error(ret)) {
8968 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8969 return -TARGET_EFAULT;
8970 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8971 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8972 unlock_user_struct(target_rlim, arg2, 1);
8975 return ret;
8976 #endif
8977 case TARGET_NR_getrusage:
8979 struct rusage rusage;
8980 ret = get_errno(getrusage(arg1, &rusage));
8981 if (!is_error(ret)) {
8982 ret = host_to_target_rusage(arg2, &rusage);
8985 return ret;
8986 #if defined(TARGET_NR_gettimeofday)
8987 case TARGET_NR_gettimeofday:
8989 struct timeval tv;
8990 struct timezone tz;
8992 ret = get_errno(gettimeofday(&tv, &tz));
8993 if (!is_error(ret)) {
8994 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
8995 return -TARGET_EFAULT;
8997 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
8998 return -TARGET_EFAULT;
9002 return ret;
9003 #endif
9004 #if defined(TARGET_NR_settimeofday)
9005 case TARGET_NR_settimeofday:
9007 struct timeval tv, *ptv = NULL;
9008 struct timezone tz, *ptz = NULL;
9010 if (arg1) {
9011 if (copy_from_user_timeval(&tv, arg1)) {
9012 return -TARGET_EFAULT;
9014 ptv = &tv;
9017 if (arg2) {
9018 if (copy_from_user_timezone(&tz, arg2)) {
9019 return -TARGET_EFAULT;
9021 ptz = &tz;
9024 return get_errno(settimeofday(ptv, ptz));
9026 #endif
9027 #if defined(TARGET_NR_select)
9028 case TARGET_NR_select:
9029 #if defined(TARGET_WANT_NI_OLD_SELECT)
9030 /* some architectures used to have old_select here
9031 * but now return ENOSYS for it.
9033 ret = -TARGET_ENOSYS;
9034 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9035 ret = do_old_select(arg1);
9036 #else
9037 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9038 #endif
9039 return ret;
9040 #endif
9041 #ifdef TARGET_NR_pselect6
9042 case TARGET_NR_pselect6:
9044 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9045 fd_set rfds, wfds, efds;
9046 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9047 struct timespec ts, *ts_ptr;
9050 * The 6th arg is actually two args smashed together,
9051 * so we cannot use the C library.
9053 sigset_t set;
9054 struct {
9055 sigset_t *set;
9056 size_t size;
9057 } sig, *sig_ptr;
9059 abi_ulong arg_sigset, arg_sigsize, *arg7;
9060 target_sigset_t *target_sigset;
9062 n = arg1;
9063 rfd_addr = arg2;
9064 wfd_addr = arg3;
9065 efd_addr = arg4;
9066 ts_addr = arg5;
9068 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9069 if (ret) {
9070 return ret;
9072 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9073 if (ret) {
9074 return ret;
9076 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9077 if (ret) {
9078 return ret;
9082 * This takes a timespec, and not a timeval, so we cannot
9083 * use the do_select() helper ...
9085 if (ts_addr) {
9086 if (target_to_host_timespec(&ts, ts_addr)) {
9087 return -TARGET_EFAULT;
9089 ts_ptr = &ts;
9090 } else {
9091 ts_ptr = NULL;
9094 /* Extract the two packed args for the sigset */
9095 if (arg6) {
9096 sig_ptr = &sig;
9097 sig.size = SIGSET_T_SIZE;
9099 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9100 if (!arg7) {
9101 return -TARGET_EFAULT;
9103 arg_sigset = tswapal(arg7[0]);
9104 arg_sigsize = tswapal(arg7[1]);
9105 unlock_user(arg7, arg6, 0);
9107 if (arg_sigset) {
9108 sig.set = &set;
9109 if (arg_sigsize != sizeof(*target_sigset)) {
9110 /* Like the kernel, we enforce correct size sigsets */
9111 return -TARGET_EINVAL;
9113 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9114 sizeof(*target_sigset), 1);
9115 if (!target_sigset) {
9116 return -TARGET_EFAULT;
9118 target_to_host_sigset(&set, target_sigset);
9119 unlock_user(target_sigset, arg_sigset, 0);
9120 } else {
9121 sig.set = NULL;
9123 } else {
9124 sig_ptr = NULL;
9127 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9128 ts_ptr, sig_ptr));
9130 if (!is_error(ret)) {
9131 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9132 return -TARGET_EFAULT;
9133 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9134 return -TARGET_EFAULT;
9135 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9136 return -TARGET_EFAULT;
9138 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9139 return -TARGET_EFAULT;
9142 return ret;
9143 #endif
9144 #ifdef TARGET_NR_symlink
9145 case TARGET_NR_symlink:
9147 void *p2;
9148 p = lock_user_string(arg1);
9149 p2 = lock_user_string(arg2);
9150 if (!p || !p2)
9151 ret = -TARGET_EFAULT;
9152 else
9153 ret = get_errno(symlink(p, p2));
9154 unlock_user(p2, arg2, 0);
9155 unlock_user(p, arg1, 0);
9157 return ret;
9158 #endif
9159 #if defined(TARGET_NR_symlinkat)
9160 case TARGET_NR_symlinkat:
9162 void *p2;
9163 p = lock_user_string(arg1);
9164 p2 = lock_user_string(arg3);
9165 if (!p || !p2)
9166 ret = -TARGET_EFAULT;
9167 else
9168 ret = get_errno(symlinkat(p, arg2, p2));
9169 unlock_user(p2, arg3, 0);
9170 unlock_user(p, arg1, 0);
9172 return ret;
9173 #endif
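    /*
     * readlink and readlinkat special-case /proc/self/exe so that the
     * guest sees the path of the binary being emulated (exec_path)
     * rather than the QEMU executable the host kernel would report.
     */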
9174 #ifdef TARGET_NR_readlink
9175 case TARGET_NR_readlink:
9177 void *p2;
9178 p = lock_user_string(arg1);
9179 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9180 if (!p || !p2) {
9181 ret = -TARGET_EFAULT;
9182 } else if (!arg3) {
9183 /* Short-circuit this for the magic exe check. */
9184 ret = -TARGET_EINVAL;
9185 } else if (is_proc_myself((const char *)p, "exe")) {
9186 char real[PATH_MAX], *temp;
9187 temp = realpath(exec_path, real);
9188 /* Return value is # of bytes that we wrote to the buffer. */
9189 if (temp == NULL) {
9190 ret = get_errno(-1);
9191 } else {
9192 /* Don't worry about sign mismatch as earlier mapping
9193 * logic would have thrown a bad address error. */
9194 ret = MIN(strlen(real), arg3);
9195 /* We cannot NUL terminate the string. */
9196 memcpy(p2, real, ret);
9198 } else {
9199 ret = get_errno(readlink(path(p), p2, arg3));
9201 unlock_user(p2, arg2, ret);
9202 unlock_user(p, arg1, 0);
9204 return ret;
9205 #endif
9206 #if defined(TARGET_NR_readlinkat)
9207 case TARGET_NR_readlinkat:
9209 void *p2;
9210 p = lock_user_string(arg2);
9211 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9212 if (!p || !p2) {
9213 ret = -TARGET_EFAULT;
9214 } else if (is_proc_myself((const char *)p, "exe")) {
9215 char real[PATH_MAX], *temp;
9216 temp = realpath(exec_path, real);
9217 ret = temp == NULL ? get_errno(-1) : strlen(real);
9218 snprintf((char *)p2, arg4, "%s", real);
9219 } else {
9220 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9222 unlock_user(p2, arg3, ret);
9223 unlock_user(p, arg2, 0);
9225 return ret;
9226 #endif
9227 #ifdef TARGET_NR_swapon
9228 case TARGET_NR_swapon:
9229 if (!(p = lock_user_string(arg1)))
9230 return -TARGET_EFAULT;
9231 ret = get_errno(swapon(p, arg2));
9232 unlock_user(p, arg1, 0);
9233 return ret;
9234 #endif
9235 case TARGET_NR_reboot:
9236 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9237 /* arg4 (the command string) is only used for LINUX_REBOOT_CMD_RESTART2 */
9238 p = lock_user_string(arg4);
9239 if (!p) {
9240 return -TARGET_EFAULT;
9242 ret = get_errno(reboot(arg1, arg2, arg3, p));
9243 unlock_user(p, arg4, 0);
9244 } else {
9245 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9247 return ret;
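    /*
     * On the targets listed below, the old-style mmap syscall does
     * not take its six arguments in registers: arg1 points at a
     * block of six abi_ulongs in guest memory, which must be
     * unpacked by hand before calling target_mmap().
     */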
9248 #ifdef TARGET_NR_mmap
9249 case TARGET_NR_mmap:
9250 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9251 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9252 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9253 || defined(TARGET_S390X)
9255 abi_ulong *v;
9256 abi_ulong v1, v2, v3, v4, v5, v6;
9257 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9258 return -TARGET_EFAULT;
9259 v1 = tswapal(v[0]);
9260 v2 = tswapal(v[1]);
9261 v3 = tswapal(v[2]);
9262 v4 = tswapal(v[3]);
9263 v5 = tswapal(v[4]);
9264 v6 = tswapal(v[5]);
9265 unlock_user(v, arg1, 0);
9266 ret = get_errno(target_mmap(v1, v2, v3,
9267 target_to_host_bitmask(v4, mmap_flags_tbl),
9268 v5, v6));
9270 #else
9271 ret = get_errno(target_mmap(arg1, arg2, arg3,
9272 target_to_host_bitmask(arg4, mmap_flags_tbl),
9273 arg5,
9274 arg6));
9275 #endif
9276 return ret;
9277 #endif
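    /*
     * mmap2 passes the file offset in units of (1 << MMAP_SHIFT)
     * bytes (4096 by default) so that 32-bit guests can map offsets
     * beyond 4 GiB; the shift converts it back to a byte offset.
     */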
9278 #ifdef TARGET_NR_mmap2
9279 case TARGET_NR_mmap2:
9280 #ifndef MMAP_SHIFT
9281 #define MMAP_SHIFT 12
9282 #endif
9283 ret = target_mmap(arg1, arg2, arg3,
9284 target_to_host_bitmask(arg4, mmap_flags_tbl),
9285 arg5, arg6 << MMAP_SHIFT);
9286 return get_errno(ret);
9287 #endif
9288 case TARGET_NR_munmap:
9289 return get_errno(target_munmap(arg1, arg2));
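    /*
     * PROT_GROWSDOWN normally tells the kernel to extend the
     * protection change down to the start of the stack VMA. QEMU
     * tracks guest mappings itself, so the equivalent is done here
     * by widening the range to ts->info->stack_limit by hand.
     */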
9290 case TARGET_NR_mprotect:
9292 TaskState *ts = cpu->opaque;
9293 /* Special hack to detect libc making the stack executable. */
9294 if ((arg3 & PROT_GROWSDOWN)
9295 && arg1 >= ts->info->stack_limit
9296 && arg1 <= ts->info->start_stack) {
9297 arg3 &= ~PROT_GROWSDOWN;
9298 arg2 = arg2 + arg1 - ts->info->stack_limit;
9299 arg1 = ts->info->stack_limit;
9302 return get_errno(target_mprotect(arg1, arg2, arg3));
9303 #ifdef TARGET_NR_mremap
9304 case TARGET_NR_mremap:
9305 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9306 #endif
9307 /* ??? msync/mlock/munlock are broken for softmmu. */
9308 #ifdef TARGET_NR_msync
9309 case TARGET_NR_msync:
9310 return get_errno(msync(g2h(arg1), arg2, arg3));
9311 #endif
9312 #ifdef TARGET_NR_mlock
9313 case TARGET_NR_mlock:
9314 return get_errno(mlock(g2h(arg1), arg2));
9315 #endif
9316 #ifdef TARGET_NR_munlock
9317 case TARGET_NR_munlock:
9318 return get_errno(munlock(g2h(arg1), arg2));
9319 #endif
9320 #ifdef TARGET_NR_mlockall
9321 case TARGET_NR_mlockall:
9322 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9323 #endif
9324 #ifdef TARGET_NR_munlockall
9325 case TARGET_NR_munlockall:
9326 return get_errno(munlockall());
9327 #endif
9328 #ifdef TARGET_NR_truncate
9329 case TARGET_NR_truncate:
9330 if (!(p = lock_user_string(arg1)))
9331 return -TARGET_EFAULT;
9332 ret = get_errno(truncate(p, arg2));
9333 unlock_user(p, arg1, 0);
9334 return ret;
9335 #endif
9336 #ifdef TARGET_NR_ftruncate
9337 case TARGET_NR_ftruncate:
9338 return get_errno(ftruncate(arg1, arg2));
9339 #endif
9340 case TARGET_NR_fchmod:
9341 return get_errno(fchmod(arg1, arg2));
9342 #if defined(TARGET_NR_fchmodat)
9343 case TARGET_NR_fchmodat:
9344 if (!(p = lock_user_string(arg2)))
9345 return -TARGET_EFAULT;
9346 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9347 unlock_user(p, arg2, 0);
9348 return ret;
9349 #endif
9350 case TARGET_NR_getpriority:
9351 /* Note that negative values are valid for getpriority, so we must
9352 differentiate based on errno settings. */
9353 errno = 0;
9354 ret = getpriority(arg1, arg2);
9355 if (ret == -1 && errno != 0) {
9356 return -host_to_target_errno(errno);
9358 #ifdef TARGET_ALPHA
9359 /* Return value is the unbiased priority. Signal no error. */
9360 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9361 #else
9362 /* Return value is a biased priority to avoid negative numbers. */
9363 ret = 20 - ret;
9364 #endif
9365 return ret;
9366 case TARGET_NR_setpriority:
9367 return get_errno(setpriority(arg1, arg2, arg3));
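    /*
     * The statfs family below shares one conversion body: each
     * variant fills 'stfs' and then reaches the convert_statfs or
     * convert_statfs64 label, which copies the result out to the
     * guest.
     */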
9368 #ifdef TARGET_NR_statfs
9369 case TARGET_NR_statfs:
9370 if (!(p = lock_user_string(arg1))) {
9371 return -TARGET_EFAULT;
9373 ret = get_errno(statfs(path(p), &stfs));
9374 unlock_user(p, arg1, 0);
9375 convert_statfs:
9376 if (!is_error(ret)) {
9377 struct target_statfs *target_stfs;
9379 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9380 return -TARGET_EFAULT;
9381 __put_user(stfs.f_type, &target_stfs->f_type);
9382 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9383 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9384 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9385 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9386 __put_user(stfs.f_files, &target_stfs->f_files);
9387 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9388 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9389 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9390 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9391 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9392 #ifdef _STATFS_F_FLAGS
9393 __put_user(stfs.f_flags, &target_stfs->f_flags);
9394 #else
9395 __put_user(0, &target_stfs->f_flags);
9396 #endif
9397 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9398 unlock_user_struct(target_stfs, arg2, 1);
9400 return ret;
9401 #endif
9402 #ifdef TARGET_NR_fstatfs
9403 case TARGET_NR_fstatfs:
9404 ret = get_errno(fstatfs(arg1, &stfs));
9405 goto convert_statfs;
9406 #endif
9407 #ifdef TARGET_NR_statfs64
9408 case TARGET_NR_statfs64:
9409 if (!(p = lock_user_string(arg1))) {
9410 return -TARGET_EFAULT;
9412 ret = get_errno(statfs(path(p), &stfs));
9413 unlock_user(p, arg1, 0);
9414 convert_statfs64:
9415 if (!is_error(ret)) {
9416 struct target_statfs64 *target_stfs;
9418 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9419 return -TARGET_EFAULT;
9420 __put_user(stfs.f_type, &target_stfs->f_type);
9421 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9422 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9423 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9424 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9425 __put_user(stfs.f_files, &target_stfs->f_files);
9426 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9427 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9428 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9429 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9430 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9431 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9432 unlock_user_struct(target_stfs, arg3, 1);
9434 return ret;
9435 case TARGET_NR_fstatfs64:
9436 ret = get_errno(fstatfs(arg1, &stfs));
9437 goto convert_statfs64;
9438 #endif
9439 #ifdef TARGET_NR_socketcall
9440 case TARGET_NR_socketcall:
9441 return do_socketcall(arg1, arg2);
9442 #endif
9443 #ifdef TARGET_NR_accept
9444 case TARGET_NR_accept:
9445 return do_accept4(arg1, arg2, arg3, 0);
9446 #endif
9447 #ifdef TARGET_NR_accept4
9448 case TARGET_NR_accept4:
9449 return do_accept4(arg1, arg2, arg3, arg4);
9450 #endif
9451 #ifdef TARGET_NR_bind
9452 case TARGET_NR_bind:
9453 return do_bind(arg1, arg2, arg3);
9454 #endif
9455 #ifdef TARGET_NR_connect
9456 case TARGET_NR_connect:
9457 return do_connect(arg1, arg2, arg3);
9458 #endif
9459 #ifdef TARGET_NR_getpeername
9460 case TARGET_NR_getpeername:
9461 return do_getpeername(arg1, arg2, arg3);
9462 #endif
9463 #ifdef TARGET_NR_getsockname
9464 case TARGET_NR_getsockname:
9465 return do_getsockname(arg1, arg2, arg3);
9466 #endif
9467 #ifdef TARGET_NR_getsockopt
9468 case TARGET_NR_getsockopt:
9469 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9470 #endif
9471 #ifdef TARGET_NR_listen
9472 case TARGET_NR_listen:
9473 return get_errno(listen(arg1, arg2));
9474 #endif
9475 #ifdef TARGET_NR_recv
9476 case TARGET_NR_recv:
9477 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9478 #endif
9479 #ifdef TARGET_NR_recvfrom
9480 case TARGET_NR_recvfrom:
9481 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9482 #endif
9483 #ifdef TARGET_NR_recvmsg
9484 case TARGET_NR_recvmsg:
9485 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9486 #endif
9487 #ifdef TARGET_NR_send
9488 case TARGET_NR_send:
9489 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9490 #endif
9491 #ifdef TARGET_NR_sendmsg
9492 case TARGET_NR_sendmsg:
9493 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9494 #endif
9495 #ifdef TARGET_NR_sendmmsg
9496 case TARGET_NR_sendmmsg:
9497 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9498 #endif
9499 #ifdef TARGET_NR_recvmmsg
9500 case TARGET_NR_recvmmsg:
9501 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9502 #endif
9503 #ifdef TARGET_NR_sendto
9504 case TARGET_NR_sendto:
9505 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9506 #endif
9507 #ifdef TARGET_NR_shutdown
9508 case TARGET_NR_shutdown:
9509 return get_errno(shutdown(arg1, arg2));
9510 #endif
9511 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9512 case TARGET_NR_getrandom:
9513 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9514 if (!p) {
9515 return -TARGET_EFAULT;
9517 ret = get_errno(getrandom(p, arg2, arg3));
9518 unlock_user(p, arg1, ret);
9519 return ret;
9520 #endif
9521 #ifdef TARGET_NR_socket
9522 case TARGET_NR_socket:
9523 return do_socket(arg1, arg2, arg3);
9524 #endif
9525 #ifdef TARGET_NR_socketpair
9526 case TARGET_NR_socketpair:
9527 return do_socketpair(arg1, arg2, arg3, arg4);
9528 #endif
9529 #ifdef TARGET_NR_setsockopt
9530 case TARGET_NR_setsockopt:
9531 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9532 #endif
9533 #if defined(TARGET_NR_syslog)
9534 case TARGET_NR_syslog:
9536 int len = arg2;
9538 switch (arg1) {
9539 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9540 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9541 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9542 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9543 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9544 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9545 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9546 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9547 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9548 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9549 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9550 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9552 if (len < 0) {
9553 return -TARGET_EINVAL;
9555 if (len == 0) {
9556 return 0;
9558 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9559 if (!p) {
9560 return -TARGET_EFAULT;
9562 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9563 unlock_user(p, arg2, arg3);
9565 return ret;
9566 default:
9567 return -TARGET_EINVAL;
9570 break;
9571 #endif
9572 case TARGET_NR_setitimer:
9574 struct itimerval value, ovalue, *pvalue;
9576 if (arg2) {
9577 pvalue = &value;
9578 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9579 || copy_from_user_timeval(&pvalue->it_value,
9580 arg2 + sizeof(struct target_timeval)))
9581 return -TARGET_EFAULT;
9582 } else {
9583 pvalue = NULL;
9585 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9586 if (!is_error(ret) && arg3) {
9587 if (copy_to_user_timeval(arg3,
9588 &ovalue.it_interval)
9589 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9590 &ovalue.it_value))
9591 return -TARGET_EFAULT;
9594 return ret;
9595 case TARGET_NR_getitimer:
9597 struct itimerval value;
9599 ret = get_errno(getitimer(arg1, &value));
9600 if (!is_error(ret) && arg2) {
9601 if (copy_to_user_timeval(arg2,
9602 &value.it_interval)
9603 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9604 &value.it_value))
9605 return -TARGET_EFAULT;
9608 return ret;
9609 #ifdef TARGET_NR_stat
9610 case TARGET_NR_stat:
9611 if (!(p = lock_user_string(arg1))) {
9612 return -TARGET_EFAULT;
9614 ret = get_errno(stat(path(p), &st));
9615 unlock_user(p, arg1, 0);
9616 goto do_stat;
9617 #endif
9618 #ifdef TARGET_NR_lstat
9619 case TARGET_NR_lstat:
9620 if (!(p = lock_user_string(arg1))) {
9621 return -TARGET_EFAULT;
9623 ret = get_errno(lstat(path(p), &st));
9624 unlock_user(p, arg1, 0);
9625 goto do_stat;
9626 #endif
9627 #ifdef TARGET_NR_fstat
9628 case TARGET_NR_fstat:
9630 ret = get_errno(fstat(arg1, &st));
9631 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9632 do_stat:
9633 #endif
9634 if (!is_error(ret)) {
9635 struct target_stat *target_st;
9637 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9638 return -TARGET_EFAULT;
9639 memset(target_st, 0, sizeof(*target_st));
9640 __put_user(st.st_dev, &target_st->st_dev);
9641 __put_user(st.st_ino, &target_st->st_ino);
9642 __put_user(st.st_mode, &target_st->st_mode);
9643 __put_user(st.st_uid, &target_st->st_uid);
9644 __put_user(st.st_gid, &target_st->st_gid);
9645 __put_user(st.st_nlink, &target_st->st_nlink);
9646 __put_user(st.st_rdev, &target_st->st_rdev);
9647 __put_user(st.st_size, &target_st->st_size);
9648 __put_user(st.st_blksize, &target_st->st_blksize);
9649 __put_user(st.st_blocks, &target_st->st_blocks);
9650 __put_user(st.st_atime, &target_st->target_st_atime);
9651 __put_user(st.st_mtime, &target_st->target_st_mtime);
9652 __put_user(st.st_ctime, &target_st->target_st_ctime);
9653 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9654 defined(TARGET_STAT_HAVE_NSEC)
9655 __put_user(st.st_atim.tv_nsec,
9656 &target_st->target_st_atime_nsec);
9657 __put_user(st.st_mtim.tv_nsec,
9658 &target_st->target_st_mtime_nsec);
9659 __put_user(st.st_ctim.tv_nsec,
9660 &target_st->target_st_ctime_nsec);
9661 #endif
9662 unlock_user_struct(target_st, arg2, 1);
9665 return ret;
9666 #endif
9667 case TARGET_NR_vhangup:
9668 return get_errno(vhangup());
9669 #ifdef TARGET_NR_syscall
9670 case TARGET_NR_syscall:
9671 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9672 arg6, arg7, arg8, 0);
9673 #endif
9674 #if defined(TARGET_NR_wait4)
9675 case TARGET_NR_wait4:
9677 int status;
9678 abi_long status_ptr = arg2;
9679 struct rusage rusage, *rusage_ptr;
9680 abi_ulong target_rusage = arg4;
9681 abi_long rusage_err;
9682 if (target_rusage)
9683 rusage_ptr = &rusage;
9684 else
9685 rusage_ptr = NULL;
9686 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9687 if (!is_error(ret)) {
9688 if (status_ptr && ret) {
9689 status = host_to_target_waitstatus(status);
9690 if (put_user_s32(status, status_ptr))
9691 return -TARGET_EFAULT;
9693 if (target_rusage) {
9694 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9695 if (rusage_err) {
9696 ret = rusage_err;
9701 return ret;
9702 #endif
9703 #ifdef TARGET_NR_swapoff
9704 case TARGET_NR_swapoff:
9705 if (!(p = lock_user_string(arg1)))
9706 return -TARGET_EFAULT;
9707 ret = get_errno(swapoff(p));
9708 unlock_user(p, arg1, 0);
9709 return ret;
9710 #endif
9711 case TARGET_NR_sysinfo:
9713 struct target_sysinfo *target_value;
9714 struct sysinfo value;
9715 ret = get_errno(sysinfo(&value));
9716 if (!is_error(ret) && arg1)
9718 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9719 return -TARGET_EFAULT;
9720 __put_user(value.uptime, &target_value->uptime);
9721 __put_user(value.loads[0], &target_value->loads[0]);
9722 __put_user(value.loads[1], &target_value->loads[1]);
9723 __put_user(value.loads[2], &target_value->loads[2]);
9724 __put_user(value.totalram, &target_value->totalram);
9725 __put_user(value.freeram, &target_value->freeram);
9726 __put_user(value.sharedram, &target_value->sharedram);
9727 __put_user(value.bufferram, &target_value->bufferram);
9728 __put_user(value.totalswap, &target_value->totalswap);
9729 __put_user(value.freeswap, &target_value->freeswap);
9730 __put_user(value.procs, &target_value->procs);
9731 __put_user(value.totalhigh, &target_value->totalhigh);
9732 __put_user(value.freehigh, &target_value->freehigh);
9733 __put_user(value.mem_unit, &target_value->mem_unit);
9734 unlock_user_struct(target_value, arg1, 1);
9737 return ret;
9738 #ifdef TARGET_NR_ipc
9739 case TARGET_NR_ipc:
9740 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9741 #endif
9742 #ifdef TARGET_NR_semget
9743 case TARGET_NR_semget:
9744 return get_errno(semget(arg1, arg2, arg3));
9745 #endif
9746 #ifdef TARGET_NR_semop
9747 case TARGET_NR_semop:
9748 return do_semtimedop(arg1, arg2, arg3, 0);
9749 #endif
9750 #ifdef TARGET_NR_semtimedop
9751 case TARGET_NR_semtimedop:
9752 return do_semtimedop(arg1, arg2, arg3, arg4);
9753 #endif
9754 #ifdef TARGET_NR_semctl
9755 case TARGET_NR_semctl:
9756 return do_semctl(arg1, arg2, arg3, arg4);
9757 #endif
9758 #ifdef TARGET_NR_msgctl
9759 case TARGET_NR_msgctl:
9760 return do_msgctl(arg1, arg2, arg3);
9761 #endif
9762 #ifdef TARGET_NR_msgget
9763 case TARGET_NR_msgget:
9764 return get_errno(msgget(arg1, arg2));
9765 #endif
9766 #ifdef TARGET_NR_msgrcv
9767 case TARGET_NR_msgrcv:
9768 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9769 #endif
9770 #ifdef TARGET_NR_msgsnd
9771 case TARGET_NR_msgsnd:
9772 return do_msgsnd(arg1, arg2, arg3, arg4);
9773 #endif
9774 #ifdef TARGET_NR_shmget
9775 case TARGET_NR_shmget:
9776 return get_errno(shmget(arg1, arg2, arg3));
9777 #endif
9778 #ifdef TARGET_NR_shmctl
9779 case TARGET_NR_shmctl:
9780 return do_shmctl(arg1, arg2, arg3);
9781 #endif
9782 #ifdef TARGET_NR_shmat
9783 case TARGET_NR_shmat:
9784 return do_shmat(cpu_env, arg1, arg2, arg3);
9785 #endif
9786 #ifdef TARGET_NR_shmdt
9787 case TARGET_NR_shmdt:
9788 return do_shmdt(arg1);
9789 #endif
9790 case TARGET_NR_fsync:
9791 return get_errno(fsync(arg1));
9792 case TARGET_NR_clone:
9793 /* Linux manages to have three different orderings for its
9794 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9795 * match the kernel's CONFIG_CLONE_* settings.
9796 * Microblaze is further special in that it uses a sixth
9797 * implicit argument to clone for the TLS pointer.
9798 */
9799 #if defined(TARGET_MICROBLAZE)
9800 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9801 #elif defined(TARGET_CLONE_BACKWARDS)
9802 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9803 #elif defined(TARGET_CLONE_BACKWARDS2)
9804 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9805 #else
9806 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9807 #endif
9808 return ret;
9809 #ifdef __NR_exit_group
9810 /* new thread calls */
9811 case TARGET_NR_exit_group:
9812 preexit_cleanup(cpu_env, arg1);
9813 return get_errno(exit_group(arg1));
9814 #endif
9815 case TARGET_NR_setdomainname:
9816 if (!(p = lock_user_string(arg1)))
9817 return -TARGET_EFAULT;
9818 ret = get_errno(setdomainname(p, arg2));
9819 unlock_user(p, arg1, 0);
9820 return ret;
9821 case TARGET_NR_uname:
9822 /* no need to transcode because we use the linux syscall */
9824 struct new_utsname * buf;
9826 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9827 return -TARGET_EFAULT;
9828 ret = get_errno(sys_uname(buf));
9829 if (!is_error(ret)) {
9830 /* Overwrite the native machine name with whatever is being
9831 emulated. */
9832 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9833 sizeof(buf->machine));
9834 /* Allow the user to override the reported release. */
9835 if (qemu_uname_release && *qemu_uname_release) {
9836 g_strlcpy(buf->release, qemu_uname_release,
9837 sizeof(buf->release));
9840 unlock_user_struct(buf, arg1, 1);
9842 return ret;
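/*
 * Editor's illustrative sketch, not part of syscall.c: what the guest
 * observes after the rewrite above -- machine reflects the emulated CPU,
 * and release can be overridden (qemu-user's -r option / QEMU_UNAME).
 */
#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
    struct utsname u;
    if (uname(&u) == 0) {
        printf("machine=%s release=%s\n", u.machine, u.release);
    }
    return 0;
}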
9843 #ifdef TARGET_I386
9844 case TARGET_NR_modify_ldt:
9845 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9846 #if !defined(TARGET_X86_64)
9847 case TARGET_NR_vm86:
9848 return do_vm86(cpu_env, arg1, arg2);
9849 #endif
9850 #endif
9851 #if defined(TARGET_NR_adjtimex)
9852 case TARGET_NR_adjtimex:
9854 struct timex host_buf;
9856 if (target_to_host_timex(&host_buf, arg1) != 0) {
9857 return -TARGET_EFAULT;
9859 ret = get_errno(adjtimex(&host_buf));
9860 if (!is_error(ret)) {
9861 if (host_to_target_timex(arg1, &host_buf) != 0) {
9862 return -TARGET_EFAULT;
9866 return ret;
9867 #endif
9868 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9869 case TARGET_NR_clock_adjtime:
9871 struct timex htx, *phtx = &htx;
9873 if (target_to_host_timex(phtx, arg2) != 0) {
9874 return -TARGET_EFAULT;
9876 ret = get_errno(clock_adjtime(arg1, phtx));
9877 if (!is_error(ret) && phtx) {
9878 if (host_to_target_timex(arg2, phtx) != 0) {
9879 return -TARGET_EFAULT;
9883 return ret;
9884 #endif
9885 case TARGET_NR_getpgid:
9886 return get_errno(getpgid(arg1));
9887 case TARGET_NR_fchdir:
9888 return get_errno(fchdir(arg1));
9889 case TARGET_NR_personality:
9890 return get_errno(personality(arg1));
9891 #ifdef TARGET_NR__llseek /* Not on alpha */
9892 case TARGET_NR__llseek:
9894 int64_t res;
9895 #if !defined(__NR_llseek)
9896 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9897 if (res == -1) {
9898 ret = get_errno(res);
9899 } else {
9900 ret = 0;
9902 #else
9903 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9904 #endif
9905 if ((ret == 0) && put_user_s64(res, arg4)) {
9906 return -TARGET_EFAULT;
9909 return ret;
9910 #endif
9911 #ifdef TARGET_NR_getdents
9912 case TARGET_NR_getdents:
9913 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9914 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9916 struct target_dirent *target_dirp;
9917 struct linux_dirent *dirp;
9918 abi_long count = arg3;
9920 dirp = g_try_malloc(count);
9921 if (!dirp) {
9922 return -TARGET_ENOMEM;
9925 ret = get_errno(sys_getdents(arg1, dirp, count));
9926 if (!is_error(ret)) {
9927 struct linux_dirent *de;
9928 struct target_dirent *tde;
9929 int len = ret;
9930 int reclen, treclen;
9931 int count1, tnamelen;
9933 count1 = 0;
9934 de = dirp;
9935 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9936 return -TARGET_EFAULT;
9937 tde = target_dirp;
9938 while (len > 0) {
9939 reclen = de->d_reclen;
9940 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9941 assert(tnamelen >= 0);
9942 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9943 assert(count1 + treclen <= count);
9944 tde->d_reclen = tswap16(treclen);
9945 tde->d_ino = tswapal(de->d_ino);
9946 tde->d_off = tswapal(de->d_off);
9947 memcpy(tde->d_name, de->d_name, tnamelen);
9948 de = (struct linux_dirent *)((char *)de + reclen);
9949 len -= reclen;
9950 tde = (struct target_dirent *)((char *)tde + treclen);
9951 count1 += treclen;
9953 ret = count1;
9954 unlock_user(target_dirp, arg2, ret);
9956 g_free(dirp);
9958 #else
9960 struct linux_dirent *dirp;
9961 abi_long count = arg3;
9963 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9964 return -TARGET_EFAULT;
9965 ret = get_errno(sys_getdents(arg1, dirp, count));
9966 if (!is_error(ret)) {
9967 struct linux_dirent *de;
9968 int len = ret;
9969 int reclen;
9970 de = dirp;
9971 while (len > 0) {
9972 reclen = de->d_reclen;
9973 if (reclen > len)
9974 break;
9975 de->d_reclen = tswap16(reclen);
9976 tswapls(&de->d_ino);
9977 tswapls(&de->d_off);
9978 de = (struct linux_dirent *)((char *)de + reclen);
9979 len -= reclen;
9982 unlock_user(dirp, arg2, ret);
9984 #endif
9985 #else
9986 /* Implement getdents in terms of getdents64 */
9988 struct linux_dirent64 *dirp;
9989 abi_long count = arg3;
9991 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9992 if (!dirp) {
9993 return -TARGET_EFAULT;
9995 ret = get_errno(sys_getdents64(arg1, dirp, count));
9996 if (!is_error(ret)) {
9997 /* Convert the dirent64 structs to target dirent. We do this
9998 * in-place, since we can guarantee that a target_dirent is no
9999 * larger than a dirent64; however this means we have to be
10000 * careful to read everything before writing in the new format.
10001 */
10002 struct linux_dirent64 *de;
10003 struct target_dirent *tde;
10004 int len = ret;
10005 int tlen = 0;
10007 de = dirp;
10008 tde = (struct target_dirent *)dirp;
10009 while (len > 0) {
10010 int namelen, treclen;
10011 int reclen = de->d_reclen;
10012 uint64_t ino = de->d_ino;
10013 int64_t off = de->d_off;
10014 uint8_t type = de->d_type;
10016 namelen = strlen(de->d_name);
10017 treclen = offsetof(struct target_dirent, d_name)
10018 + namelen + 2;
10019 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10021 memmove(tde->d_name, de->d_name, namelen + 1);
10022 tde->d_ino = tswapal(ino);
10023 tde->d_off = tswapal(off);
10024 tde->d_reclen = tswap16(treclen);
10025 /* The target_dirent type is in what was formerly a padding
10026 * byte at the end of the structure:
10027 */
10028 *(((char *)tde) + treclen - 1) = type;
10030 de = (struct linux_dirent64 *)((char *)de + reclen);
10031 tde = (struct target_dirent *)((char *)tde + treclen);
10032 len -= reclen;
10033 tlen += treclen;
10035 ret = tlen;
10037 unlock_user(dirp, arg2, ret);
10039 #endif
10040 return ret;
10041 #endif /* TARGET_NR_getdents */
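/*
 * Editor's illustrative sketch, not part of syscall.c: the raw getdents64
 * loop that guest code (and the conversion above) must follow -- records
 * are variable-length, advanced by d_reclen.
 */
#define _GNU_SOURCE
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    int fd = open(".", O_RDONLY | O_DIRECTORY);
    long n;
    while (fd >= 0 && (n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
        for (long off = 0; off < n; ) {
            struct dirent64 *de = (struct dirent64 *)(buf + off);
            printf("%s\n", de->d_name);
            off += de->d_reclen;            /* step to the next record */
        }
    }
    return 0;
}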
10042 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10043 case TARGET_NR_getdents64:
10045 struct linux_dirent64 *dirp;
10046 abi_long count = arg3;
10047 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10048 return -TARGET_EFAULT;
10049 ret = get_errno(sys_getdents64(arg1, dirp, count));
10050 if (!is_error(ret)) {
10051 struct linux_dirent64 *de;
10052 int len = ret;
10053 int reclen;
10054 de = dirp;
10055 while (len > 0) {
10056 reclen = de->d_reclen;
10057 if (reclen > len)
10058 break;
10059 de->d_reclen = tswap16(reclen);
10060 tswap64s((uint64_t *)&de->d_ino);
10061 tswap64s((uint64_t *)&de->d_off);
10062 de = (struct linux_dirent64 *)((char *)de + reclen);
10063 len -= reclen;
10066 unlock_user(dirp, arg2, ret);
10068 return ret;
10069 #endif /* TARGET_NR_getdents64 */
10070 #if defined(TARGET_NR__newselect)
10071 case TARGET_NR__newselect:
10072 return do_select(arg1, arg2, arg3, arg4, arg5);
10073 #endif
10074 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10075 # ifdef TARGET_NR_poll
10076 case TARGET_NR_poll:
10077 # endif
10078 # ifdef TARGET_NR_ppoll
10079 case TARGET_NR_ppoll:
10080 # endif
10082 struct target_pollfd *target_pfd;
10083 unsigned int nfds = arg2;
10084 struct pollfd *pfd;
10085 unsigned int i;
10087 pfd = NULL;
10088 target_pfd = NULL;
10089 if (nfds) {
10090 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10091 return -TARGET_EINVAL;
10094 target_pfd = lock_user(VERIFY_WRITE, arg1,
10095 sizeof(struct target_pollfd) * nfds, 1);
10096 if (!target_pfd) {
10097 return -TARGET_EFAULT;
10100 pfd = alloca(sizeof(struct pollfd) * nfds);
10101 for (i = 0; i < nfds; i++) {
10102 pfd[i].fd = tswap32(target_pfd[i].fd);
10103 pfd[i].events = tswap16(target_pfd[i].events);
10107 switch (num) {
10108 # ifdef TARGET_NR_ppoll
10109 case TARGET_NR_ppoll:
10111 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10112 target_sigset_t *target_set;
10113 sigset_t _set, *set = &_set;
10115 if (arg3) {
10116 if (target_to_host_timespec(timeout_ts, arg3)) {
10117 unlock_user(target_pfd, arg1, 0);
10118 return -TARGET_EFAULT;
10120 } else {
10121 timeout_ts = NULL;
10124 if (arg4) {
10125 if (arg5 != sizeof(target_sigset_t)) {
10126 unlock_user(target_pfd, arg1, 0);
10127 return -TARGET_EINVAL;
10130 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10131 if (!target_set) {
10132 unlock_user(target_pfd, arg1, 0);
10133 return -TARGET_EFAULT;
10135 target_to_host_sigset(set, target_set);
10136 } else {
10137 set = NULL;
10140 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10141 set, SIGSET_T_SIZE));
10143 if (!is_error(ret) && arg3) {
10144 host_to_target_timespec(arg3, timeout_ts);
10146 if (arg4) {
10147 unlock_user(target_set, arg4, 0);
10149 break;
10151 # endif
10152 # ifdef TARGET_NR_poll
10153 case TARGET_NR_poll:
10155 struct timespec ts, *pts;
10157 if (arg3 >= 0) {
10158 /* Convert ms to secs, ns */
10159 ts.tv_sec = arg3 / 1000;
10160 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10161 pts = &ts;
10162 } else {
10163 /* A negative poll() timeout means "infinite" */
10164 pts = NULL;
10166 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10167 break;
10169 # endif
10170 default:
10171 g_assert_not_reached();
10174 if (!is_error(ret)) {
10175 for (i = 0; i < nfds; i++) {
10176 target_pfd[i].revents = tswap16(pfd[i].revents);
10179 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10181 return ret;
10182 #endif
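/*
 * Editor's illustrative sketch, not part of syscall.c: guest-side ppoll()
 * with the two extras the TARGET_NR_ppoll arm above marshals -- a timespec
 * timeout (arg3) and a signal mask (arg4/arg5).
 */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct pollfd pfd = { .fd = 0, .events = POLLIN };   /* stdin */
    struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
    sigset_t mask;
    sigemptyset(&mask);                  /* block nothing during the wait */
    int n = ppoll(&pfd, 1, &ts, &mask);
    printf("ppoll returned %d\n", n);
    return 0;
}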
10183 case TARGET_NR_flock:
10184 /* NOTE: the flock constant seems to be the same for every
10185 Linux platform */
10186 return get_errno(safe_flock(arg1, arg2));
10187 case TARGET_NR_readv:
10189 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10190 if (vec != NULL) {
10191 ret = get_errno(safe_readv(arg1, vec, arg3));
10192 unlock_iovec(vec, arg2, arg3, 1);
10193 } else {
10194 ret = -host_to_target_errno(errno);
10197 return ret;
10198 case TARGET_NR_writev:
10200 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10201 if (vec != NULL) {
10202 ret = get_errno(safe_writev(arg1, vec, arg3));
10203 unlock_iovec(vec, arg2, arg3, 0);
10204 } else {
10205 ret = -host_to_target_errno(errno);
10208 return ret;
10209 #if defined(TARGET_NR_preadv)
10210 case TARGET_NR_preadv:
10212 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10213 if (vec != NULL) {
10214 unsigned long low, high;
10216 target_to_host_low_high(arg4, arg5, &low, &high);
10217 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10218 unlock_iovec(vec, arg2, arg3, 1);
10219 } else {
10220 ret = -host_to_target_errno(errno);
10223 return ret;
10224 #endif
10225 #if defined(TARGET_NR_pwritev)
10226 case TARGET_NR_pwritev:
10228 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10229 if (vec != NULL) {
10230 unsigned long low, high;
10232 target_to_host_low_high(arg4, arg5, &low, &high);
10233 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10234 unlock_iovec(vec, arg2, arg3, 0);
10235 } else {
10236 ret = -host_to_target_errno(errno);
10239 return ret;
10240 #endif
10241 case TARGET_NR_getsid:
10242 return get_errno(getsid(arg1));
10243 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10244 case TARGET_NR_fdatasync:
10245 return get_errno(fdatasync(arg1));
10246 #endif
10247 #ifdef TARGET_NR__sysctl
10248 case TARGET_NR__sysctl:
10249 /* We don't implement this, but ENOTDIR is always a safe
10250 return value. */
10251 return -TARGET_ENOTDIR;
10252 #endif
10253 case TARGET_NR_sched_getaffinity:
10255 unsigned int mask_size;
10256 unsigned long *mask;
10258 /*
10259 * sched_getaffinity needs multiples of ulong, so need to take
10260 * care of mismatches between target ulong and host ulong sizes.
10261 */
10262 if (arg2 & (sizeof(abi_ulong) - 1)) {
10263 return -TARGET_EINVAL;
10265 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10267 mask = alloca(mask_size);
10268 memset(mask, 0, mask_size);
10269 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10271 if (!is_error(ret)) {
10272 if (ret > arg2) {
10273 /* More data returned than the caller's buffer will fit.
10274 * This only happens if sizeof(abi_long) < sizeof(long)
10275 * and the caller passed us a buffer holding an odd number
10276 * of abi_longs. If the host kernel is actually using the
10277 * extra 4 bytes then fail EINVAL; otherwise we can just
10278 * ignore them and only copy the interesting part.
10279 */
10280 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10281 if (numcpus > arg2 * 8) {
10282 return -TARGET_EINVAL;
10284 ret = arg2;
10287 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10288 return -TARGET_EFAULT;
10292 return ret;
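/*
 * Editor's illustrative sketch, not part of syscall.c: the glibc wrapper
 * hides the "multiple of sizeof(long)" size rule enforced above by passing
 * a fixed-size cpu_set_t.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t set;
    if (sched_getaffinity(0, sizeof(set), &set) == 0) {
        printf("CPU0 allowed: %d\n", CPU_ISSET(0, &set));
    }
    return 0;
}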
10293 case TARGET_NR_sched_setaffinity:
10295 unsigned int mask_size;
10296 unsigned long *mask;
10298 /*
10299 * sched_setaffinity needs multiples of ulong, so need to take
10300 * care of mismatches between target ulong and host ulong sizes.
10301 */
10302 if (arg2 & (sizeof(abi_ulong) - 1)) {
10303 return -TARGET_EINVAL;
10305 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10306 mask = alloca(mask_size);
10308 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10309 if (ret) {
10310 return ret;
10313 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10315 case TARGET_NR_getcpu:
10317 unsigned cpu, node;
10318 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10319 arg2 ? &node : NULL,
10320 NULL));
10321 if (is_error(ret)) {
10322 return ret;
10324 if (arg1 && put_user_u32(cpu, arg1)) {
10325 return -TARGET_EFAULT;
10327 if (arg2 && put_user_u32(node, arg2)) {
10328 return -TARGET_EFAULT;
10331 return ret;
10332 case TARGET_NR_sched_setparam:
10334 struct sched_param *target_schp;
10335 struct sched_param schp;
10337 if (arg2 == 0) {
10338 return -TARGET_EINVAL;
10340 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10341 return -TARGET_EFAULT;
10342 schp.sched_priority = tswap32(target_schp->sched_priority);
10343 unlock_user_struct(target_schp, arg2, 0);
10344 return get_errno(sched_setparam(arg1, &schp));
10346 case TARGET_NR_sched_getparam:
10348 struct sched_param *target_schp;
10349 struct sched_param schp;
10351 if (arg2 == 0) {
10352 return -TARGET_EINVAL;
10354 ret = get_errno(sched_getparam(arg1, &schp));
10355 if (!is_error(ret)) {
10356 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10357 return -TARGET_EFAULT;
10358 target_schp->sched_priority = tswap32(schp.sched_priority);
10359 unlock_user_struct(target_schp, arg2, 1);
10362 return ret;
10363 case TARGET_NR_sched_setscheduler:
10365 struct sched_param *target_schp;
10366 struct sched_param schp;
10367 if (arg3 == 0) {
10368 return -TARGET_EINVAL;
10370 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10371 return -TARGET_EFAULT;
10372 schp.sched_priority = tswap32(target_schp->sched_priority);
10373 unlock_user_struct(target_schp, arg3, 0);
10374 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10376 case TARGET_NR_sched_getscheduler:
10377 return get_errno(sched_getscheduler(arg1));
10378 case TARGET_NR_sched_yield:
10379 return get_errno(sched_yield());
10380 case TARGET_NR_sched_get_priority_max:
10381 return get_errno(sched_get_priority_max(arg1));
10382 case TARGET_NR_sched_get_priority_min:
10383 return get_errno(sched_get_priority_min(arg1));
10384 #ifdef TARGET_NR_sched_rr_get_interval
10385 case TARGET_NR_sched_rr_get_interval:
10387 struct timespec ts;
10388 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10389 if (!is_error(ret)) {
10390 ret = host_to_target_timespec(arg2, &ts);
10393 return ret;
10394 #endif
10395 #if defined(TARGET_NR_nanosleep)
10396 case TARGET_NR_nanosleep:
10398 struct timespec req, rem;
10399 target_to_host_timespec(&req, arg1);
10400 ret = get_errno(safe_nanosleep(&req, &rem));
10401 if (is_error(ret) && arg2) {
10402 host_to_target_timespec(arg2, &rem);
10405 return ret;
10406 #endif
10407 case TARGET_NR_prctl:
10408 switch (arg1) {
10409 case PR_GET_PDEATHSIG:
10411 int deathsig;
10412 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10413 if (!is_error(ret) && arg2
10414 && put_user_ual(deathsig, arg2)) {
10415 return -TARGET_EFAULT;
10417 return ret;
10419 #ifdef PR_GET_NAME
10420 case PR_GET_NAME:
10422 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10423 if (!name) {
10424 return -TARGET_EFAULT;
10426 ret = get_errno(prctl(arg1, (unsigned long)name,
10427 arg3, arg4, arg5));
10428 unlock_user(name, arg2, 16);
10429 return ret;
10431 case PR_SET_NAME:
10433 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10434 if (!name) {
10435 return -TARGET_EFAULT;
10437 ret = get_errno(prctl(arg1, (unsigned long)name,
10438 arg3, arg4, arg5));
10439 unlock_user(name, arg2, 0);
10440 return ret;
10442 #endif
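/*
 * Editor's illustrative sketch, not part of syscall.c: PR_SET_NAME and
 * PR_GET_NAME operate on a 16-byte buffer (TASK_COMM_LEN), which is why the
 * cases above lock exactly 16 guest bytes.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
    char name[16];
    prctl(PR_SET_NAME, "demo-thread");   /* truncated to 15 chars + NUL */
    prctl(PR_GET_NAME, name);
    printf("comm=%s\n", name);
    return 0;
}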
10443 #ifdef TARGET_MIPS
10444 case TARGET_PR_GET_FP_MODE:
10446 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10447 ret = 0;
10448 if (env->CP0_Status & (1 << CP0St_FR)) {
10449 ret |= TARGET_PR_FP_MODE_FR;
10451 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10452 ret |= TARGET_PR_FP_MODE_FRE;
10454 return ret;
10456 case TARGET_PR_SET_FP_MODE:
10458 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10459 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10460 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10461 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10462 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10464 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10465 TARGET_PR_FP_MODE_FRE;
10467 /* If nothing to change, return right away, successfully. */
10468 if (old_fr == new_fr && old_fre == new_fre) {
10469 return 0;
10471 /* Check the value is valid */
10472 if (arg2 & ~known_bits) {
10473 return -TARGET_EOPNOTSUPP;
10475 /* Setting FRE without FR is not supported. */
10476 if (new_fre && !new_fr) {
10477 return -TARGET_EOPNOTSUPP;
10479 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10480 /* FR1 is not supported */
10481 return -TARGET_EOPNOTSUPP;
10483 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10484 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10485 /* cannot set FR=0 */
10486 return -TARGET_EOPNOTSUPP;
10488 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10489 /* Cannot set FRE=1 */
10490 return -TARGET_EOPNOTSUPP;
10493 int i;
10494 fpr_t *fpr = env->active_fpu.fpr;
10495 for (i = 0; i < 32 ; i += 2) {
10496 if (!old_fr && new_fr) {
10497 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10498 } else if (old_fr && !new_fr) {
10499 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10503 if (new_fr) {
10504 env->CP0_Status |= (1 << CP0St_FR);
10505 env->hflags |= MIPS_HFLAG_F64;
10506 } else {
10507 env->CP0_Status &= ~(1 << CP0St_FR);
10508 env->hflags &= ~MIPS_HFLAG_F64;
10510 if (new_fre) {
10511 env->CP0_Config5 |= (1 << CP0C5_FRE);
10512 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10513 env->hflags |= MIPS_HFLAG_FRE;
10515 } else {
10516 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10517 env->hflags &= ~MIPS_HFLAG_FRE;
10520 return 0;
10522 #endif /* MIPS */
10523 #ifdef TARGET_AARCH64
10524 case TARGET_PR_SVE_SET_VL:
10525 /*
10526 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10527 * PR_SVE_VL_INHERIT. Note the kernel definition
10528 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10529 * even though the current architectural maximum is VQ=16.
10530 */
10531 ret = -TARGET_EINVAL;
10532 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10533 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10534 CPUARMState *env = cpu_env;
10535 ARMCPU *cpu = env_archcpu(env);
10536 uint32_t vq, old_vq;
10538 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10539 vq = MAX(arg2 / 16, 1);
10540 vq = MIN(vq, cpu->sve_max_vq);
10542 if (vq < old_vq) {
10543 aarch64_sve_narrow_vq(env, vq);
10545 env->vfp.zcr_el[1] = vq - 1;
10546 arm_rebuild_hflags(env);
10547 ret = vq * 16;
10549 return ret;
10550 case TARGET_PR_SVE_GET_VL:
10551 ret = -TARGET_EINVAL;
10553 ARMCPU *cpu = env_archcpu(cpu_env);
10554 if (cpu_isar_feature(aa64_sve, cpu)) {
10555 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10558 return ret;
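/*
 * Editor's illustrative sketch, not part of syscall.c: querying and setting
 * the SVE vector length from an AArch64 guest. The fallback constants below
 * match linux/prctl.h; on non-SVE CPUs both calls fail with EINVAL, as
 * emulated above.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_GET_VL
#define PR_SVE_SET_VL 50
#define PR_SVE_GET_VL 51
#endif

int main(void)
{
    int vl = prctl(PR_SVE_GET_VL);
    if (vl >= 0) {
        printf("current VL=%d bytes\n", vl & 0xffff); /* low 16 bits are the VL */
    }
    prctl(PR_SVE_SET_VL, 32);            /* request a 256-bit vector length */
    return 0;
}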
10559 case TARGET_PR_PAC_RESET_KEYS:
10561 CPUARMState *env = cpu_env;
10562 ARMCPU *cpu = env_archcpu(env);
10564 if (arg3 || arg4 || arg5) {
10565 return -TARGET_EINVAL;
10567 if (cpu_isar_feature(aa64_pauth, cpu)) {
10568 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10569 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10570 TARGET_PR_PAC_APGAKEY);
10571 int ret = 0;
10572 Error *err = NULL;
10574 if (arg2 == 0) {
10575 arg2 = all;
10576 } else if (arg2 & ~all) {
10577 return -TARGET_EINVAL;
10579 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10580 ret |= qemu_guest_getrandom(&env->keys.apia,
10581 sizeof(ARMPACKey), &err);
10583 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10584 ret |= qemu_guest_getrandom(&env->keys.apib,
10585 sizeof(ARMPACKey), &err);
10587 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10588 ret |= qemu_guest_getrandom(&env->keys.apda,
10589 sizeof(ARMPACKey), &err);
10591 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10592 ret |= qemu_guest_getrandom(&env->keys.apdb,
10593 sizeof(ARMPACKey), &err);
10595 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10596 ret |= qemu_guest_getrandom(&env->keys.apga,
10597 sizeof(ARMPACKey), &err);
10599 if (ret != 0) {
10600 /*
10601 * Some unknown failure in the crypto. The best
10602 * we can do is log it and fail the syscall.
10603 * The real syscall cannot fail this way.
10604 */
10605 qemu_log_mask(LOG_UNIMP,
10606 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10607 error_get_pretty(err));
10608 error_free(err);
10609 return -TARGET_EIO;
10611 return 0;
10614 return -TARGET_EINVAL;
10615 #endif /* AARCH64 */
10616 case PR_GET_SECCOMP:
10617 case PR_SET_SECCOMP:
10618 /* Disable seccomp to prevent the target disabling syscalls we
10619 * need. */
10620 return -TARGET_EINVAL;
10621 default:
10622 /* Most prctl options have no pointer arguments */
10623 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10625 break;
10626 #ifdef TARGET_NR_arch_prctl
10627 case TARGET_NR_arch_prctl:
10628 return do_arch_prctl(cpu_env, arg1, arg2);
10629 #endif
10630 #ifdef TARGET_NR_pread64
10631 case TARGET_NR_pread64:
10632 if (regpairs_aligned(cpu_env, num)) {
10633 arg4 = arg5;
10634 arg5 = arg6;
10636 if (arg2 == 0 && arg3 == 0) {
10637 /* Special-case NULL buffer and zero length, which should succeed */
10638 p = 0;
10639 } else {
10640 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10641 if (!p) {
10642 return -TARGET_EFAULT;
10645 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10646 unlock_user(p, arg2, ret);
10647 return ret;
10648 case TARGET_NR_pwrite64:
10649 if (regpairs_aligned(cpu_env, num)) {
10650 arg4 = arg5;
10651 arg5 = arg6;
10653 if (arg2 == 0 && arg3 == 0) {
10654 /* Special-case NULL buffer and zero length, which should succeed */
10655 p = 0;
10656 } else {
10657 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10658 if (!p) {
10659 return -TARGET_EFAULT;
10662 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10663 unlock_user(p, arg2, 0);
10664 return ret;
10665 #endif
10666 case TARGET_NR_getcwd:
10667 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10668 return -TARGET_EFAULT;
10669 ret = get_errno(sys_getcwd1(p, arg2));
10670 unlock_user(p, arg1, ret);
10671 return ret;
10672 case TARGET_NR_capget:
10673 case TARGET_NR_capset:
10675 struct target_user_cap_header *target_header;
10676 struct target_user_cap_data *target_data = NULL;
10677 struct __user_cap_header_struct header;
10678 struct __user_cap_data_struct data[2];
10679 struct __user_cap_data_struct *dataptr = NULL;
10680 int i, target_datalen;
10681 int data_items = 1;
10683 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10684 return -TARGET_EFAULT;
10686 header.version = tswap32(target_header->version);
10687 header.pid = tswap32(target_header->pid);
10689 if (header.version != _LINUX_CAPABILITY_VERSION) {
10690 /* Versions 2 and up take a pointer to two user_data structs */
10691 data_items = 2;
10694 target_datalen = sizeof(*target_data) * data_items;
10696 if (arg2) {
10697 if (num == TARGET_NR_capget) {
10698 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10699 } else {
10700 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10702 if (!target_data) {
10703 unlock_user_struct(target_header, arg1, 0);
10704 return -TARGET_EFAULT;
10707 if (num == TARGET_NR_capset) {
10708 for (i = 0; i < data_items; i++) {
10709 data[i].effective = tswap32(target_data[i].effective);
10710 data[i].permitted = tswap32(target_data[i].permitted);
10711 data[i].inheritable = tswap32(target_data[i].inheritable);
10715 dataptr = data;
10718 if (num == TARGET_NR_capget) {
10719 ret = get_errno(capget(&header, dataptr));
10720 } else {
10721 ret = get_errno(capset(&header, dataptr));
10724 /* The kernel always updates version for both capget and capset */
10725 target_header->version = tswap32(header.version);
10726 unlock_user_struct(target_header, arg1, 1);
10728 if (arg2) {
10729 if (num == TARGET_NR_capget) {
10730 for (i = 0; i < data_items; i++) {
10731 target_data[i].effective = tswap32(data[i].effective);
10732 target_data[i].permitted = tswap32(data[i].permitted);
10733 target_data[i].inheritable = tswap32(data[i].inheritable);
10735 unlock_user(target_data, arg2, target_datalen);
10736 } else {
10737 unlock_user(target_data, arg2, 0);
10740 return ret;
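/*
 * Editor's illustrative sketch, not part of syscall.c: the capget version
 * probe -- the kernel rewrites header.version when it is unrecognized,
 * which is why the code above copies version back unconditionally.
 */
#include <linux/capability.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    struct __user_cap_header_struct hdr = { .version = 0, .pid = 0 };
    struct __user_cap_data_struct data[2] = { { 0 } };
    syscall(SYS_capget, &hdr, NULL);     /* probe: kernel fills in version */
    printf("preferred version: 0x%x\n", hdr.version);
    if (syscall(SYS_capget, &hdr, data) == 0) {
        printf("effective[0]=0x%x\n", data[0].effective);
    }
    return 0;
}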
10742 case TARGET_NR_sigaltstack:
10743 return do_sigaltstack(arg1, arg2,
10744 get_sp_from_cpustate((CPUArchState *)cpu_env));
10746 #ifdef CONFIG_SENDFILE
10747 #ifdef TARGET_NR_sendfile
10748 case TARGET_NR_sendfile:
10750 off_t *offp = NULL;
10751 off_t off;
10752 if (arg3) {
10753 ret = get_user_sal(off, arg3);
10754 if (is_error(ret)) {
10755 return ret;
10757 offp = &off;
10759 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10760 if (!is_error(ret) && arg3) {
10761 abi_long ret2 = put_user_sal(off, arg3);
10762 if (is_error(ret2)) {
10763 ret = ret2;
10766 return ret;
10768 #endif
10769 #ifdef TARGET_NR_sendfile64
10770 case TARGET_NR_sendfile64:
10772 off_t *offp = NULL;
10773 off_t off;
10774 if (arg3) {
10775 ret = get_user_s64(off, arg3);
10776 if (is_error(ret)) {
10777 return ret;
10779 offp = &off;
10781 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10782 if (!is_error(ret) && arg3) {
10783 abi_long ret2 = put_user_s64(off, arg3);
10784 if (is_error(ret2)) {
10785 ret = ret2;
10788 return ret;
10790 #endif
10791 #endif
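/*
 * Editor's illustrative sketch, not part of syscall.c: sendfile() as a
 * guest uses it -- the offset is read before the call and written back
 * after, the round trip implemented above with get_user_sal/put_user_sal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/sendfile.h>

int main(int argc, char **argv)
{
    if (argc < 2) {
        return 1;
    }
    int in = open(argv[1], O_RDONLY);
    if (in < 0) {
        return 1;
    }
    off_t off = 0;
    ssize_t n = sendfile(1 /* stdout */, in, &off, 65536);
    fprintf(stderr, "copied %zd bytes, offset now %lld\n", n, (long long)off);
    return 0;
}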
10792 #ifdef TARGET_NR_vfork
10793 case TARGET_NR_vfork:
10794 return get_errno(do_fork(cpu_env,
10795 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10796 0, 0, 0, 0));
10797 #endif
10798 #ifdef TARGET_NR_ugetrlimit
10799 case TARGET_NR_ugetrlimit:
10801 struct rlimit rlim;
10802 int resource = target_to_host_resource(arg1);
10803 ret = get_errno(getrlimit(resource, &rlim));
10804 if (!is_error(ret)) {
10805 struct target_rlimit *target_rlim;
10806 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10807 return -TARGET_EFAULT;
10808 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10809 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10810 unlock_user_struct(target_rlim, arg2, 1);
10812 return ret;
10814 #endif
10815 #ifdef TARGET_NR_truncate64
10816 case TARGET_NR_truncate64:
10817 if (!(p = lock_user_string(arg1)))
10818 return -TARGET_EFAULT;
10819 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10820 unlock_user(p, arg1, 0);
10821 return ret;
10822 #endif
10823 #ifdef TARGET_NR_ftruncate64
10824 case TARGET_NR_ftruncate64:
10825 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10826 #endif
10827 #ifdef TARGET_NR_stat64
10828 case TARGET_NR_stat64:
10829 if (!(p = lock_user_string(arg1))) {
10830 return -TARGET_EFAULT;
10832 ret = get_errno(stat(path(p), &st));
10833 unlock_user(p, arg1, 0);
10834 if (!is_error(ret))
10835 ret = host_to_target_stat64(cpu_env, arg2, &st);
10836 return ret;
10837 #endif
10838 #ifdef TARGET_NR_lstat64
10839 case TARGET_NR_lstat64:
10840 if (!(p = lock_user_string(arg1))) {
10841 return -TARGET_EFAULT;
10843 ret = get_errno(lstat(path(p), &st));
10844 unlock_user(p, arg1, 0);
10845 if (!is_error(ret))
10846 ret = host_to_target_stat64(cpu_env, arg2, &st);
10847 return ret;
10848 #endif
10849 #ifdef TARGET_NR_fstat64
10850 case TARGET_NR_fstat64:
10851 ret = get_errno(fstat(arg1, &st));
10852 if (!is_error(ret))
10853 ret = host_to_target_stat64(cpu_env, arg2, &st);
10854 return ret;
10855 #endif
10856 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10857 #ifdef TARGET_NR_fstatat64
10858 case TARGET_NR_fstatat64:
10859 #endif
10860 #ifdef TARGET_NR_newfstatat
10861 case TARGET_NR_newfstatat:
10862 #endif
10863 if (!(p = lock_user_string(arg2))) {
10864 return -TARGET_EFAULT;
10866 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10867 unlock_user(p, arg2, 0);
10868 if (!is_error(ret))
10869 ret = host_to_target_stat64(cpu_env, arg3, &st);
10870 return ret;
10871 #endif
10872 #if defined(TARGET_NR_statx)
10873 case TARGET_NR_statx:
10875 struct target_statx *target_stx;
10876 int dirfd = arg1;
10877 int flags = arg3;
10879 p = lock_user_string(arg2);
10880 if (p == NULL) {
10881 return -TARGET_EFAULT;
10883 #if defined(__NR_statx)
10885 /*
10886 * It is assumed that struct statx is architecture independent.
10887 */
10888 struct target_statx host_stx;
10889 int mask = arg4;
10891 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10892 if (!is_error(ret)) {
10893 if (host_to_target_statx(&host_stx, arg5) != 0) {
10894 unlock_user(p, arg2, 0);
10895 return -TARGET_EFAULT;
10899 if (ret != -TARGET_ENOSYS) {
10900 unlock_user(p, arg2, 0);
10901 return ret;
10904 #endif
10905 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10906 unlock_user(p, arg2, 0);
10908 if (!is_error(ret)) {
10909 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10910 return -TARGET_EFAULT;
10912 memset(target_stx, 0, sizeof(*target_stx));
10913 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10914 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10915 __put_user(st.st_ino, &target_stx->stx_ino);
10916 __put_user(st.st_mode, &target_stx->stx_mode);
10917 __put_user(st.st_uid, &target_stx->stx_uid);
10918 __put_user(st.st_gid, &target_stx->stx_gid);
10919 __put_user(st.st_nlink, &target_stx->stx_nlink);
10920 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10921 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10922 __put_user(st.st_size, &target_stx->stx_size);
10923 __put_user(st.st_blksize, &target_stx->stx_blksize);
10924 __put_user(st.st_blocks, &target_stx->stx_blocks);
10925 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10926 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10927 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10928 unlock_user_struct(target_stx, arg5, 1);
10931 return ret;
10932 #endif
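/*
 * Editor's illustrative sketch, not part of syscall.c: statx() from the
 * guest (glibc 2.28+). When the host lacks __NR_statx, the code above
 * synthesizes the result from fstatat(), so only the basic-stats fields
 * are meaningful.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    struct statx stx;
    if (statx(AT_FDCWD, "/etc/hosts", 0, STATX_BASIC_STATS, &stx) == 0) {
        printf("size=%llu mode=%o\n",
               (unsigned long long)stx.stx_size, stx.stx_mode);
    }
    return 0;
}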
10933 #ifdef TARGET_NR_lchown
10934 case TARGET_NR_lchown:
10935 if (!(p = lock_user_string(arg1)))
10936 return -TARGET_EFAULT;
10937 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10938 unlock_user(p, arg1, 0);
10939 return ret;
10940 #endif
10941 #ifdef TARGET_NR_getuid
10942 case TARGET_NR_getuid:
10943 return get_errno(high2lowuid(getuid()));
10944 #endif
10945 #ifdef TARGET_NR_getgid
10946 case TARGET_NR_getgid:
10947 return get_errno(high2lowgid(getgid()));
10948 #endif
10949 #ifdef TARGET_NR_geteuid
10950 case TARGET_NR_geteuid:
10951 return get_errno(high2lowuid(geteuid()));
10952 #endif
10953 #ifdef TARGET_NR_getegid
10954 case TARGET_NR_getegid:
10955 return get_errno(high2lowgid(getegid()));
10956 #endif
10957 case TARGET_NR_setreuid:
10958 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10959 case TARGET_NR_setregid:
10960 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10961 case TARGET_NR_getgroups:
10963 int gidsetsize = arg1;
10964 target_id *target_grouplist;
10965 gid_t *grouplist;
10966 int i;
10968 grouplist = alloca(gidsetsize * sizeof(gid_t));
10969 ret = get_errno(getgroups(gidsetsize, grouplist));
10970 if (gidsetsize == 0)
10971 return ret;
10972 if (!is_error(ret)) {
10973 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10974 if (!target_grouplist)
10975 return -TARGET_EFAULT;
10976 for (i = 0; i < ret; i++)
10977 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10978 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10981 return ret;
10982 case TARGET_NR_setgroups:
10984 int gidsetsize = arg1;
10985 target_id *target_grouplist;
10986 gid_t *grouplist = NULL;
10987 int i;
10988 if (gidsetsize) {
10989 grouplist = alloca(gidsetsize * sizeof(gid_t));
10990 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10991 if (!target_grouplist) {
10992 return -TARGET_EFAULT;
10994 for (i = 0; i < gidsetsize; i++) {
10995 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10997 unlock_user(target_grouplist, arg2, 0);
10999 return get_errno(setgroups(gidsetsize, grouplist));
11001 case TARGET_NR_fchown:
11002 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11003 #if defined(TARGET_NR_fchownat)
11004 case TARGET_NR_fchownat:
11005 if (!(p = lock_user_string(arg2)))
11006 return -TARGET_EFAULT;
11007 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11008 low2highgid(arg4), arg5));
11009 unlock_user(p, arg2, 0);
11010 return ret;
11011 #endif
11012 #ifdef TARGET_NR_setresuid
11013 case TARGET_NR_setresuid:
11014 return get_errno(sys_setresuid(low2highuid(arg1),
11015 low2highuid(arg2),
11016 low2highuid(arg3)));
11017 #endif
11018 #ifdef TARGET_NR_getresuid
11019 case TARGET_NR_getresuid:
11021 uid_t ruid, euid, suid;
11022 ret = get_errno(getresuid(&ruid, &euid, &suid));
11023 if (!is_error(ret)) {
11024 if (put_user_id(high2lowuid(ruid), arg1)
11025 || put_user_id(high2lowuid(euid), arg2)
11026 || put_user_id(high2lowuid(suid), arg3))
11027 return -TARGET_EFAULT;
11030 return ret;
11031 #endif
11032 #ifdef TARGET_NR_getresgid
11033 case TARGET_NR_setresgid:
11034 return get_errno(sys_setresgid(low2highgid(arg1),
11035 low2highgid(arg2),
11036 low2highgid(arg3)));
11037 #endif
11038 #ifdef TARGET_NR_getresgid
11039 case TARGET_NR_getresgid:
11041 gid_t rgid, egid, sgid;
11042 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11043 if (!is_error(ret)) {
11044 if (put_user_id(high2lowgid(rgid), arg1)
11045 || put_user_id(high2lowgid(egid), arg2)
11046 || put_user_id(high2lowgid(sgid), arg3))
11047 return -TARGET_EFAULT;
11050 return ret;
11051 #endif
11052 #ifdef TARGET_NR_chown
11053 case TARGET_NR_chown:
11054 if (!(p = lock_user_string(arg1)))
11055 return -TARGET_EFAULT;
11056 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11057 unlock_user(p, arg1, 0);
11058 return ret;
11059 #endif
11060 case TARGET_NR_setuid:
11061 return get_errno(sys_setuid(low2highuid(arg1)));
11062 case TARGET_NR_setgid:
11063 return get_errno(sys_setgid(low2highgid(arg1)));
11064 case TARGET_NR_setfsuid:
11065 return get_errno(setfsuid(arg1));
11066 case TARGET_NR_setfsgid:
11067 return get_errno(setfsgid(arg1));
11069 #ifdef TARGET_NR_lchown32
11070 case TARGET_NR_lchown32:
11071 if (!(p = lock_user_string(arg1)))
11072 return -TARGET_EFAULT;
11073 ret = get_errno(lchown(p, arg2, arg3));
11074 unlock_user(p, arg1, 0);
11075 return ret;
11076 #endif
11077 #ifdef TARGET_NR_getuid32
11078 case TARGET_NR_getuid32:
11079 return get_errno(getuid());
11080 #endif
11082 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11083 /* Alpha specific */
11084 case TARGET_NR_getxuid:
11086 uid_t euid;
11087 euid = geteuid();
11088 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11090 return get_errno(getuid());
11091 #endif
11092 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11093 /* Alpha specific */
11094 case TARGET_NR_getxgid:
11096 uid_t egid;
11097 egid = getegid();
11098 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11100 return get_errno(getgid());
11101 #endif
11102 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11103 /* Alpha specific */
11104 case TARGET_NR_osf_getsysinfo:
11105 ret = -TARGET_EOPNOTSUPP;
11106 switch (arg1) {
11107 case TARGET_GSI_IEEE_FP_CONTROL:
11109 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11110 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11112 swcr &= ~SWCR_STATUS_MASK;
11113 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11115 if (put_user_u64 (swcr, arg2))
11116 return -TARGET_EFAULT;
11117 ret = 0;
11119 break;
11121 /* case GSI_IEEE_STATE_AT_SIGNAL:
11122 -- Not implemented in linux kernel.
11123 case GSI_UACPROC:
11124 -- Retrieves current unaligned access state; not much used.
11125 case GSI_PROC_TYPE:
11126 -- Retrieves implver information; surely not used.
11127 case GSI_GET_HWRPB:
11128 -- Grabs a copy of the HWRPB; surely not used.
11129 */
11131 return ret;
11132 #endif
11133 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11134 /* Alpha specific */
11135 case TARGET_NR_osf_setsysinfo:
11136 ret = -TARGET_EOPNOTSUPP;
11137 switch (arg1) {
11138 case TARGET_SSI_IEEE_FP_CONTROL:
11140 uint64_t swcr, fpcr;
11142 if (get_user_u64 (swcr, arg2)) {
11143 return -TARGET_EFAULT;
11146 /*
11147 * The kernel calls swcr_update_status to update the
11148 * status bits from the fpcr at every point that it
11149 * could be queried. Therefore, we store the status
11150 * bits only in FPCR.
11151 */
11152 ((CPUAlphaState *)cpu_env)->swcr
11153 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11155 fpcr = cpu_alpha_load_fpcr(cpu_env);
11156 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11157 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11158 cpu_alpha_store_fpcr(cpu_env, fpcr);
11159 ret = 0;
11161 break;
11163 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11165 uint64_t exc, fpcr, fex;
11167 if (get_user_u64(exc, arg2)) {
11168 return -TARGET_EFAULT;
11170 exc &= SWCR_STATUS_MASK;
11171 fpcr = cpu_alpha_load_fpcr(cpu_env);
11173 /* Old exceptions are not signaled. */
11174 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11175 fex = exc & ~fex;
11176 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11177 fex &= ((CPUArchState *)cpu_env)->swcr;
11179 /* Update the hardware fpcr. */
11180 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11181 cpu_alpha_store_fpcr(cpu_env, fpcr);
11183 if (fex) {
11184 int si_code = TARGET_FPE_FLTUNK;
11185 target_siginfo_t info;
11187 if (fex & SWCR_TRAP_ENABLE_DNO) {
11188 si_code = TARGET_FPE_FLTUND;
11190 if (fex & SWCR_TRAP_ENABLE_INE) {
11191 si_code = TARGET_FPE_FLTRES;
11193 if (fex & SWCR_TRAP_ENABLE_UNF) {
11194 si_code = TARGET_FPE_FLTUND;
11196 if (fex & SWCR_TRAP_ENABLE_OVF) {
11197 si_code = TARGET_FPE_FLTOVF;
11199 if (fex & SWCR_TRAP_ENABLE_DZE) {
11200 si_code = TARGET_FPE_FLTDIV;
11202 if (fex & SWCR_TRAP_ENABLE_INV) {
11203 si_code = TARGET_FPE_FLTINV;
11206 info.si_signo = SIGFPE;
11207 info.si_errno = 0;
11208 info.si_code = si_code;
11209 info._sifields._sigfault._addr
11210 = ((CPUArchState *)cpu_env)->pc;
11211 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11212 QEMU_SI_FAULT, &info);
11214 ret = 0;
11216 break;
11218 /* case SSI_NVPAIRS:
11219 -- Used with SSIN_UACPROC to enable unaligned accesses.
11220 case SSI_IEEE_STATE_AT_SIGNAL:
11221 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11222 -- Not implemented in linux kernel
11223 */
11225 return ret;
11226 #endif
11227 #ifdef TARGET_NR_osf_sigprocmask
11228 /* Alpha specific. */
11229 case TARGET_NR_osf_sigprocmask:
11231 abi_ulong mask;
11232 int how;
11233 sigset_t set, oldset;
11235 switch (arg1) {
11236 case TARGET_SIG_BLOCK:
11237 how = SIG_BLOCK;
11238 break;
11239 case TARGET_SIG_UNBLOCK:
11240 how = SIG_UNBLOCK;
11241 break;
11242 case TARGET_SIG_SETMASK:
11243 how = SIG_SETMASK;
11244 break;
11245 default:
11246 return -TARGET_EINVAL;
11248 mask = arg2;
11249 target_to_host_old_sigset(&set, &mask);
11250 ret = do_sigprocmask(how, &set, &oldset);
11251 if (!ret) {
11252 host_to_target_old_sigset(&mask, &oldset);
11253 ret = mask;
11256 return ret;
11257 #endif
11259 #ifdef TARGET_NR_getgid32
11260 case TARGET_NR_getgid32:
11261 return get_errno(getgid());
11262 #endif
11263 #ifdef TARGET_NR_geteuid32
11264 case TARGET_NR_geteuid32:
11265 return get_errno(geteuid());
11266 #endif
11267 #ifdef TARGET_NR_getegid32
11268 case TARGET_NR_getegid32:
11269 return get_errno(getegid());
11270 #endif
11271 #ifdef TARGET_NR_setreuid32
11272 case TARGET_NR_setreuid32:
11273 return get_errno(setreuid(arg1, arg2));
11274 #endif
11275 #ifdef TARGET_NR_setregid32
11276 case TARGET_NR_setregid32:
11277 return get_errno(setregid(arg1, arg2));
11278 #endif
11279 #ifdef TARGET_NR_getgroups32
11280 case TARGET_NR_getgroups32:
11282 int gidsetsize = arg1;
11283 uint32_t *target_grouplist;
11284 gid_t *grouplist;
11285 int i;
11287 grouplist = alloca(gidsetsize * sizeof(gid_t));
11288 ret = get_errno(getgroups(gidsetsize, grouplist));
11289 if (gidsetsize == 0)
11290 return ret;
11291 if (!is_error(ret)) {
11292 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11293 if (!target_grouplist) {
11294 return -TARGET_EFAULT;
11296 for (i = 0; i < ret; i++)
11297 target_grouplist[i] = tswap32(grouplist[i]);
11298 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11301 return ret;
11302 #endif
11303 #ifdef TARGET_NR_setgroups32
11304 case TARGET_NR_setgroups32:
11306 int gidsetsize = arg1;
11307 uint32_t *target_grouplist;
11308 gid_t *grouplist;
11309 int i;
11311 grouplist = alloca(gidsetsize * sizeof(gid_t));
11312 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11313 if (!target_grouplist) {
11314 return -TARGET_EFAULT;
11316 for (i = 0; i < gidsetsize; i++)
11317 grouplist[i] = tswap32(target_grouplist[i]);
11318 unlock_user(target_grouplist, arg2, 0);
11319 return get_errno(setgroups(gidsetsize, grouplist));
11321 #endif
11322 #ifdef TARGET_NR_fchown32
11323 case TARGET_NR_fchown32:
11324 return get_errno(fchown(arg1, arg2, arg3));
11325 #endif
11326 #ifdef TARGET_NR_setresuid32
11327 case TARGET_NR_setresuid32:
11328 return get_errno(sys_setresuid(arg1, arg2, arg3));
11329 #endif
11330 #ifdef TARGET_NR_getresuid32
11331 case TARGET_NR_getresuid32:
11333 uid_t ruid, euid, suid;
11334 ret = get_errno(getresuid(&ruid, &euid, &suid));
11335 if (!is_error(ret)) {
11336 if (put_user_u32(ruid, arg1)
11337 || put_user_u32(euid, arg2)
11338 || put_user_u32(suid, arg3))
11339 return -TARGET_EFAULT;
11342 return ret;
11343 #endif
11344 #ifdef TARGET_NR_setresgid32
11345 case TARGET_NR_setresgid32:
11346 return get_errno(sys_setresgid(arg1, arg2, arg3));
11347 #endif
11348 #ifdef TARGET_NR_getresgid32
11349 case TARGET_NR_getresgid32:
11351 gid_t rgid, egid, sgid;
11352 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11353 if (!is_error(ret)) {
11354 if (put_user_u32(rgid, arg1)
11355 || put_user_u32(egid, arg2)
11356 || put_user_u32(sgid, arg3))
11357 return -TARGET_EFAULT;
11360 return ret;
11361 #endif
11362 #ifdef TARGET_NR_chown32
11363 case TARGET_NR_chown32:
11364 if (!(p = lock_user_string(arg1)))
11365 return -TARGET_EFAULT;
11366 ret = get_errno(chown(p, arg2, arg3));
11367 unlock_user(p, arg1, 0);
11368 return ret;
11369 #endif
11370 #ifdef TARGET_NR_setuid32
11371 case TARGET_NR_setuid32:
11372 return get_errno(sys_setuid(arg1));
11373 #endif
11374 #ifdef TARGET_NR_setgid32
11375 case TARGET_NR_setgid32:
11376 return get_errno(sys_setgid(arg1));
11377 #endif
11378 #ifdef TARGET_NR_setfsuid32
11379 case TARGET_NR_setfsuid32:
11380 return get_errno(setfsuid(arg1));
11381 #endif
11382 #ifdef TARGET_NR_setfsgid32
11383 case TARGET_NR_setfsgid32:
11384 return get_errno(setfsgid(arg1));
11385 #endif
11386 #ifdef TARGET_NR_mincore
11387 case TARGET_NR_mincore:
11389 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11390 if (!a) {
11391 return -TARGET_ENOMEM;
11393 p = lock_user_string(arg3);
11394 if (!p) {
11395 ret = -TARGET_EFAULT;
11396 } else {
11397 ret = get_errno(mincore(a, arg2, p));
11398 unlock_user(p, arg3, ret);
11400 unlock_user(a, arg1, 0);
11402 return ret;
11403 #endif
11404 #ifdef TARGET_NR_arm_fadvise64_64
11405 case TARGET_NR_arm_fadvise64_64:
11406 /* arm_fadvise64_64 looks like fadvise64_64 but
11407 * with different argument order: fd, advice, offset, len
11408 * rather than the usual fd, offset, len, advice.
11409 * Note that offset and len are both 64-bit so appear as
11410 * pairs of 32-bit registers.
11411 */
11412 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11413 target_offset64(arg5, arg6), arg2);
11414 return -host_to_target_errno(ret);
11415 #endif
11417 #if TARGET_ABI_BITS == 32
11419 #ifdef TARGET_NR_fadvise64_64
11420 case TARGET_NR_fadvise64_64:
11421 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11422 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11423 ret = arg2;
11424 arg2 = arg3;
11425 arg3 = arg4;
11426 arg4 = arg5;
11427 arg5 = arg6;
11428 arg6 = ret;
11429 #else
11430 /* 6 args: fd, offset (high, low), len (high, low), advice */
11431 if (regpairs_aligned(cpu_env, num)) {
11432 /* offset is in (3,4), len in (5,6) and advice in 7 */
11433 arg2 = arg3;
11434 arg3 = arg4;
11435 arg4 = arg5;
11436 arg5 = arg6;
11437 arg6 = arg7;
11439 #endif
11440 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11441 target_offset64(arg4, arg5), arg6);
11442 return -host_to_target_errno(ret);
11443 #endif
11445 #ifdef TARGET_NR_fadvise64
11446 case TARGET_NR_fadvise64:
11447 /* 5 args: fd, offset (high, low), len, advice */
11448 if (regpairs_aligned(cpu_env, num)) {
11449 /* offset is in (3,4), len in 5 and advice in 6 */
11450 arg2 = arg3;
11451 arg3 = arg4;
11452 arg4 = arg5;
11453 arg5 = arg6;
11455 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11456 return -host_to_target_errno(ret);
11457 #endif
11459 #else /* not a 32-bit ABI */
11460 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11461 #ifdef TARGET_NR_fadvise64_64
11462 case TARGET_NR_fadvise64_64:
11463 #endif
11464 #ifdef TARGET_NR_fadvise64
11465 case TARGET_NR_fadvise64:
11466 #endif
11467 #ifdef TARGET_S390X
11468 switch (arg4) {
11469 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11470 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11471 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11472 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11473 default: break;
11475 #endif
11476 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11477 #endif
11478 #endif /* end of 64-bit ABI fadvise handling */
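/*
 * Editor's illustrative sketch, not part of syscall.c: posix_fadvise()
 * returns the error number directly instead of setting errno, which is why
 * every branch above wraps it in -host_to_target_errno() rather than
 * get_errno().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    int fd = open("/etc/hosts", O_RDONLY);
    if (fd >= 0) {
        int err = posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
        if (err != 0) {                  /* the error number, not errno */
            printf("fadvise: %s\n", strerror(err));
        }
    }
    return 0;
}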
11480 #ifdef TARGET_NR_madvise
11481 case TARGET_NR_madvise:
11482 /* A straight passthrough may not be safe because qemu sometimes
11483 turns private file-backed mappings into anonymous mappings.
11484 This will break MADV_DONTNEED.
11485 This is a hint, so ignoring and returning success is ok. */
11486 return 0;
11487 #endif
11488 #ifdef TARGET_NR_fcntl64
11489 case TARGET_NR_fcntl64:
11491 int cmd;
11492 struct flock64 fl;
11493 from_flock64_fn *copyfrom = copy_from_user_flock64;
11494 to_flock64_fn *copyto = copy_to_user_flock64;
11496 #ifdef TARGET_ARM
11497 if (!((CPUARMState *)cpu_env)->eabi) {
11498 copyfrom = copy_from_user_oabi_flock64;
11499 copyto = copy_to_user_oabi_flock64;
11501 #endif
11503 cmd = target_to_host_fcntl_cmd(arg2);
11504 if (cmd == -TARGET_EINVAL) {
11505 return cmd;
11508 switch (arg2) {
11509 case TARGET_F_GETLK64:
11510 ret = copyfrom(&fl, arg3);
11511 if (ret) {
11512 break;
11514 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11515 if (ret == 0) {
11516 ret = copyto(arg3, &fl);
11518 break;
11520 case TARGET_F_SETLK64:
11521 case TARGET_F_SETLKW64:
11522 ret = copyfrom(&fl, arg3);
11523 if (ret) {
11524 break;
11526 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11527 break;
11528 default:
11529 ret = do_fcntl(arg1, arg2, arg3);
11530 break;
11532 return ret;
11534 #endif
11535 #ifdef TARGET_NR_cacheflush
11536 case TARGET_NR_cacheflush:
11537 /* self-modifying code is handled automatically, so nothing needed */
11538 return 0;
11539 #endif
11540 #ifdef TARGET_NR_getpagesize
11541 case TARGET_NR_getpagesize:
11542 return TARGET_PAGE_SIZE;
11543 #endif
11544 case TARGET_NR_gettid:
11545 return get_errno(sys_gettid());
11546 #ifdef TARGET_NR_readahead
11547 case TARGET_NR_readahead:
11548 #if TARGET_ABI_BITS == 32
11549 if (regpairs_aligned(cpu_env, num)) {
11550 arg2 = arg3;
11551 arg3 = arg4;
11552 arg4 = arg5;
11554 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11555 #else
11556 ret = get_errno(readahead(arg1, arg2, arg3));
11557 #endif
11558 return ret;
11559 #endif
11560 #ifdef CONFIG_ATTR
11561 #ifdef TARGET_NR_setxattr
11562 case TARGET_NR_listxattr:
11563 case TARGET_NR_llistxattr:
11565 void *p, *b = 0;
11566 if (arg2) {
11567 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11568 if (!b) {
11569 return -TARGET_EFAULT;
11572 p = lock_user_string(arg1);
11573 if (p) {
11574 if (num == TARGET_NR_listxattr) {
11575 ret = get_errno(listxattr(p, b, arg3));
11576 } else {
11577 ret = get_errno(llistxattr(p, b, arg3));
11579 } else {
11580 ret = -TARGET_EFAULT;
11582 unlock_user(p, arg1, 0);
11583 unlock_user(b, arg2, arg3);
11584 return ret;
11586 case TARGET_NR_flistxattr:
11588 void *b = 0;
11589 if (arg2) {
11590 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11591 if (!b) {
11592 return -TARGET_EFAULT;
11595 ret = get_errno(flistxattr(arg1, b, arg3));
11596 unlock_user(b, arg2, arg3);
11597 return ret;
11599 case TARGET_NR_setxattr:
11600 case TARGET_NR_lsetxattr:
11602 void *p, *n, *v = 0;
11603 if (arg3) {
11604 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11605 if (!v) {
11606 return -TARGET_EFAULT;
11609 p = lock_user_string(arg1);
11610 n = lock_user_string(arg2);
11611 if (p && n) {
11612 if (num == TARGET_NR_setxattr) {
11613 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11614 } else {
11615 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11617 } else {
11618 ret = -TARGET_EFAULT;
11620 unlock_user(p, arg1, 0);
11621 unlock_user(n, arg2, 0);
11622 unlock_user(v, arg3, 0);
11624 return ret;
11625 case TARGET_NR_fsetxattr:
11627 void *n, *v = 0;
11628 if (arg3) {
11629 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11630 if (!v) {
11631 return -TARGET_EFAULT;
11634 n = lock_user_string(arg2);
11635 if (n) {
11636 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11637 } else {
11638 ret = -TARGET_EFAULT;
11640 unlock_user(n, arg2, 0);
11641 unlock_user(v, arg3, 0);
11643 return ret;
11644 case TARGET_NR_getxattr:
11645 case TARGET_NR_lgetxattr:
11647 void *p, *n, *v = 0;
11648 if (arg3) {
11649 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11650 if (!v) {
11651 return -TARGET_EFAULT;
11654 p = lock_user_string(arg1);
11655 n = lock_user_string(arg2);
11656 if (p && n) {
11657 if (num == TARGET_NR_getxattr) {
11658 ret = get_errno(getxattr(p, n, v, arg4));
11659 } else {
11660 ret = get_errno(lgetxattr(p, n, v, arg4));
11662 } else {
11663 ret = -TARGET_EFAULT;
11665 unlock_user(p, arg1, 0);
11666 unlock_user(n, arg2, 0);
11667 unlock_user(v, arg3, arg4);
11669 return ret;
11670 case TARGET_NR_fgetxattr:
11672 void *n, *v = 0;
11673 if (arg3) {
11674 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11675 if (!v) {
11676 return -TARGET_EFAULT;
11679 n = lock_user_string(arg2);
11680 if (n) {
11681 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11682 } else {
11683 ret = -TARGET_EFAULT;
11685 unlock_user(n, arg2, 0);
11686 unlock_user(v, arg3, arg4);
11688 return ret;
11689 case TARGET_NR_removexattr:
11690 case TARGET_NR_lremovexattr:
11692 void *p, *n;
11693 p = lock_user_string(arg1);
11694 n = lock_user_string(arg2);
11695 if (p && n) {
11696 if (num == TARGET_NR_removexattr) {
11697 ret = get_errno(removexattr(p, n));
11698 } else {
11699 ret = get_errno(lremovexattr(p, n));
11701 } else {
11702 ret = -TARGET_EFAULT;
11704 unlock_user(p, arg1, 0);
11705 unlock_user(n, arg2, 0);
11707 return ret;
11708 case TARGET_NR_fremovexattr:
11710 void *n;
11711 n = lock_user_string(arg2);
11712 if (n) {
11713 ret = get_errno(fremovexattr(arg1, n));
11714 } else {
11715 ret = -TARGET_EFAULT;
11717 unlock_user(n, arg2, 0);
11719 return ret;
11720 #endif
11721 #endif /* CONFIG_ATTR */
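/*
 * Editor's illustrative sketch, not part of syscall.c: the usual two-call
 * xattr pattern -- a size query with a NULL buffer, then the real fetch --
 * which is why the cases above tolerate zero-length value buffers.
 * "some-file" is a hypothetical path.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(void)
{
    const char *path = "some-file";              /* hypothetical path */
    setxattr(path, "user.comment", "hi", 2, 0);
    ssize_t len = getxattr(path, "user.comment", NULL, 0);  /* size query */
    if (len > 0) {
        char *buf = malloc(len);
        getxattr(path, "user.comment", buf, len);
        printf("user.comment=%.*s\n", (int)len, buf);
        free(buf);
    }
    return 0;
}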
11722 #ifdef TARGET_NR_set_thread_area
11723 case TARGET_NR_set_thread_area:
11724 #if defined(TARGET_MIPS)
11725 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11726 return 0;
11727 #elif defined(TARGET_CRIS)
11728 if (arg1 & 0xff)
11729 ret = -TARGET_EINVAL;
11730 else {
11731 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11732 ret = 0;
11734 return ret;
11735 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11736 return do_set_thread_area(cpu_env, arg1);
11737 #elif defined(TARGET_M68K)
11739 TaskState *ts = cpu->opaque;
11740 ts->tp_value = arg1;
11741 return 0;
11743 #else
11744 return -TARGET_ENOSYS;
11745 #endif
11746 #endif
11747 #ifdef TARGET_NR_get_thread_area
11748 case TARGET_NR_get_thread_area:
11749 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11750 return do_get_thread_area(cpu_env, arg1);
11751 #elif defined(TARGET_M68K)
11753 TaskState *ts = cpu->opaque;
11754 return ts->tp_value;
11756 #else
11757 return -TARGET_ENOSYS;
11758 #endif
11759 #endif
11760 #ifdef TARGET_NR_getdomainname
11761 case TARGET_NR_getdomainname:
11762 return -TARGET_ENOSYS;
11763 #endif
11765 #ifdef TARGET_NR_clock_settime
11766 case TARGET_NR_clock_settime:
11768 struct timespec ts;
11770 ret = target_to_host_timespec(&ts, arg2);
11771 if (!is_error(ret)) {
11772 ret = get_errno(clock_settime(arg1, &ts));
11774 return ret;
11776 #endif
11777 #ifdef TARGET_NR_clock_settime64
11778 case TARGET_NR_clock_settime64:
11780 struct timespec ts;
11782 ret = target_to_host_timespec64(&ts, arg2);
11783 if (!is_error(ret)) {
11784 ret = get_errno(clock_settime(arg1, &ts));
11786 return ret;
11788 #endif
11789 #ifdef TARGET_NR_clock_gettime
11790 case TARGET_NR_clock_gettime:
11792 struct timespec ts;
11793 ret = get_errno(clock_gettime(arg1, &ts));
11794 if (!is_error(ret)) {
11795 ret = host_to_target_timespec(arg2, &ts);
11797 return ret;
11799 #endif
11800 #ifdef TARGET_NR_clock_gettime64
11801 case TARGET_NR_clock_gettime64:
11803 struct timespec ts;
11804 ret = get_errno(clock_gettime(arg1, &ts));
11805 if (!is_error(ret)) {
11806 ret = host_to_target_timespec64(arg2, &ts);
11808 return ret;
11810 #endif
11811 #ifdef TARGET_NR_clock_getres
11812 case TARGET_NR_clock_getres:
11814 struct timespec ts;
11815 ret = get_errno(clock_getres(arg1, &ts));
11816 if (!is_error(ret)) {
11817 host_to_target_timespec(arg2, &ts);
11819 return ret;
11821 #endif
11822 #ifdef TARGET_NR_clock_nanosleep
11823 case TARGET_NR_clock_nanosleep:
11825 struct timespec ts;
11826 target_to_host_timespec(&ts, arg3);
11827 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11828 &ts, arg4 ? &ts : NULL));
11829 /*
11830 * if the call is interrupted by a signal handler, it fails
11831 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
11832 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
11833 */
11834 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME) {
11835 host_to_target_timespec(arg4, &ts);
11838 return ret;
11840 #endif
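/*
 * Illustrative sketch (not part of the original file): how a guest program
 * would observe the EINTR behaviour emulated above. All names are standard
 * POSIX; only the surrounding scenario is hypothetical.
 *
 *     struct timespec req = { .tv_sec = 10 }, rem;
 *     int err = clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem);
 *     if (err == EINTR) {
 *         // A signal interrupted the sleep; 'rem' holds the unslept time,
 *         // which QEMU filled in via host_to_target_timespec(arg4, &ts).
 *     }
 */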
11842 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11843 case TARGET_NR_set_tid_address:
11844 return get_errno(set_tid_address((int *)g2h(arg1)));
11845 #endif
11847 case TARGET_NR_tkill:
11848 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11850 case TARGET_NR_tgkill:
11851 return get_errno(safe_tgkill((int)arg1, (int)arg2,
11852 target_to_host_signal(arg3)));
11854 #ifdef TARGET_NR_set_robust_list
11855 case TARGET_NR_set_robust_list:
11856 case TARGET_NR_get_robust_list:
11857 /* The ABI for supporting robust futexes has userspace pass
11858 * the kernel a pointer to a linked list which is updated by
11859 * userspace after the syscall; the list is walked by the kernel
11860 * when the thread exits. Since the linked list in QEMU guest
11861 * memory isn't a valid linked list for the host and we have
11862 * no way to reliably intercept the thread-death event, we can't
11863 * support these. Silently return ENOSYS so that guest userspace
11864 * falls back to a non-robust futex implementation (which should
11865 * be OK except in the corner case of the guest crashing while
11866 * holding a mutex that is shared with another process via
11867 * shared memory).
11868 */
11869 return -TARGET_ENOSYS;
11870 #endif
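/*
 * For reference, the ABI described above: the guest hands the kernel the
 * head of a userspace-maintained list (see linux/futex.h). A sketch of the
 * layout the host kernel would have to walk in guest memory:
 *
 *     struct robust_list {
 *         struct robust_list *next;     // guest pointer, guest word size
 *     };
 *     struct robust_list_head {
 *         struct robust_list list;      // circular list of held futexes
 *         long futex_offset;            // offset from entry to futex word
 *         struct robust_list *list_op_pending;
 *     };
 *
 * The 'next' pointers are guest addresses of guest-sized words, which is
 * why the host kernel cannot traverse the list when the thread dies.
 */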
11872 #if defined(TARGET_NR_utimensat)
11873 case TARGET_NR_utimensat:
11874 {
11875 struct timespec *tsp, ts[2];
11876 if (!arg3) {
11877 tsp = NULL;
11878 } else {
11879 target_to_host_timespec(ts, arg3);
11880 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11881 tsp = ts;
11882 }
11883 if (!arg2)
11884 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11885 else {
11886 if (!(p = lock_user_string(arg2))) {
11887 return -TARGET_EFAULT;
11888 }
11889 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11890 unlock_user(p, arg2, 0);
11891 }
11892 }
11893 return ret;
11894 #endif
11895 #ifdef TARGET_NR_futex
11896 case TARGET_NR_futex:
11897 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11898 #endif
11899 #ifdef TARGET_NR_futex_time64
11900 case TARGET_NR_futex_time64:
11901 return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11902 #endif
11903 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11904 case TARGET_NR_inotify_init:
11905 ret = get_errno(sys_inotify_init());
11906 if (ret >= 0) {
11907 fd_trans_register(ret, &target_inotify_trans);
11908 }
11909 return ret;
11910 #endif
11911 #ifdef CONFIG_INOTIFY1
11912 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11913 case TARGET_NR_inotify_init1:
11914 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11915 fcntl_flags_tbl)));
11916 if (ret >= 0) {
11917 fd_trans_register(ret, &target_inotify_trans);
11918 }
11919 return ret;
11920 #endif
11921 #endif
11922 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11923 case TARGET_NR_inotify_add_watch:
11924 p = lock_user_string(arg2);
11925 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11926 unlock_user(p, arg2, 0);
11927 return ret;
11928 #endif
11929 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11930 case TARGET_NR_inotify_rm_watch:
11931 return get_errno(sys_inotify_rm_watch(arg1, arg2));
11932 #endif
11934 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11935 case TARGET_NR_mq_open:
11936 {
11937 struct mq_attr posix_mq_attr;
11938 struct mq_attr *pposix_mq_attr;
11939 int host_flags;
11941 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11942 pposix_mq_attr = NULL;
11943 if (arg4) {
11944 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11945 return -TARGET_EFAULT;
11946 }
11947 pposix_mq_attr = &posix_mq_attr;
11948 }
11949 p = lock_user_string(arg1 - 1);
11950 if (!p) {
11951 return -TARGET_EFAULT;
11952 }
11953 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11954 unlock_user (p, arg1, 0);
11955 }
11956 return ret;
11958 case TARGET_NR_mq_unlink:
11959 p = lock_user_string(arg1 - 1);
11960 if (!p) {
11961 return -TARGET_EFAULT;
11962 }
11963 ret = get_errno(mq_unlink(p));
11964 unlock_user (p, arg1, 0);
11965 return ret;
11967 #ifdef TARGET_NR_mq_timedsend
11968 case TARGET_NR_mq_timedsend:
11969 {
11970 struct timespec ts;
11972 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11973 if (arg5 != 0) {
11974 target_to_host_timespec(&ts, arg5);
11975 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11976 host_to_target_timespec(arg5, &ts);
11977 } else {
11978 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11979 }
11980 unlock_user (p, arg2, arg3);
11981 }
11982 return ret;
11983 #endif
11985 #ifdef TARGET_NR_mq_timedreceive
11986 case TARGET_NR_mq_timedreceive:
11987 {
11988 struct timespec ts;
11989 unsigned int prio;
11991 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11992 if (arg5 != 0) {
11993 target_to_host_timespec(&ts, arg5);
11994 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11995 &prio, &ts));
11996 host_to_target_timespec(arg5, &ts);
11997 } else {
11998 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11999 &prio, NULL));
12000 }
12001 unlock_user (p, arg2, arg3);
12002 if (arg4 != 0)
12003 put_user_u32(prio, arg4);
12004 }
12005 return ret;
12006 #endif
12008 /* Not implemented for now... */
12009 /* case TARGET_NR_mq_notify: */
12010 /* break; */
12012 case TARGET_NR_mq_getsetattr:
12013 {
12014 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12015 ret = 0;
12016 if (arg2 != 0) {
12017 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12018 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12019 &posix_mq_attr_out));
12020 } else if (arg3 != 0) {
12021 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12022 }
12023 if (ret == 0 && arg3 != 0) {
12024 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12025 }
12026 }
12027 return ret;
12028 #endif
12030 #ifdef CONFIG_SPLICE
12031 #ifdef TARGET_NR_tee
12032 case TARGET_NR_tee:
12033 {
12034 ret = get_errno(tee(arg1, arg2, arg3, arg4));
12035 }
12036 return ret;
12037 #endif
12038 #ifdef TARGET_NR_splice
12039 case TARGET_NR_splice:
12040 {
12041 loff_t loff_in, loff_out;
12042 loff_t *ploff_in = NULL, *ploff_out = NULL;
12043 if (arg2) {
12044 if (get_user_u64(loff_in, arg2)) {
12045 return -TARGET_EFAULT;
12046 }
12047 ploff_in = &loff_in;
12048 }
12049 if (arg4) {
12050 if (get_user_u64(loff_out, arg4)) {
12051 return -TARGET_EFAULT;
12052 }
12053 ploff_out = &loff_out;
12054 }
12055 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12056 if (arg2) {
12057 if (put_user_u64(loff_in, arg2)) {
12058 return -TARGET_EFAULT;
12059 }
12060 }
12061 if (arg4) {
12062 if (put_user_u64(loff_out, arg4)) {
12063 return -TARGET_EFAULT;
12064 }
12065 }
12066 }
12067 return ret;
12068 #endif
12069 #ifdef TARGET_NR_vmsplice
12070 case TARGET_NR_vmsplice:
12071 {
12072 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12073 if (vec != NULL) {
12074 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12075 unlock_iovec(vec, arg2, arg3, 0);
12076 } else {
12077 ret = -host_to_target_errno(errno);
12078 }
12079 }
12080 return ret;
12081 #endif
12082 #endif /* CONFIG_SPLICE */
12083 #ifdef CONFIG_EVENTFD
12084 #if defined(TARGET_NR_eventfd)
12085 case TARGET_NR_eventfd:
12086 ret = get_errno(eventfd(arg1, 0));
12087 if (ret >= 0) {
12088 fd_trans_register(ret, &target_eventfd_trans);
12089 }
12090 return ret;
12091 #endif
12092 #if defined(TARGET_NR_eventfd2)
12093 case TARGET_NR_eventfd2:
12094 {
12095 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12096 if (arg2 & TARGET_O_NONBLOCK) {
12097 host_flags |= O_NONBLOCK;
12098 }
12099 if (arg2 & TARGET_O_CLOEXEC) {
12100 host_flags |= O_CLOEXEC;
12101 }
12102 ret = get_errno(eventfd(arg1, host_flags));
12103 if (ret >= 0) {
12104 fd_trans_register(ret, &target_eventfd_trans);
12105 }
12106 return ret;
12107 }
12108 #endif
12109 #endif /* CONFIG_EVENTFD */
12110 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12111 case TARGET_NR_fallocate:
12112 #if TARGET_ABI_BITS == 32
12113 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12114 target_offset64(arg5, arg6)));
12115 #else
12116 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12117 #endif
12118 return ret;
12119 #endif
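/*
 * On 32-bit ABIs a 64-bit file offset arrives split across two syscall
 * argument registers, and target_offset64() glues the halves back together.
 * A minimal sketch of that helper, assuming little-endian register pairs
 * (the real definition lives earlier in this file and also handles
 * big-endian pair order):
 *
 *     static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
 *     {
 *         return ((uint64_t)word1 << 32) | word0;  // low word first
 *     }
 */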
12120 #if defined(CONFIG_SYNC_FILE_RANGE)
12121 #if defined(TARGET_NR_sync_file_range)
12122 case TARGET_NR_sync_file_range:
12123 #if TARGET_ABI_BITS == 32
12124 #if defined(TARGET_MIPS)
12125 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12126 target_offset64(arg5, arg6), arg7));
12127 #else
12128 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12129 target_offset64(arg4, arg5), arg6));
12130 #endif /* !TARGET_MIPS */
12131 #else
12132 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12133 #endif
12134 return ret;
12135 #endif
12136 #if defined(TARGET_NR_sync_file_range2) || \
12137 defined(TARGET_NR_arm_sync_file_range)
12138 #if defined(TARGET_NR_sync_file_range2)
12139 case TARGET_NR_sync_file_range2:
12140 #endif
12141 #if defined(TARGET_NR_arm_sync_file_range)
12142 case TARGET_NR_arm_sync_file_range:
12143 #endif
12144 /* Like sync_file_range, but with the flags argument moved ahead of the 64-bit offsets so each offset can occupy an aligned register pair on 32-bit ABIs */
12145 #if TARGET_ABI_BITS == 32
12146 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12147 target_offset64(arg5, arg6), arg2));
12148 #else
12149 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12150 #endif
12151 return ret;
12152 #endif
12153 #endif
12154 #if defined(TARGET_NR_signalfd4)
12155 case TARGET_NR_signalfd4:
12156 return do_signalfd4(arg1, arg2, arg4);
12157 #endif
12158 #if defined(TARGET_NR_signalfd)
12159 case TARGET_NR_signalfd:
12160 return do_signalfd4(arg1, arg2, 0);
12161 #endif
12162 #if defined(CONFIG_EPOLL)
12163 #if defined(TARGET_NR_epoll_create)
12164 case TARGET_NR_epoll_create:
12165 return get_errno(epoll_create(arg1));
12166 #endif
12167 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12168 case TARGET_NR_epoll_create1:
12169 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12170 #endif
12171 #if defined(TARGET_NR_epoll_ctl)
12172 case TARGET_NR_epoll_ctl:
12173 {
12174 struct epoll_event ep;
12175 struct epoll_event *epp = 0;
12176 if (arg4) {
12177 struct target_epoll_event *target_ep;
12178 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12179 return -TARGET_EFAULT;
12180 }
12181 ep.events = tswap32(target_ep->events);
12182 /* The epoll_data_t union is just opaque data to the kernel,
12183 * so we transfer all 64 bits across and need not worry what
12184 * actual data type it is.
12185 */
12186 ep.data.u64 = tswap64(target_ep->data.u64);
12187 unlock_user_struct(target_ep, arg4, 0);
12188 epp = &ep;
12189 }
12190 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12191 }
12192 #endif
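/*
 * The "opaque data" mentioned above: epoll_data_t (sys/epoll.h) is a union,
 * so copying the widest member moves whichever interpretation the guest
 * chose. Shown here for reference:
 *
 *     typedef union epoll_data {
 *         void *ptr;
 *         int fd;
 *         uint32_t u32;
 *         uint64_t u64;   // copying this member transfers all 64 bits
 *     } epoll_data_t;
 */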
12194 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12195 #if defined(TARGET_NR_epoll_wait)
12196 case TARGET_NR_epoll_wait:
12197 #endif
12198 #if defined(TARGET_NR_epoll_pwait)
12199 case TARGET_NR_epoll_pwait:
12200 #endif
12201 {
12202 struct target_epoll_event *target_ep;
12203 struct epoll_event *ep;
12204 int epfd = arg1;
12205 int maxevents = arg3;
12206 int timeout = arg4;
12208 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12209 return -TARGET_EINVAL;
12210 }
12212 target_ep = lock_user(VERIFY_WRITE, arg2,
12213 maxevents * sizeof(struct target_epoll_event), 1);
12214 if (!target_ep) {
12215 return -TARGET_EFAULT;
12216 }
12218 ep = g_try_new(struct epoll_event, maxevents);
12219 if (!ep) {
12220 unlock_user(target_ep, arg2, 0);
12221 return -TARGET_ENOMEM;
12222 }
12224 switch (num) {
12225 #if defined(TARGET_NR_epoll_pwait)
12226 case TARGET_NR_epoll_pwait:
12227 {
12228 target_sigset_t *target_set;
12229 sigset_t _set, *set = &_set;
12231 if (arg5) {
12232 if (arg6 != sizeof(target_sigset_t)) {
12233 ret = -TARGET_EINVAL;
12234 break;
12235 }
12237 target_set = lock_user(VERIFY_READ, arg5,
12238 sizeof(target_sigset_t), 1);
12239 if (!target_set) {
12240 ret = -TARGET_EFAULT;
12241 break;
12242 }
12243 target_to_host_sigset(set, target_set);
12244 unlock_user(target_set, arg5, 0);
12245 } else {
12246 set = NULL;
12247 }
12249 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12250 set, SIGSET_T_SIZE));
12251 break;
12252 }
12253 #endif
12254 #if defined(TARGET_NR_epoll_wait)
12255 case TARGET_NR_epoll_wait:
12256 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12257 NULL, 0));
12258 break;
12259 #endif
12260 default:
12261 ret = -TARGET_ENOSYS;
12262 }
12263 if (!is_error(ret)) {
12264 int i;
12265 for (i = 0; i < ret; i++) {
12266 target_ep[i].events = tswap32(ep[i].events);
12267 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12268 }
12269 unlock_user(target_ep, arg2,
12270 ret * sizeof(struct target_epoll_event));
12271 } else {
12272 unlock_user(target_ep, arg2, 0);
12273 }
12274 g_free(ep);
12275 return ret;
12276 }
12277 #endif
12278 #endif
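/*
 * The epoll_wait path above shows this file's standard pattern for returning
 * an array to the guest: lock_user(VERIFY_WRITE, ...), byte-swap each field
 * with tswap32()/tswap64(), then unlock_user() with the number of bytes
 * actually written so only that range is copied back. A condensed sketch
 * ('host_buf' and 'n' are hypothetical stand-ins for the host results):
 *
 *     uint32_t *guest = lock_user(VERIFY_WRITE, addr, n * 4, 0);
 *     if (!guest) {
 *         return -TARGET_EFAULT;            // bad guest address range
 *     }
 *     for (int i = 0; i < n; i++) {
 *         guest[i] = tswap32(host_buf[i]);  // host -> target byte order
 *     }
 *     unlock_user(guest, addr, n * 4);      // copy n*4 bytes back to guest
 */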
12279 #ifdef TARGET_NR_prlimit64
12280 case TARGET_NR_prlimit64:
12281 {
12282 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12283 struct target_rlimit64 *target_rnew, *target_rold;
12284 struct host_rlimit64 rnew, rold, *rnewp = 0;
12285 int resource = target_to_host_resource(arg2);
12287 if (arg3 && (resource != RLIMIT_AS &&
12288 resource != RLIMIT_DATA &&
12289 resource != RLIMIT_STACK)) {
12290 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12291 return -TARGET_EFAULT;
12292 }
12293 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12294 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12295 unlock_user_struct(target_rnew, arg3, 0);
12296 rnewp = &rnew;
12297 }
12299 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12300 if (!is_error(ret) && arg4) {
12301 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12302 return -TARGET_EFAULT;
12303 }
12304 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12305 target_rold->rlim_max = tswap64(rold.rlim_max);
12306 unlock_user_struct(target_rold, arg4, 1);
12307 }
12308 return ret;
12309 }
12310 #endif
12311 #ifdef TARGET_NR_gethostname
12312 case TARGET_NR_gethostname:
12313 {
12314 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12315 if (name) {
12316 ret = get_errno(gethostname(name, arg2));
12317 unlock_user(name, arg1, arg2);
12318 } else {
12319 ret = -TARGET_EFAULT;
12320 }
12321 return ret;
12322 }
12323 #endif
12324 #ifdef TARGET_NR_atomic_cmpxchg_32
12325 case TARGET_NR_atomic_cmpxchg_32:
12326 {
12327 /* should use start_exclusive from main.c */
12328 abi_ulong mem_value;
12329 if (get_user_u32(mem_value, arg6)) {
12330 target_siginfo_t info;
12331 info.si_signo = SIGSEGV;
12332 info.si_errno = 0;
12333 info.si_code = TARGET_SEGV_MAPERR;
12334 info._sifields._sigfault._addr = arg6;
12335 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12336 QEMU_SI_FAULT, &info);
12337 ret = 0xdeadbeef;
12338 }
12340 if (mem_value == arg2)
12341 put_user_u32(arg1, arg6);
12342 return mem_value;
12343 }
12344 #endif
12345 #ifdef TARGET_NR_atomic_barrier
12346 case TARGET_NR_atomic_barrier:
12347 /* Like the kernel implementation and the
12348 qemu arm barrier, no-op this? */
12349 return 0;
12350 #endif
12352 #ifdef TARGET_NR_timer_create
12353 case TARGET_NR_timer_create:
12354 {
12355 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12357 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12359 int clkid = arg1;
12360 int timer_index = next_free_host_timer();
12362 if (timer_index < 0) {
12363 ret = -TARGET_EAGAIN;
12364 } else {
12365 timer_t *phtimer = g_posix_timers + timer_index;
12367 if (arg2) {
12368 phost_sevp = &host_sevp;
12369 ret = target_to_host_sigevent(phost_sevp, arg2);
12370 if (ret != 0) {
12371 return ret;
12372 }
12373 }
12375 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12376 if (ret) {
12377 phtimer = NULL;
12378 } else {
12379 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12380 return -TARGET_EFAULT;
12381 }
12382 }
12383 }
12384 return ret;
12385 }
12386 #endif
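/*
 * The value stored through put_user() above is not a host timer_t but a
 * small index into g_posix_timers tagged with TIMER_MAGIC in the high bits.
 * get_timer_id(), defined earlier in this file, reverses the encoding;
 * roughly (sketch for illustration, see the actual definition):
 *
 *     static target_timer_t get_timer_id(abi_long arg)
 *     {
 *         target_timer_t timerid = arg;
 *         if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
 *             return -TARGET_EINVAL;   // not a handle we issued
 *         }
 *         timerid &= 0xffff;           // recover the g_posix_timers index
 *         if (timerid >= ARRAY_SIZE(g_posix_timers)) {
 *             return -TARGET_EINVAL;
 *         }
 *         return timerid;
 *     }
 */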
12388 #ifdef TARGET_NR_timer_settime
12389 case TARGET_NR_timer_settime:
12390 {
12391 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12392 * struct itimerspec * old_value */
12393 target_timer_t timerid = get_timer_id(arg1);
12395 if (timerid < 0) {
12396 ret = timerid;
12397 } else if (arg3 == 0) {
12398 ret = -TARGET_EINVAL;
12399 } else {
12400 timer_t htimer = g_posix_timers[timerid];
12401 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12403 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12404 return -TARGET_EFAULT;
12405 }
12406 ret = get_errno(
12407 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12408 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12409 return -TARGET_EFAULT;
12410 }
12411 }
12412 return ret;
12413 }
12414 #endif
12416 #ifdef TARGET_NR_timer_gettime
12417 case TARGET_NR_timer_gettime:
12418 {
12419 /* args: timer_t timerid, struct itimerspec *curr_value */
12420 target_timer_t timerid = get_timer_id(arg1);
12422 if (timerid < 0) {
12423 ret = timerid;
12424 } else if (!arg2) {
12425 ret = -TARGET_EFAULT;
12426 } else {
12427 timer_t htimer = g_posix_timers[timerid];
12428 struct itimerspec hspec;
12429 ret = get_errno(timer_gettime(htimer, &hspec));
12431 if (host_to_target_itimerspec(arg2, &hspec)) {
12432 ret = -TARGET_EFAULT;
12433 }
12434 }
12435 return ret;
12436 }
12437 #endif
12439 #ifdef TARGET_NR_timer_getoverrun
12440 case TARGET_NR_timer_getoverrun:
12441 {
12442 /* args: timer_t timerid */
12443 target_timer_t timerid = get_timer_id(arg1);
12445 if (timerid < 0) {
12446 ret = timerid;
12447 } else {
12448 timer_t htimer = g_posix_timers[timerid];
12449 ret = get_errno(timer_getoverrun(htimer));
12450 }
12451 return ret;
12452 }
12453 #endif
12455 #ifdef TARGET_NR_timer_delete
12456 case TARGET_NR_timer_delete:
12457 {
12458 /* args: timer_t timerid */
12459 target_timer_t timerid = get_timer_id(arg1);
12461 if (timerid < 0) {
12462 ret = timerid;
12463 } else {
12464 timer_t htimer = g_posix_timers[timerid];
12465 ret = get_errno(timer_delete(htimer));
12466 g_posix_timers[timerid] = 0;
12467 }
12468 return ret;
12469 }
12470 #endif
12472 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12473 case TARGET_NR_timerfd_create:
12474 return get_errno(timerfd_create(arg1,
12475 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12476 #endif
12478 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12479 case TARGET_NR_timerfd_gettime:
12480 {
12481 struct itimerspec its_curr;
12483 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12485 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12486 return -TARGET_EFAULT;
12487 }
12488 }
12489 return ret;
12490 #endif
12492 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12493 case TARGET_NR_timerfd_settime:
12494 {
12495 struct itimerspec its_new, its_old, *p_new;
12497 if (arg3) {
12498 if (target_to_host_itimerspec(&its_new, arg3)) {
12499 return -TARGET_EFAULT;
12500 }
12501 p_new = &its_new;
12502 } else {
12503 p_new = NULL;
12504 }
12506 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12508 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12509 return -TARGET_EFAULT;
12510 }
12511 }
12512 return ret;
12513 #endif
12515 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12516 case TARGET_NR_ioprio_get:
12517 return get_errno(ioprio_get(arg1, arg2));
12518 #endif
12520 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12521 case TARGET_NR_ioprio_set:
12522 return get_errno(ioprio_set(arg1, arg2, arg3));
12523 #endif
12525 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12526 case TARGET_NR_setns:
12527 return get_errno(setns(arg1, arg2));
12528 #endif
12529 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12530 case TARGET_NR_unshare:
12531 return get_errno(unshare(arg1));
12532 #endif
12533 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12534 case TARGET_NR_kcmp:
12535 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12536 #endif
12537 #ifdef TARGET_NR_swapcontext
12538 case TARGET_NR_swapcontext:
12539 /* PowerPC specific. */
12540 return do_swapcontext(cpu_env, arg1, arg2, arg3);
12541 #endif
12542 #ifdef TARGET_NR_memfd_create
12543 case TARGET_NR_memfd_create:
12544 p = lock_user_string(arg1);
12545 if (!p) {
12546 return -TARGET_EFAULT;
12547 }
12548 ret = get_errno(memfd_create(p, arg2));
12549 fd_trans_unregister(ret);
12550 unlock_user(p, arg1, 0);
12551 return ret;
12552 #endif
12553 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12554 case TARGET_NR_membarrier:
12555 return get_errno(membarrier(arg1, arg2));
12556 #endif
12558 default:
12559 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12560 return -TARGET_ENOSYS;
12561 }
12562 return ret;
12563 }
12565 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12566 abi_long arg2, abi_long arg3, abi_long arg4,
12567 abi_long arg5, abi_long arg6, abi_long arg7,
12568 abi_long arg8)
12569 {
12570 CPUState *cpu = env_cpu(cpu_env);
12571 abi_long ret;
12573 #ifdef DEBUG_ERESTARTSYS
12574 /* Debug-only code for exercising the syscall-restart code paths
12575 * in the per-architecture cpu main loops: restart every syscall
12576 * the guest makes once before letting it through.
12577 */
12578 {
12579 static bool flag;
12580 flag = !flag;
12581 if (flag) {
12582 return -TARGET_ERESTARTSYS;
12583 }
12584 }
12585 #endif
12587 record_syscall_start(cpu, num, arg1,
12588 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12590 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12591 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12592 }
12594 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12595 arg5, arg6, arg7, arg8);
12597 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12598 print_syscall_ret(num, ret, arg1, arg2, arg3, arg4, arg5, arg6);
12599 }
12601 record_syscall_return(cpu, num, ret);
12602 return ret;
12603 }
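/*
 * Usage note: the LOG_STRACE branches above are what qemu-user's "-strace"
 * switch (or the QEMU_STRACE environment variable) turns on, logging each
 * guest syscall and its return value around the do_syscall1() dispatch.
 */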