/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef CONFIG_KCOV
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
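
/*
 * For orientation (typical glibc behaviour, not something QEMU relies on):
 * pthread_create() passes CLONE_THREAD_FLAGS plus CLONE_SETTLS,
 * CLONE_PARENT_SETTID and CLONE_CHILD_CLEARTID, which lands in the
 * "looks like pthread_create()" bucket above, while fork() passes only
 * SIGCHLD in the CSIGNAL bits and so matches the fork masks.
 */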
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH    _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT   _IOR('r', 2, struct linux_dirent [2])
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
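
/*
 * Illustrative expansion (not an extra definition): for example
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * produces
 *     static int sys_getcwd1 (char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 * where __NR_sys_getcwd1 is aliased to __NR_getcwd just below; each wrapper
 * is a thin, typed front end for the raw host syscall number, usable where
 * the host libc provides no wrapper of its own.
 */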
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
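
/*
 * Sketch only (answering the FIXME above, not code that is wired in): the
 * slot claim could be made race-free without a lock by claiming the entry
 * with a compare-and-swap, e.g. using QEMU's atomic helpers from
 * "qemu/atomic.h":
 *
 *     if (atomic_cmpxchg(&g_posix_timers[k], 0, (timer_t) 1) == 0) {
 *         return k;
 *     }
 *
 * so two guest threads racing through timer_create() cannot both observe
 * slot k as free.
 */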
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN] = TARGET_EAGAIN,
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [EDQUOT] = TARGET_EDQUOT,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG] = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL] = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON] = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
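
/*
 * Usage sketch: syscall emulation paths wrap raw host calls so that guest
 * code always sees errnos in the target's numbering, e.g.
 *
 *     ret = get_errno(sys_getcwd1(buf, size));
 *
 * returns the host result unchanged on success and, on failure, the host
 * errno translated to the corresponding negative TARGET_* value.
 */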
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
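
/*
 * Note (contract of the per-host safe_syscall() implementation, summarised
 * here from its documentation elsewhere in linux-user): unlike the plain
 * _syscallN wrappers above, safe_##name blocks in the host kernel in a way
 * that lets a guest signal interrupt it; if a signal is pending, the call
 * returns -1 with errno set to TARGET_ERESTARTSYS so the caller can restart
 * the guest syscall, closing the race between the signal check and the
 * host syscall.
 */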
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname,
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options,
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop,
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val,
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val,
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#ifdef TARGET_NR_rt_sigtimedwait
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedsend
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedreceive
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
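
/*
 * The explicit mapping matters because the SOCK_* numbering is not
 * universal: MIPS, for example, historically uses different values for
 * SOCK_STREAM/SOCK_DGRAM and for the SOCK_NONBLOCK flag than most other
 * Linux ports, so host and target constants cannot be assumed equal.
 */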
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunk).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
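
/*
 * Layout example (assuming TARGET_ABI_BITS == 32): guest fd 33 lives in
 * abi_ulong word 1, bit 1 of the guest fd_set, so copy_from_user_fdset()
 * byte-swaps each word with __get_user() and then walks the bits to rebuild
 * the host fd_set, and copy_to_user_fdset() does the reverse.
 */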
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
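
/*
 * Worked example: on an Alpha host (HOST_HZ == 1024) emulating a 100 Hz
 * target, 2048 host ticks are reported to the guest as
 * 2048 * 100 / 1024 = 200 ticks; the int64_t cast keeps the intermediate
 * product from overflowing when long is 32 bits.
 */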
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
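
/*
 * The pattern used throughout these copiers: lock_user_struct() validates
 * and maps the guest address (the final argument says whether the guest
 * contents must be copied in), __get_user()/__put_user() move individual
 * fields with the required byte-swapping, and unlock_user_struct()'s last
 * argument says whether the data must be copied back to the guest.
 */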
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop)
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
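
/*
 * Implementation note: select() is funnelled through safe_pselect6() with
 * the timeout converted to a struct timespec, so the guest-visible
 * semantics (including writing back the remaining time, which Linux
 * select() does) come from a single restartable host syscall.
 */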
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          "Unsupported ancillary data %d/%d: "
                          "unhandled msg size\n",
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
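
/*
 * Example of what the SCM_RIGHTS branch above handles: a guest sendmsg()
 * passing 3 file descriptors carries a payload of 3 * sizeof(int) bytes;
 * since int is 32 bits on both the host and Linux targets, only the byte
 * order needs fixing, which __get_user() does element by element.
 */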
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
1916 tgt_space = msg_controllen;
1918 msg_controllen -= tgt_space;
1919 space += tgt_space;
1920 cmsg = CMSG_NXTHDR(msgh, cmsg);
1921 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1922 target_cmsg_start);
1924 unlock_user(target_cmsg, target_cmsg_addr, space);
1925 the_end:
1926 target_msgh->msg_controllen = tswapal(space);
1927 return 0;
1930 /* do_setsockopt() must return target values and target errnos. */
1931 static abi_long do_setsockopt(int sockfd, int level, int optname,
1932 abi_ulong optval_addr, socklen_t optlen)
1934 abi_long ret;
1935 int val;
1936 struct ip_mreqn *ip_mreq;
1937 struct ip_mreq_source *ip_mreq_source;
1939 switch(level) {
1940 case SOL_TCP:
1941 /* TCP options all take an 'int' value. */
1942 if (optlen < sizeof(uint32_t))
1943 return -TARGET_EINVAL;
1945 if (get_user_u32(val, optval_addr))
1946 return -TARGET_EFAULT;
1947 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1948 break;
1949 case SOL_IP:
1950 switch(optname) {
1951 case IP_TOS:
1952 case IP_TTL:
1953 case IP_HDRINCL:
1954 case IP_ROUTER_ALERT:
1955 case IP_RECVOPTS:
1956 case IP_RETOPTS:
1957 case IP_PKTINFO:
1958 case IP_MTU_DISCOVER:
1959 case IP_RECVERR:
1960 case IP_RECVTTL:
1961 case IP_RECVTOS:
1962 #ifdef IP_FREEBIND
1963 case IP_FREEBIND:
1964 #endif
1965 case IP_MULTICAST_TTL:
1966 case IP_MULTICAST_LOOP:
1967 val = 0;
1968 if (optlen >= sizeof(uint32_t)) {
1969 if (get_user_u32(val, optval_addr))
1970 return -TARGET_EFAULT;
1971 } else if (optlen >= 1) {
1972 if (get_user_u8(val, optval_addr))
1973 return -TARGET_EFAULT;
1975 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1976 break;
1977 case IP_ADD_MEMBERSHIP:
1978 case IP_DROP_MEMBERSHIP:
1979 if (optlen < sizeof (struct target_ip_mreq) ||
1980 optlen > sizeof (struct target_ip_mreqn))
1981 return -TARGET_EINVAL;
1983 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1984 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1985 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1986 break;
1988 case IP_BLOCK_SOURCE:
1989 case IP_UNBLOCK_SOURCE:
1990 case IP_ADD_SOURCE_MEMBERSHIP:
1991 case IP_DROP_SOURCE_MEMBERSHIP:
1992 if (optlen != sizeof (struct target_ip_mreq_source))
1993 return -TARGET_EINVAL;
1995 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1996 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1997 unlock_user (ip_mreq_source, optval_addr, 0);
1998 break;
2000 default:
2001 goto unimplemented;
2003 break;
2004 case SOL_IPV6:
2005 switch (optname) {
2006 case IPV6_MTU_DISCOVER:
2007 case IPV6_MTU:
2008 case IPV6_V6ONLY:
2009 case IPV6_RECVPKTINFO:
2010 case IPV6_UNICAST_HOPS:
2011 case IPV6_MULTICAST_HOPS:
2012 case IPV6_MULTICAST_LOOP:
2013 case IPV6_RECVERR:
2014 case IPV6_RECVHOPLIMIT:
2015 case IPV6_2292HOPLIMIT:
2016 case IPV6_CHECKSUM:
2017 case IPV6_ADDRFORM:
2018 case IPV6_2292PKTINFO:
2019 case IPV6_RECVTCLASS:
2020 case IPV6_RECVRTHDR:
2021 case IPV6_2292RTHDR:
2022 case IPV6_RECVHOPOPTS:
2023 case IPV6_2292HOPOPTS:
2024 case IPV6_RECVDSTOPTS:
2025 case IPV6_2292DSTOPTS:
2026 case IPV6_TCLASS:
2027 #ifdef IPV6_RECVPATHMTU
2028 case IPV6_RECVPATHMTU:
2029 #endif
2030 #ifdef IPV6_TRANSPARENT
2031 case IPV6_TRANSPARENT:
2032 #endif
2033 #ifdef IPV6_FREEBIND
2034 case IPV6_FREEBIND:
2035 #endif
2036 #ifdef IPV6_RECVORIGDSTADDR
2037 case IPV6_RECVORIGDSTADDR:
2038 #endif
2039 val = 0;
2040 if (optlen < sizeof(uint32_t)) {
2041 return -TARGET_EINVAL;
2043 if (get_user_u32(val, optval_addr)) {
2044 return -TARGET_EFAULT;
2046 ret = get_errno(setsockopt(sockfd, level, optname,
2047 &val, sizeof(val)));
2048 break;
2049 case IPV6_PKTINFO:
2051 struct in6_pktinfo pki;
2053 if (optlen < sizeof(pki)) {
2054 return -TARGET_EINVAL;
2057 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2058 return -TARGET_EFAULT;
2061 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2063 ret = get_errno(setsockopt(sockfd, level, optname,
2064 &pki, sizeof(pki)));
2065 break;
2067 case IPV6_ADD_MEMBERSHIP:
2068 case IPV6_DROP_MEMBERSHIP:
2070 struct ipv6_mreq ipv6mreq;
2072 if (optlen < sizeof(ipv6mreq)) {
2073 return -TARGET_EINVAL;
2076 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2077 return -TARGET_EFAULT;
2080 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2082 ret = get_errno(setsockopt(sockfd, level, optname,
2083 &ipv6mreq, sizeof(ipv6mreq)));
2084 break;
2086 default:
2087 goto unimplemented;
2089 break;
2090 case SOL_ICMPV6:
2091 switch (optname) {
2092 case ICMPV6_FILTER:
2094 struct icmp6_filter icmp6f;
2096 if (optlen > sizeof(icmp6f)) {
2097 optlen = sizeof(icmp6f);
2100 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2101 return -TARGET_EFAULT;
2104 for (val = 0; val < 8; val++) {
2105 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2108 ret = get_errno(setsockopt(sockfd, level, optname,
2109 &icmp6f, optlen));
2110 break;
2112 default:
2113 goto unimplemented;
2115 break;
2116 case SOL_RAW:
2117 switch (optname) {
2118 case ICMP_FILTER:
2119 case IPV6_CHECKSUM:
2120 /* these take a u32 value */
2121 if (optlen < sizeof(uint32_t)) {
2122 return -TARGET_EINVAL;
2125 if (get_user_u32(val, optval_addr)) {
2126 return -TARGET_EFAULT;
2128 ret = get_errno(setsockopt(sockfd, level, optname,
2129 &val, sizeof(val)));
2130 break;
2132 default:
2133 goto unimplemented;
2135 break;
2136 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2137 case SOL_ALG:
2138 switch (optname) {
2139 case ALG_SET_KEY:
2141 char *alg_key = g_malloc(optlen);
2143 if (!alg_key) {
2144 return -TARGET_ENOMEM;
2146 if (copy_from_user(alg_key, optval_addr, optlen)) {
2147 g_free(alg_key);
2148 return -TARGET_EFAULT;
2150 ret = get_errno(setsockopt(sockfd, level, optname,
2151 alg_key, optlen));
2152 g_free(alg_key);
2153 break;
2155 case ALG_SET_AEAD_AUTHSIZE:
2157 ret = get_errno(setsockopt(sockfd, level, optname,
2158 NULL, optlen));
2159 break;
2161 default:
2162 goto unimplemented;
2164 break;
2165 #endif
2166 case TARGET_SOL_SOCKET:
2167 switch (optname) {
2168 case TARGET_SO_RCVTIMEO:
2170 struct timeval tv;
2172 optname = SO_RCVTIMEO;
2174 set_timeout:
2175 if (optlen != sizeof(struct target_timeval)) {
2176 return -TARGET_EINVAL;
2179 if (copy_from_user_timeval(&tv, optval_addr)) {
2180 return -TARGET_EFAULT;
2183 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2184 &tv, sizeof(tv)));
2185 return ret;
2187 case TARGET_SO_SNDTIMEO:
2188 optname = SO_SNDTIMEO;
2189 goto set_timeout;
2190 case TARGET_SO_ATTACH_FILTER:
2192 struct target_sock_fprog *tfprog;
2193 struct target_sock_filter *tfilter;
2194 struct sock_fprog fprog;
2195 struct sock_filter *filter;
2196 int i;
2198 if (optlen != sizeof(*tfprog)) {
2199 return -TARGET_EINVAL;
2201 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2202 return -TARGET_EFAULT;
2204 if (!lock_user_struct(VERIFY_READ, tfilter,
2205 tswapal(tfprog->filter), 0)) {
2206 unlock_user_struct(tfprog, optval_addr, 1);
2207 return -TARGET_EFAULT;
2210 fprog.len = tswap16(tfprog->len);
2211 filter = g_try_new(struct sock_filter, fprog.len);
2212 if (filter == NULL) {
2213 unlock_user_struct(tfilter, tfprog->filter, 1);
2214 unlock_user_struct(tfprog, optval_addr, 1);
2215 return -TARGET_ENOMEM;
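/* Note (added for clarity): code and k are multi-byte fields and need
 * byte-swapping; jt and jf are single bytes, so the loop below copies
 * them through unchanged. */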
2217 for (i = 0; i < fprog.len; i++) {
2218 filter[i].code = tswap16(tfilter[i].code);
2219 filter[i].jt = tfilter[i].jt;
2220 filter[i].jf = tfilter[i].jf;
2221 filter[i].k = tswap32(tfilter[i].k);
2223 fprog.filter = filter;
2225 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2226 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2227 g_free(filter);
2229 unlock_user_struct(tfilter, tfprog->filter, 1);
2230 unlock_user_struct(tfprog, optval_addr, 1);
2231 return ret;
2233 case TARGET_SO_BINDTODEVICE:
2235 char *dev_ifname, *addr_ifname;
2237 if (optlen > IFNAMSIZ - 1) {
2238 optlen = IFNAMSIZ - 1;
2240 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2241 if (!dev_ifname) {
2242 return -TARGET_EFAULT;
2244 optname = SO_BINDTODEVICE;
2245 addr_ifname = alloca(IFNAMSIZ);
2246 memcpy(addr_ifname, dev_ifname, optlen);
2247 addr_ifname[optlen] = 0;
2248 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2249 addr_ifname, optlen));
2250 unlock_user (dev_ifname, optval_addr, 0);
2251 return ret;
2253 case TARGET_SO_LINGER:
2255 struct linger lg;
2256 struct target_linger *tlg;
2258 if (optlen != sizeof(struct target_linger)) {
2259 return -TARGET_EINVAL;
2261 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2262 return -TARGET_EFAULT;
2264 __get_user(lg.l_onoff, &tlg->l_onoff);
2265 __get_user(lg.l_linger, &tlg->l_linger);
2266 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2267 &lg, sizeof(lg)));
2268 unlock_user_struct(tlg, optval_addr, 0);
2269 return ret;
2271 /* Options with 'int' argument. */
2272 case TARGET_SO_DEBUG:
2273 optname = SO_DEBUG;
2274 break;
2275 case TARGET_SO_REUSEADDR:
2276 optname = SO_REUSEADDR;
2277 break;
2278 #ifdef SO_REUSEPORT
2279 case TARGET_SO_REUSEPORT:
2280 optname = SO_REUSEPORT;
2281 break;
2282 #endif
2283 case TARGET_SO_TYPE:
2284 optname = SO_TYPE;
2285 break;
2286 case TARGET_SO_ERROR:
2287 optname = SO_ERROR;
2288 break;
2289 case TARGET_SO_DONTROUTE:
2290 optname = SO_DONTROUTE;
2291 break;
2292 case TARGET_SO_BROADCAST:
2293 optname = SO_BROADCAST;
2294 break;
2295 case TARGET_SO_SNDBUF:
2296 optname = SO_SNDBUF;
2297 break;
2298 case TARGET_SO_SNDBUFFORCE:
2299 optname = SO_SNDBUFFORCE;
2300 break;
2301 case TARGET_SO_RCVBUF:
2302 optname = SO_RCVBUF;
2303 break;
2304 case TARGET_SO_RCVBUFFORCE:
2305 optname = SO_RCVBUFFORCE;
2306 break;
2307 case TARGET_SO_KEEPALIVE:
2308 optname = SO_KEEPALIVE;
2309 break;
2310 case TARGET_SO_OOBINLINE:
2311 optname = SO_OOBINLINE;
2312 break;
2313 case TARGET_SO_NO_CHECK:
2314 optname = SO_NO_CHECK;
2315 break;
2316 case TARGET_SO_PRIORITY:
2317 optname = SO_PRIORITY;
2318 break;
2319 #ifdef SO_BSDCOMPAT
2320 case TARGET_SO_BSDCOMPAT:
2321 optname = SO_BSDCOMPAT;
2322 break;
2323 #endif
2324 case TARGET_SO_PASSCRED:
2325 optname = SO_PASSCRED;
2326 break;
2327 case TARGET_SO_PASSSEC:
2328 optname = SO_PASSSEC;
2329 break;
2330 case TARGET_SO_TIMESTAMP:
2331 optname = SO_TIMESTAMP;
2332 break;
2333 case TARGET_SO_RCVLOWAT:
2334 optname = SO_RCVLOWAT;
2335 break;
2336 default:
2337 goto unimplemented;
2339 if (optlen < sizeof(uint32_t))
2340 return -TARGET_EINVAL;
2342 if (get_user_u32(val, optval_addr))
2343 return -TARGET_EFAULT;
2344 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2345 break;
2346 #ifdef SOL_NETLINK
2347 case SOL_NETLINK:
2348 switch (optname) {
2349 case NETLINK_PKTINFO:
2350 case NETLINK_ADD_MEMBERSHIP:
2351 case NETLINK_DROP_MEMBERSHIP:
2352 case NETLINK_BROADCAST_ERROR:
2353 case NETLINK_NO_ENOBUFS:
2354 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2355 case NETLINK_LISTEN_ALL_NSID:
2356 case NETLINK_CAP_ACK:
2357 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2359 case NETLINK_EXT_ACK:
2360 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2361 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2362 case NETLINK_GET_STRICT_CHK:
2363 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2364 break;
2365 default:
2366 goto unimplemented;
2368 val = 0;
2369 if (optlen < sizeof(uint32_t)) {
2370 return -TARGET_EINVAL;
2372 if (get_user_u32(val, optval_addr)) {
2373 return -TARGET_EFAULT;
2375 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2376 sizeof(val)));
2377 break;
2378 #endif /* SOL_NETLINK */
2379 default:
2380 unimplemented:
2381 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2382 level, optname);
2383 ret = -TARGET_ENOPROTOOPT;
2385 return ret;
2388 /* do_getsockopt() must return target values and target errnos. */
2389 static abi_long do_getsockopt(int sockfd, int level, int optname,
2390 abi_ulong optval_addr, abi_ulong optlen)
2392 abi_long ret;
2393 int len, val;
2394 socklen_t lv;
2396 switch(level) {
2397 case TARGET_SOL_SOCKET:
2398 level = SOL_SOCKET;
2399 switch (optname) {
2400 /* These don't just return a single integer */
2401 case TARGET_SO_PEERNAME:
2402 goto unimplemented;
2403 case TARGET_SO_RCVTIMEO: {
2404 struct timeval tv;
2405 socklen_t tvlen;
2407 optname = SO_RCVTIMEO;
2409 get_timeout:
2410 if (get_user_u32(len, optlen)) {
2411 return -TARGET_EFAULT;
2413 if (len < 0) {
2414 return -TARGET_EINVAL;
2417 tvlen = sizeof(tv);
2418 ret = get_errno(getsockopt(sockfd, level, optname,
2419 &tv, &tvlen));
2420 if (ret < 0) {
2421 return ret;
2423 if (len > sizeof(struct target_timeval)) {
2424 len = sizeof(struct target_timeval);
2426 if (copy_to_user_timeval(optval_addr, &tv)) {
2427 return -TARGET_EFAULT;
2429 if (put_user_u32(len, optlen)) {
2430 return -TARGET_EFAULT;
2432 break;
2434 case TARGET_SO_SNDTIMEO:
2435 optname = SO_SNDTIMEO;
2436 goto get_timeout;
2437 case TARGET_SO_PEERCRED: {
2438 struct ucred cr;
2439 socklen_t crlen;
2440 struct target_ucred *tcr;
2442 if (get_user_u32(len, optlen)) {
2443 return -TARGET_EFAULT;
2445 if (len < 0) {
2446 return -TARGET_EINVAL;
2449 crlen = sizeof(cr);
2450 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2451 &cr, &crlen));
2452 if (ret < 0) {
2453 return ret;
2455 if (len > crlen) {
2456 len = crlen;
2458 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2459 return -TARGET_EFAULT;
2461 __put_user(cr.pid, &tcr->pid);
2462 __put_user(cr.uid, &tcr->uid);
2463 __put_user(cr.gid, &tcr->gid);
2464 unlock_user_struct(tcr, optval_addr, 1);
2465 if (put_user_u32(len, optlen)) {
2466 return -TARGET_EFAULT;
2468 break;
2470 case TARGET_SO_PEERSEC: {
2471 char *name;
2473 if (get_user_u32(len, optlen)) {
2474 return -TARGET_EFAULT;
2476 if (len < 0) {
2477 return -TARGET_EINVAL;
2479 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2480 if (!name) {
2481 return -TARGET_EFAULT;
2483 lv = len;
2484 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2485 name, &lv));
2486 if (put_user_u32(lv, optlen)) {
2487 ret = -TARGET_EFAULT;
2489 unlock_user(name, optval_addr, lv);
2490 break;
2492 case TARGET_SO_LINGER:
2494 struct linger lg;
2495 socklen_t lglen;
2496 struct target_linger *tlg;
2498 if (get_user_u32(len, optlen)) {
2499 return -TARGET_EFAULT;
2501 if (len < 0) {
2502 return -TARGET_EINVAL;
2505 lglen = sizeof(lg);
2506 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2507 &lg, &lglen));
2508 if (ret < 0) {
2509 return ret;
2511 if (len > lglen) {
2512 len = lglen;
2514 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2515 return -TARGET_EFAULT;
2517 __put_user(lg.l_onoff, &tlg->l_onoff);
2518 __put_user(lg.l_linger, &tlg->l_linger);
2519 unlock_user_struct(tlg, optval_addr, 1);
2520 if (put_user_u32(len, optlen)) {
2521 return -TARGET_EFAULT;
2523 break;
2525 /* Options with 'int' argument. */
2526 case TARGET_SO_DEBUG:
2527 optname = SO_DEBUG;
2528 goto int_case;
2529 case TARGET_SO_REUSEADDR:
2530 optname = SO_REUSEADDR;
2531 goto int_case;
2532 #ifdef SO_REUSEPORT
2533 case TARGET_SO_REUSEPORT:
2534 optname = SO_REUSEPORT;
2535 goto int_case;
2536 #endif
2537 case TARGET_SO_TYPE:
2538 optname = SO_TYPE;
2539 goto int_case;
2540 case TARGET_SO_ERROR:
2541 optname = SO_ERROR;
2542 goto int_case;
2543 case TARGET_SO_DONTROUTE:
2544 optname = SO_DONTROUTE;
2545 goto int_case;
2546 case TARGET_SO_BROADCAST:
2547 optname = SO_BROADCAST;
2548 goto int_case;
2549 case TARGET_SO_SNDBUF:
2550 optname = SO_SNDBUF;
2551 goto int_case;
2552 case TARGET_SO_RCVBUF:
2553 optname = SO_RCVBUF;
2554 goto int_case;
2555 case TARGET_SO_KEEPALIVE:
2556 optname = SO_KEEPALIVE;
2557 goto int_case;
2558 case TARGET_SO_OOBINLINE:
2559 optname = SO_OOBINLINE;
2560 goto int_case;
2561 case TARGET_SO_NO_CHECK:
2562 optname = SO_NO_CHECK;
2563 goto int_case;
2564 case TARGET_SO_PRIORITY:
2565 optname = SO_PRIORITY;
2566 goto int_case;
2567 #ifdef SO_BSDCOMPAT
2568 case TARGET_SO_BSDCOMPAT:
2569 optname = SO_BSDCOMPAT;
2570 goto int_case;
2571 #endif
2572 case TARGET_SO_PASSCRED:
2573 optname = SO_PASSCRED;
2574 goto int_case;
2575 case TARGET_SO_TIMESTAMP:
2576 optname = SO_TIMESTAMP;
2577 goto int_case;
2578 case TARGET_SO_RCVLOWAT:
2579 optname = SO_RCVLOWAT;
2580 goto int_case;
2581 case TARGET_SO_ACCEPTCONN:
2582 optname = SO_ACCEPTCONN;
2583 goto int_case;
2584 default:
2585 goto int_case;
2587 break;
2588 case SOL_TCP:
2589 /* TCP options all take an 'int' value. */
2590 int_case:
2591 if (get_user_u32(len, optlen))
2592 return -TARGET_EFAULT;
2593 if (len < 0)
2594 return -TARGET_EINVAL;
2595 lv = sizeof(lv);
2596 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2597 if (ret < 0)
2598 return ret;
2599 if (optname == SO_TYPE) {
2600 val = host_to_target_sock_type(val);
2602 if (len > lv)
2603 len = lv;
2604 if (len == 4) {
2605 if (put_user_u32(val, optval_addr))
2606 return -TARGET_EFAULT;
2607 } else {
2608 if (put_user_u8(val, optval_addr))
2609 return -TARGET_EFAULT;
2611 if (put_user_u32(len, optlen))
2612 return -TARGET_EFAULT;
2613 break;
2614 case SOL_IP:
2615 switch(optname) {
2616 case IP_TOS:
2617 case IP_TTL:
2618 case IP_HDRINCL:
2619 case IP_ROUTER_ALERT:
2620 case IP_RECVOPTS:
2621 case IP_RETOPTS:
2622 case IP_PKTINFO:
2623 case IP_MTU_DISCOVER:
2624 case IP_RECVERR:
2625 case IP_RECVTOS:
2626 #ifdef IP_FREEBIND
2627 case IP_FREEBIND:
2628 #endif
2629 case IP_MULTICAST_TTL:
2630 case IP_MULTICAST_LOOP:
2631 if (get_user_u32(len, optlen))
2632 return -TARGET_EFAULT;
2633 if (len < 0)
2634 return -TARGET_EINVAL;
2635 lv = sizeof(lv);
2636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2637 if (ret < 0)
2638 return ret;
2639 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2640 len = 1;
2641 if (put_user_u32(len, optlen)
2642 || put_user_u8(val, optval_addr))
2643 return -TARGET_EFAULT;
2644 } else {
2645 if (len > sizeof(int))
2646 len = sizeof(int);
2647 if (put_user_u32(len, optlen)
2648 || put_user_u32(val, optval_addr))
2649 return -TARGET_EFAULT;
2651 break;
2652 default:
2653 ret = -TARGET_ENOPROTOOPT;
2654 break;
2656 break;
2657 case SOL_IPV6:
2658 switch (optname) {
2659 case IPV6_MTU_DISCOVER:
2660 case IPV6_MTU:
2661 case IPV6_V6ONLY:
2662 case IPV6_RECVPKTINFO:
2663 case IPV6_UNICAST_HOPS:
2664 case IPV6_MULTICAST_HOPS:
2665 case IPV6_MULTICAST_LOOP:
2666 case IPV6_RECVERR:
2667 case IPV6_RECVHOPLIMIT:
2668 case IPV6_2292HOPLIMIT:
2669 case IPV6_CHECKSUM:
2670 case IPV6_ADDRFORM:
2671 case IPV6_2292PKTINFO:
2672 case IPV6_RECVTCLASS:
2673 case IPV6_RECVRTHDR:
2674 case IPV6_2292RTHDR:
2675 case IPV6_RECVHOPOPTS:
2676 case IPV6_2292HOPOPTS:
2677 case IPV6_RECVDSTOPTS:
2678 case IPV6_2292DSTOPTS:
2679 case IPV6_TCLASS:
2680 #ifdef IPV6_RECVPATHMTU
2681 case IPV6_RECVPATHMTU:
2682 #endif
2683 #ifdef IPV6_TRANSPARENT
2684 case IPV6_TRANSPARENT:
2685 #endif
2686 #ifdef IPV6_FREEBIND
2687 case IPV6_FREEBIND:
2688 #endif
2689 #ifdef IPV6_RECVORIGDSTADDR
2690 case IPV6_RECVORIGDSTADDR:
2691 #endif
2692 if (get_user_u32(len, optlen))
2693 return -TARGET_EFAULT;
2694 if (len < 0)
2695 return -TARGET_EINVAL;
2696 lv = sizeof(lv);
2697 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2698 if (ret < 0)
2699 return ret;
2700 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2701 len = 1;
2702 if (put_user_u32(len, optlen)
2703 || put_user_u8(val, optval_addr))
2704 return -TARGET_EFAULT;
2705 } else {
2706 if (len > sizeof(int))
2707 len = sizeof(int);
2708 if (put_user_u32(len, optlen)
2709 || put_user_u32(val, optval_addr))
2710 return -TARGET_EFAULT;
2712 break;
2713 default:
2714 ret = -TARGET_ENOPROTOOPT;
2715 break;
2717 break;
2718 #ifdef SOL_NETLINK
2719 case SOL_NETLINK:
2720 switch (optname) {
2721 case NETLINK_PKTINFO:
2722 case NETLINK_BROADCAST_ERROR:
2723 case NETLINK_NO_ENOBUFS:
2724 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2725 case NETLINK_LISTEN_ALL_NSID:
2726 case NETLINK_CAP_ACK:
2727 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2729 case NETLINK_EXT_ACK:
2730 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2731 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2732 case NETLINK_GET_STRICT_CHK:
2733 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2734 if (get_user_u32(len, optlen)) {
2735 return -TARGET_EFAULT;
2737 if (len != sizeof(val)) {
2738 return -TARGET_EINVAL;
2740 lv = len;
2741 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2742 if (ret < 0) {
2743 return ret;
2745 if (put_user_u32(lv, optlen)
2746 || put_user_u32(val, optval_addr)) {
2747 return -TARGET_EFAULT;
2749 break;
2750 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2751 case NETLINK_LIST_MEMBERSHIPS:
2753 uint32_t *results;
2754 int i;
2755 if (get_user_u32(len, optlen)) {
2756 return -TARGET_EFAULT;
2758 if (len < 0) {
2759 return -TARGET_EINVAL;
2761 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2762 if (!results) {
2763 return -TARGET_EFAULT;
2765 lv = len;
2766 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2767 if (ret < 0) {
2768 unlock_user(results, optval_addr, 0);
2769 return ret;
2771 /* swap host endianness to target endianness. */
2772 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2773 results[i] = tswap32(results[i]);
2775 if (put_user_u32(lv, optlen)) {
2776 return -TARGET_EFAULT;
2778 unlock_user(results, optval_addr, 0);
2779 break;
2781 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2782 default:
2783 goto unimplemented;
2785 break;
2786 #endif /* SOL_NETLINK */
2787 default:
2788 unimplemented:
2789 qemu_log_mask(LOG_UNIMP,
2790 "getsockopt level=%d optname=%d not yet supported\n",
2791 level, optname);
2792 ret = -TARGET_EOPNOTSUPP;
2793 break;
2795 return ret;
2798 /* Convert target low/high pair representing file offset into the host
2799 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2800 * as the kernel doesn't handle them either.
2801 */
2802 static void target_to_host_low_high(abi_ulong tlow,
2803 abi_ulong thigh,
2804 unsigned long *hlow,
2805 unsigned long *hhigh)
2807 uint64_t off = tlow |
2808 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2809 TARGET_LONG_BITS / 2;
2811 *hlow = off;
2812 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
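/* Worked example (illustrative): for a 32-bit guest on a 64-bit host,
 * tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef, so *hlow gets the full value and *hhigh
 * becomes 0. Each shift is done in two halves so that a shift by the
 * full word width (undefined behaviour in C) can never occur. */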
2815 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2816 abi_ulong count, int copy)
2818 struct target_iovec *target_vec;
2819 struct iovec *vec;
2820 abi_ulong total_len, max_len;
2821 int i;
2822 int err = 0;
2823 bool bad_address = false;
2825 if (count == 0) {
2826 errno = 0;
2827 return NULL;
2829 if (count > IOV_MAX) {
2830 errno = EINVAL;
2831 return NULL;
2834 vec = g_try_new0(struct iovec, count);
2835 if (vec == NULL) {
2836 errno = ENOMEM;
2837 return NULL;
2840 target_vec = lock_user(VERIFY_READ, target_addr,
2841 count * sizeof(struct target_iovec), 1);
2842 if (target_vec == NULL) {
2843 err = EFAULT;
2844 goto fail2;
2847 /* ??? If host page size > target page size, this will result in a
2848 value larger than what we can actually support. */
2849 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2850 total_len = 0;
2852 for (i = 0; i < count; i++) {
2853 abi_ulong base = tswapal(target_vec[i].iov_base);
2854 abi_long len = tswapal(target_vec[i].iov_len);
2856 if (len < 0) {
2857 err = EINVAL;
2858 goto fail;
2859 } else if (len == 0) {
2860 /* Zero length pointer is ignored. */
2861 vec[i].iov_base = 0;
2862 } else {
2863 vec[i].iov_base = lock_user(type, base, len, copy);
2864 /* If the first buffer pointer is bad, this is a fault. But
2865 * subsequent bad buffers will result in a partial write; this
2866 * is realized by filling the vector with null pointers and
2867 * zero lengths. */
2868 if (!vec[i].iov_base) {
2869 if (i == 0) {
2870 err = EFAULT;
2871 goto fail;
2872 } else {
2873 bad_address = true;
2876 if (bad_address) {
2877 len = 0;
2879 if (len > max_len - total_len) {
2880 len = max_len - total_len;
2883 vec[i].iov_len = len;
2884 total_len += len;
2887 unlock_user(target_vec, target_addr, 0);
2888 return vec;
2890 fail:
2891 while (--i >= 0) {
2892 if (tswapal(target_vec[i].iov_len) > 0) {
2893 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2896 unlock_user(target_vec, target_addr, 0);
2897 fail2:
2898 g_free(vec);
2899 errno = err;
2900 return NULL;
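/* Usage sketch (illustrative only, mirroring do_sendrecvmsg_locked()
 * further below): lock_iovec() reports failure through errno, and a
 * NULL return with errno == 0 is the legitimate count == 0 case. */
#if 0
struct iovec *vec = lock_iovec(VERIFY_READ, target_vec, count, 1);
if (vec == NULL) {
    ret = -host_to_target_errno(errno); /* 0 when count was 0 */
}
#endif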
2903 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2904 abi_ulong count, int copy)
2906 struct target_iovec *target_vec;
2907 int i;
2909 target_vec = lock_user(VERIFY_READ, target_addr,
2910 count * sizeof(struct target_iovec), 1);
2911 if (target_vec) {
2912 for (i = 0; i < count; i++) {
2913 abi_ulong base = tswapal(target_vec[i].iov_base);
2914 abi_long len = tswapal(target_vec[i].iov_len);
2915 if (len < 0) {
2916 break;
2918 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2920 unlock_user(target_vec, target_addr, 0);
2923 g_free(vec);
2926 static inline int target_to_host_sock_type(int *type)
2928 int host_type = 0;
2929 int target_type = *type;
2931 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2932 case TARGET_SOCK_DGRAM:
2933 host_type = SOCK_DGRAM;
2934 break;
2935 case TARGET_SOCK_STREAM:
2936 host_type = SOCK_STREAM;
2937 break;
2938 default:
2939 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2940 break;
2942 if (target_type & TARGET_SOCK_CLOEXEC) {
2943 #if defined(SOCK_CLOEXEC)
2944 host_type |= SOCK_CLOEXEC;
2945 #else
2946 return -TARGET_EINVAL;
2947 #endif
2949 if (target_type & TARGET_SOCK_NONBLOCK) {
2950 #if defined(SOCK_NONBLOCK)
2951 host_type |= SOCK_NONBLOCK;
2952 #elif !defined(O_NONBLOCK)
2953 return -TARGET_EINVAL;
2954 #endif
2956 *type = host_type;
2957 return 0;
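/* Example (illustrative): a guest socket(AF_INET, SOCK_STREAM |
 * SOCK_NONBLOCK, 0) arrives here as TARGET_SOCK_STREAM |
 * TARGET_SOCK_NONBLOCK. On hosts that define SOCK_NONBLOCK the flag
 * passes straight through; otherwise it is emulated afterwards with
 * fcntl() by sock_flags_fixup() below. */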
2960 /* Try to emulate socket type flags after socket creation. */
2961 static int sock_flags_fixup(int fd, int target_type)
2963 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2964 if (target_type & TARGET_SOCK_NONBLOCK) {
2965 int flags = fcntl(fd, F_GETFL);
2966 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2967 close(fd);
2968 return -TARGET_EINVAL;
2971 #endif
2972 return fd;
2975 /* do_socket() must return target values and target errnos. */
2976 static abi_long do_socket(int domain, int type, int protocol)
2978 int target_type = type;
2979 int ret;
2981 ret = target_to_host_sock_type(&type);
2982 if (ret) {
2983 return ret;
2986 if (domain == PF_NETLINK && !(
2987 #ifdef CONFIG_RTNETLINK
2988 protocol == NETLINK_ROUTE ||
2989 #endif
2990 protocol == NETLINK_KOBJECT_UEVENT ||
2991 protocol == NETLINK_AUDIT)) {
2992 return -TARGET_EPROTONOSUPPORT;
2995 if (domain == AF_PACKET ||
2996 (domain == AF_INET && type == SOCK_PACKET)) {
2997 protocol = tswap16(protocol);
3000 ret = get_errno(socket(domain, type, protocol));
3001 if (ret >= 0) {
3002 ret = sock_flags_fixup(ret, target_type);
3003 if (type == SOCK_PACKET) {
3004 /* Handle an obsolete case:
3005 * if the socket type is SOCK_PACKET, bind by name
3006 */
3007 fd_trans_register(ret, &target_packet_trans);
3008 } else if (domain == PF_NETLINK) {
3009 switch (protocol) {
3010 #ifdef CONFIG_RTNETLINK
3011 case NETLINK_ROUTE:
3012 fd_trans_register(ret, &target_netlink_route_trans);
3013 break;
3014 #endif
3015 case NETLINK_KOBJECT_UEVENT:
3016 /* nothing to do: messages are strings */
3017 break;
3018 case NETLINK_AUDIT:
3019 fd_trans_register(ret, &target_netlink_audit_trans);
3020 break;
3021 default:
3022 g_assert_not_reached();
3026 return ret;
3029 /* do_bind() must return target values and target errnos. */
3030 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3031 socklen_t addrlen)
3033 void *addr;
3034 abi_long ret;
3036 if ((int)addrlen < 0) {
3037 return -TARGET_EINVAL;
3040 addr = alloca(addrlen+1);
3042 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3043 if (ret)
3044 return ret;
3046 return get_errno(bind(sockfd, addr, addrlen));
3049 /* do_connect() must return target values and target errnos. */
3050 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3051 socklen_t addrlen)
3053 void *addr;
3054 abi_long ret;
3056 if ((int)addrlen < 0) {
3057 return -TARGET_EINVAL;
3060 addr = alloca(addrlen+1);
3062 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3063 if (ret)
3064 return ret;
3066 return get_errno(safe_connect(sockfd, addr, addrlen));
3069 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3070 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3071 int flags, int send)
3073 abi_long ret, len;
3074 struct msghdr msg;
3075 abi_ulong count;
3076 struct iovec *vec;
3077 abi_ulong target_vec;
3079 if (msgp->msg_name) {
3080 msg.msg_namelen = tswap32(msgp->msg_namelen);
3081 msg.msg_name = alloca(msg.msg_namelen+1);
3082 ret = target_to_host_sockaddr(fd, msg.msg_name,
3083 tswapal(msgp->msg_name),
3084 msg.msg_namelen);
3085 if (ret == -TARGET_EFAULT) {
3086 /* For connected sockets msg_name and msg_namelen must
3087 * be ignored, so returning EFAULT immediately is wrong.
3088 * Instead, pass a bad msg_name to the host kernel, and
3089 * let it decide whether to return EFAULT or not.
3090 */
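/* Concrete case: a guest sendmsg() on a connected socket with an
 * unreadable msg_name must not fail with EFAULT here, because Linux
 * ignores the address for connected sockets; the host kernel gets
 * the final say. */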
3091 msg.msg_name = (void *)-1;
3092 } else if (ret) {
3093 goto out2;
3095 } else {
3096 msg.msg_name = NULL;
3097 msg.msg_namelen = 0;
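/* The host control buffer is sized at twice the guest's
 * msg_controllen, presumably because host cmsg headers and payloads
 * can be larger than their target counterparts (different alignment
 * and field sizes), so the conversion needs room to expand. */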
3099 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3100 msg.msg_control = alloca(msg.msg_controllen);
3101 memset(msg.msg_control, 0, msg.msg_controllen);
3103 msg.msg_flags = tswap32(msgp->msg_flags);
3105 count = tswapal(msgp->msg_iovlen);
3106 target_vec = tswapal(msgp->msg_iov);
3108 if (count > IOV_MAX) {
3109 /* sendmsg/recvmsg return a different errno for this condition than
3110 * readv/writev, so we must catch it here before lock_iovec() does.
3111 */
3112 ret = -TARGET_EMSGSIZE;
3113 goto out2;
3116 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3117 target_vec, count, send);
3118 if (vec == NULL) {
3119 ret = -host_to_target_errno(errno);
3120 goto out2;
3122 msg.msg_iovlen = count;
3123 msg.msg_iov = vec;
3125 if (send) {
3126 if (fd_trans_target_to_host_data(fd)) {
3127 void *host_msg;
3129 host_msg = g_malloc(msg.msg_iov->iov_len);
3130 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3131 ret = fd_trans_target_to_host_data(fd)(host_msg,
3132 msg.msg_iov->iov_len);
3133 if (ret >= 0) {
3134 msg.msg_iov->iov_base = host_msg;
3135 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3137 g_free(host_msg);
3138 } else {
3139 ret = target_to_host_cmsg(&msg, msgp);
3140 if (ret == 0) {
3141 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3144 } else {
3145 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3146 if (!is_error(ret)) {
3147 len = ret;
3148 if (fd_trans_host_to_target_data(fd)) {
3149 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3150 MIN(msg.msg_iov->iov_len, len));
3151 } else {
3152 ret = host_to_target_cmsg(msgp, &msg);
3154 if (!is_error(ret)) {
3155 msgp->msg_namelen = tswap32(msg.msg_namelen);
3156 msgp->msg_flags = tswap32(msg.msg_flags);
3157 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3158 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3159 msg.msg_name, msg.msg_namelen);
3160 if (ret) {
3161 goto out;
3165 ret = len;
3170 out:
3171 unlock_iovec(vec, target_vec, count, !send);
3172 out2:
3173 return ret;
3176 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3177 int flags, int send)
3179 abi_long ret;
3180 struct target_msghdr *msgp;
3182 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3183 msgp,
3184 target_msg,
3185 send ? 1 : 0)) {
3186 return -TARGET_EFAULT;
3188 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3189 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3190 return ret;
3193 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3194 * so it might not have this *mmsg-specific flag either.
3195 */
3196 #ifndef MSG_WAITFORONE
3197 #define MSG_WAITFORONE 0x10000
3198 #endif
3200 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3201 unsigned int vlen, unsigned int flags,
3202 int send)
3204 struct target_mmsghdr *mmsgp;
3205 abi_long ret = 0;
3206 int i;
3208 if (vlen > UIO_MAXIOV) {
3209 vlen = UIO_MAXIOV;
3212 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3213 if (!mmsgp) {
3214 return -TARGET_EFAULT;
3217 for (i = 0; i < vlen; i++) {
3218 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3219 if (is_error(ret)) {
3220 break;
3222 mmsgp[i].msg_len = tswap32(ret);
3223 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3224 if (flags & MSG_WAITFORONE) {
3225 flags |= MSG_DONTWAIT;
3229 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3231 /* Return the number of datagrams sent if we sent any at all;
3232 * otherwise return the error.
3233 */
3234 if (i) {
3235 return i;
3237 return ret;
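/* Example (illustrative): with vlen == 5, if the third datagram fails
 * with EAGAIN the guest sees 2; an error is only returned when the
 * very first datagram fails, matching kernel sendmmsg()/recvmmsg()
 * semantics. */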
3240 /* do_accept4() must return target values and target errnos. */
3241 static abi_long do_accept4(int fd, abi_ulong target_addr,
3242 abi_ulong target_addrlen_addr, int flags)
3244 socklen_t addrlen, ret_addrlen;
3245 void *addr;
3246 abi_long ret;
3247 int host_flags;
3249 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3251 if (target_addr == 0) {
3252 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3255 /* Linux returns EINVAL if the addrlen pointer is invalid */
3256 if (get_user_u32(addrlen, target_addrlen_addr))
3257 return -TARGET_EINVAL;
3259 if ((int)addrlen < 0) {
3260 return -TARGET_EINVAL;
3263 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3264 return -TARGET_EINVAL;
3266 addr = alloca(addrlen);
3268 ret_addrlen = addrlen;
3269 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3270 if (!is_error(ret)) {
3271 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3272 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3273 ret = -TARGET_EFAULT;
3276 return ret;
3279 /* do_getpeername() must return target values and target errnos. */
3280 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3281 abi_ulong target_addrlen_addr)
3283 socklen_t addrlen, ret_addrlen;
3284 void *addr;
3285 abi_long ret;
3287 if (get_user_u32(addrlen, target_addrlen_addr))
3288 return -TARGET_EFAULT;
3290 if ((int)addrlen < 0) {
3291 return -TARGET_EINVAL;
3294 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3295 return -TARGET_EFAULT;
3297 addr = alloca(addrlen);
3299 ret_addrlen = addrlen;
3300 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3301 if (!is_error(ret)) {
3302 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3303 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3304 ret = -TARGET_EFAULT;
3307 return ret;
3310 /* do_getsockname() must return target values and target errnos. */
3311 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3312 abi_ulong target_addrlen_addr)
3314 socklen_t addrlen, ret_addrlen;
3315 void *addr;
3316 abi_long ret;
3318 if (get_user_u32(addrlen, target_addrlen_addr))
3319 return -TARGET_EFAULT;
3321 if ((int)addrlen < 0) {
3322 return -TARGET_EINVAL;
3325 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3326 return -TARGET_EFAULT;
3328 addr = alloca(addrlen);
3330 ret_addrlen = addrlen;
3331 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3332 if (!is_error(ret)) {
3333 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3334 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3335 ret = -TARGET_EFAULT;
3338 return ret;
3341 /* do_socketpair() must return target values and target errnos. */
3342 static abi_long do_socketpair(int domain, int type, int protocol,
3343 abi_ulong target_tab_addr)
3345 int tab[2];
3346 abi_long ret;
3348 target_to_host_sock_type(&type);
3350 ret = get_errno(socketpair(domain, type, protocol, tab));
3351 if (!is_error(ret)) {
3352 if (put_user_s32(tab[0], target_tab_addr)
3353 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3354 ret = -TARGET_EFAULT;
3356 return ret;
3359 /* do_sendto() must return target values and target errnos. */
3360 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3361 abi_ulong target_addr, socklen_t addrlen)
3363 void *addr;
3364 void *host_msg;
3365 void *copy_msg = NULL;
3366 abi_long ret;
3368 if ((int)addrlen < 0) {
3369 return -TARGET_EINVAL;
3372 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3373 if (!host_msg)
3374 return -TARGET_EFAULT;
3375 if (fd_trans_target_to_host_data(fd)) {
3376 copy_msg = host_msg;
3377 host_msg = g_malloc(len);
3378 memcpy(host_msg, copy_msg, len);
3379 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3380 if (ret < 0) {
3381 goto fail;
3384 if (target_addr) {
3385 addr = alloca(addrlen+1);
3386 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3387 if (ret) {
3388 goto fail;
3390 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3391 } else {
3392 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3394 fail:
3395 if (copy_msg) {
3396 g_free(host_msg);
3397 host_msg = copy_msg;
3399 unlock_user(host_msg, msg, 0);
3400 return ret;
3403 /* do_recvfrom() must return target values and target errnos. */
3404 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3405 abi_ulong target_addr,
3406 abi_ulong target_addrlen)
3408 socklen_t addrlen, ret_addrlen;
3409 void *addr;
3410 void *host_msg;
3411 abi_long ret;
3413 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3414 if (!host_msg)
3415 return -TARGET_EFAULT;
3416 if (target_addr) {
3417 if (get_user_u32(addrlen, target_addrlen)) {
3418 ret = -TARGET_EFAULT;
3419 goto fail;
3421 if ((int)addrlen < 0) {
3422 ret = -TARGET_EINVAL;
3423 goto fail;
3425 addr = alloca(addrlen);
3426 ret_addrlen = addrlen;
3427 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3428 addr, &ret_addrlen));
3429 } else {
3430 addr = NULL; /* To keep compiler quiet. */
3431 addrlen = 0; /* To keep compiler quiet. */
3432 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3434 if (!is_error(ret)) {
3435 if (fd_trans_host_to_target_data(fd)) {
3436 abi_long trans;
3437 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3438 if (is_error(trans)) {
3439 ret = trans;
3440 goto fail;
3443 if (target_addr) {
3444 host_to_target_sockaddr(target_addr, addr,
3445 MIN(addrlen, ret_addrlen));
3446 if (put_user_u32(ret_addrlen, target_addrlen)) {
3447 ret = -TARGET_EFAULT;
3448 goto fail;
3451 unlock_user(host_msg, msg, len);
3452 } else {
3453 fail:
3454 unlock_user(host_msg, msg, 0);
3456 return ret;
3459 #ifdef TARGET_NR_socketcall
3460 /* do_socketcall() must return target values and target errnos. */
3461 static abi_long do_socketcall(int num, abi_ulong vptr)
3463 static const unsigned nargs[] = { /* number of arguments per operation */
3464 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3465 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3466 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3467 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3468 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3469 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3470 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3471 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3472 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3473 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3474 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3475 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3476 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3477 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3478 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3479 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3480 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3481 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3482 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3483 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3485 abi_long a[6]; /* max 6 args */
3486 unsigned i;
3488 /* check the range of the first argument num */
3489 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3490 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3491 return -TARGET_EINVAL;
3493 /* ensure we have space for args */
3494 if (nargs[num] > ARRAY_SIZE(a)) {
3495 return -TARGET_EINVAL;
3497 /* collect the arguments in a[] according to nargs[] */
3498 for (i = 0; i < nargs[num]; ++i) {
3499 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3500 return -TARGET_EFAULT;
3503 /* now that we have the args, invoke the appropriate underlying function */
3504 switch (num) {
3505 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3506 return do_socket(a[0], a[1], a[2]);
3507 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3508 return do_bind(a[0], a[1], a[2]);
3509 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3510 return do_connect(a[0], a[1], a[2]);
3511 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3512 return get_errno(listen(a[0], a[1]));
3513 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3514 return do_accept4(a[0], a[1], a[2], 0);
3515 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3516 return do_getsockname(a[0], a[1], a[2]);
3517 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3518 return do_getpeername(a[0], a[1], a[2]);
3519 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3520 return do_socketpair(a[0], a[1], a[2], a[3]);
3521 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3522 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3523 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3524 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3525 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3526 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3527 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3528 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3529 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3530 return get_errno(shutdown(a[0], a[1]));
3531 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3532 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3533 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3534 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3535 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3536 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3537 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3538 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3539 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3540 return do_accept4(a[0], a[1], a[2], a[3]);
3541 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3542 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3543 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3544 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3545 default:
3546 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3547 return -TARGET_EINVAL;
3550 #endif
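/* Illustrative walk-through (not compiled): a guest
 * socketcall(TARGET_SYS_CONNECT, vptr) supplies three abi_longs at
 * vptr: { sockfd, addr, addrlen }. nargs[TARGET_SYS_CONNECT] == 3, so
 * the loop above fetches a[0..2] with get_user_ual() and the switch
 * dispatches do_connect(a[0], a[1], a[2]). */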
3552 #define N_SHM_REGIONS 32
3554 static struct shm_region {
3555 abi_ulong start;
3556 abi_ulong size;
3557 bool in_use;
3558 } shm_regions[N_SHM_REGIONS];
3560 #ifndef TARGET_SEMID64_DS
3561 /* asm-generic version of this struct */
3562 struct target_semid64_ds
3564 struct target_ipc_perm sem_perm;
3565 abi_ulong sem_otime;
3566 #if TARGET_ABI_BITS == 32
3567 abi_ulong __unused1;
3568 #endif
3569 abi_ulong sem_ctime;
3570 #if TARGET_ABI_BITS == 32
3571 abi_ulong __unused2;
3572 #endif
3573 abi_ulong sem_nsems;
3574 abi_ulong __unused3;
3575 abi_ulong __unused4;
3577 #endif
3579 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3580 abi_ulong target_addr)
3582 struct target_ipc_perm *target_ip;
3583 struct target_semid64_ds *target_sd;
3585 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3586 return -TARGET_EFAULT;
3587 target_ip = &(target_sd->sem_perm);
3588 host_ip->__key = tswap32(target_ip->__key);
3589 host_ip->uid = tswap32(target_ip->uid);
3590 host_ip->gid = tswap32(target_ip->gid);
3591 host_ip->cuid = tswap32(target_ip->cuid);
3592 host_ip->cgid = tswap32(target_ip->cgid);
3593 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3594 host_ip->mode = tswap32(target_ip->mode);
3595 #else
3596 host_ip->mode = tswap16(target_ip->mode);
3597 #endif
3598 #if defined(TARGET_PPC)
3599 host_ip->__seq = tswap32(target_ip->__seq);
3600 #else
3601 host_ip->__seq = tswap16(target_ip->__seq);
3602 #endif
3603 unlock_user_struct(target_sd, target_addr, 0);
3604 return 0;
3607 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3608 struct ipc_perm *host_ip)
3610 struct target_ipc_perm *target_ip;
3611 struct target_semid64_ds *target_sd;
3613 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3614 return -TARGET_EFAULT;
3615 target_ip = &(target_sd->sem_perm);
3616 target_ip->__key = tswap32(host_ip->__key);
3617 target_ip->uid = tswap32(host_ip->uid);
3618 target_ip->gid = tswap32(host_ip->gid);
3619 target_ip->cuid = tswap32(host_ip->cuid);
3620 target_ip->cgid = tswap32(host_ip->cgid);
3621 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3622 target_ip->mode = tswap32(host_ip->mode);
3623 #else
3624 target_ip->mode = tswap16(host_ip->mode);
3625 #endif
3626 #if defined(TARGET_PPC)
3627 target_ip->__seq = tswap32(host_ip->__seq);
3628 #else
3629 target_ip->__seq = tswap16(host_ip->__seq);
3630 #endif
3631 unlock_user_struct(target_sd, target_addr, 1);
3632 return 0;
3635 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3636 abi_ulong target_addr)
3638 struct target_semid64_ds *target_sd;
3640 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3641 return -TARGET_EFAULT;
3642 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3643 return -TARGET_EFAULT;
3644 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3645 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3646 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3647 unlock_user_struct(target_sd, target_addr, 0);
3648 return 0;
3651 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3652 struct semid_ds *host_sd)
3654 struct target_semid64_ds *target_sd;
3656 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3657 return -TARGET_EFAULT;
3658 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3659 return -TARGET_EFAULT;
3660 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3661 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3662 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3663 unlock_user_struct(target_sd, target_addr, 1);
3664 return 0;
3667 struct target_seminfo {
3668 int semmap;
3669 int semmni;
3670 int semmns;
3671 int semmnu;
3672 int semmsl;
3673 int semopm;
3674 int semume;
3675 int semusz;
3676 int semvmx;
3677 int semaem;
3680 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3681 struct seminfo *host_seminfo)
3683 struct target_seminfo *target_seminfo;
3684 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3685 return -TARGET_EFAULT;
3686 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3687 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3688 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3689 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3690 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3691 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3692 __put_user(host_seminfo->semume, &target_seminfo->semume);
3693 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3694 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3695 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3696 unlock_user_struct(target_seminfo, target_addr, 1);
3697 return 0;
3700 union semun {
3701 int val;
3702 struct semid_ds *buf;
3703 unsigned short *array;
3704 struct seminfo *__buf;
3707 union target_semun {
3708 int val;
3709 abi_ulong buf;
3710 abi_ulong array;
3711 abi_ulong __buf;
3714 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3715 abi_ulong target_addr)
3717 int nsems;
3718 unsigned short *array;
3719 union semun semun;
3720 struct semid_ds semid_ds;
3721 int i, ret;
3723 semun.buf = &semid_ds;
3725 ret = semctl(semid, 0, IPC_STAT, semun);
3726 if (ret == -1)
3727 return get_errno(ret);
3729 nsems = semid_ds.sem_nsems;
3731 *host_array = g_try_new(unsigned short, nsems);
3732 if (!*host_array) {
3733 return -TARGET_ENOMEM;
3735 array = lock_user(VERIFY_READ, target_addr,
3736 nsems*sizeof(unsigned short), 1);
3737 if (!array) {
3738 g_free(*host_array);
3739 return -TARGET_EFAULT;
3742 for(i=0; i<nsems; i++) {
3743 __get_user((*host_array)[i], &array[i]);
3745 unlock_user(array, target_addr, 0);
3747 return 0;
3750 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3751 unsigned short **host_array)
3753 int nsems;
3754 unsigned short *array;
3755 union semun semun;
3756 struct semid_ds semid_ds;
3757 int i, ret;
3759 semun.buf = &semid_ds;
3761 ret = semctl(semid, 0, IPC_STAT, semun);
3762 if (ret == -1)
3763 return get_errno(ret);
3765 nsems = semid_ds.sem_nsems;
3767 array = lock_user(VERIFY_WRITE, target_addr,
3768 nsems*sizeof(unsigned short), 0);
3769 if (!array)
3770 return -TARGET_EFAULT;
3772 for(i=0; i<nsems; i++) {
3773 __put_user((*host_array)[i], &array[i]);
3775 g_free(*host_array);
3776 unlock_user(array, target_addr, 1);
3778 return 0;
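/* Design note: neither conversion helper above is told how many
 * semaphores the set contains, so both recover nsems with a host
 * semctl(IPC_STAT) before copying the GETALL/SETALL array. */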
3781 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3782 abi_ulong target_arg)
3784 union target_semun target_su = { .buf = target_arg };
3785 union semun arg;
3786 struct semid_ds dsarg;
3787 unsigned short *array = NULL;
3788 struct seminfo seminfo;
3789 abi_long ret = -TARGET_EINVAL;
3790 abi_long err;
3791 cmd &= 0xff;
3793 switch( cmd ) {
3794 case GETVAL:
3795 case SETVAL:
3796 /* In 64-bit cross-endian situations, we will erroneously pick up
3797 * the wrong half of the union for the "val" element. To rectify
3798 * this, the entire 8-byte structure is byteswapped, followed by
3799 * a swap of the 4-byte val field. In other cases, the data is
3800 * already in proper host byte order. */
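/* Worked example (illustrative): a 64-bit big-endian guest on a
 * little-endian host passes val = 1 as the union bit pattern
 * 0x0000000100000000. tswapal() yields 0x0000000001000000, reading
 * .val on the host then gives 0x01000000, and tswap32() recovers 1. */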
3801 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3802 target_su.buf = tswapal(target_su.buf);
3803 arg.val = tswap32(target_su.val);
3804 } else {
3805 arg.val = target_su.val;
3807 ret = get_errno(semctl(semid, semnum, cmd, arg));
3808 break;
3809 case GETALL:
3810 case SETALL:
3811 err = target_to_host_semarray(semid, &array, target_su.array);
3812 if (err)
3813 return err;
3814 arg.array = array;
3815 ret = get_errno(semctl(semid, semnum, cmd, arg));
3816 err = host_to_target_semarray(semid, target_su.array, &array);
3817 if (err)
3818 return err;
3819 break;
3820 case IPC_STAT:
3821 case IPC_SET:
3822 case SEM_STAT:
3823 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3824 if (err)
3825 return err;
3826 arg.buf = &dsarg;
3827 ret = get_errno(semctl(semid, semnum, cmd, arg));
3828 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3829 if (err)
3830 return err;
3831 break;
3832 case IPC_INFO:
3833 case SEM_INFO:
3834 arg.__buf = &seminfo;
3835 ret = get_errno(semctl(semid, semnum, cmd, arg));
3836 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3837 if (err)
3838 return err;
3839 break;
3840 case IPC_RMID:
3841 case GETPID:
3842 case GETNCNT:
3843 case GETZCNT:
3844 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3845 break;
3848 return ret;
3851 struct target_sembuf {
3852 unsigned short sem_num;
3853 short sem_op;
3854 short sem_flg;
3857 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3858 abi_ulong target_addr,
3859 unsigned nsops)
3861 struct target_sembuf *target_sembuf;
3862 int i;
3864 target_sembuf = lock_user(VERIFY_READ, target_addr,
3865 nsops*sizeof(struct target_sembuf), 1);
3866 if (!target_sembuf)
3867 return -TARGET_EFAULT;
3869 for(i=0; i<nsops; i++) {
3870 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3871 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3872 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3875 unlock_user(target_sembuf, target_addr, 0);
3877 return 0;
3880 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3881 defined(TARGET_NR_semtimedop)
3883 /*
3884 * This macro is required to handle the s390 variants, which pass the
3885 * arguments in a different order than the default.
3886 */
3887 #ifdef __s390x__
3888 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3889 (__nsops), (__timeout), (__sops)
3890 #else
3891 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3892 (__nsops), 0, (__sops), (__timeout)
3893 #endif
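/* Example expansion (illustrative): the do_semtimedop() call below,
 * safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(nsops, sops, t)),
 * becomes safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, t) by
 * default, but safe_ipc(IPCOP_semtimedop, semid, nsops, t, sops) on
 * s390x. */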
3895 static inline abi_long do_semtimedop(int semid,
3896 abi_long ptr,
3897 unsigned nsops,
3898 abi_long timeout)
3900 struct sembuf sops[nsops];
3901 struct timespec ts, *pts = NULL;
3902 abi_long ret;
3904 if (timeout) {
3905 pts = &ts;
3906 if (target_to_host_timespec(pts, timeout)) {
3907 return -TARGET_EFAULT;
3911 if (target_to_host_sembuf(sops, ptr, nsops))
3912 return -TARGET_EFAULT;
3914 ret = -TARGET_ENOSYS;
3915 #ifdef __NR_semtimedop
3916 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3917 #endif
3918 #ifdef __NR_ipc
3919 if (ret == -TARGET_ENOSYS) {
3920 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3921 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3923 #endif
3924 return ret;
3926 #endif
3928 struct target_msqid_ds
3930 struct target_ipc_perm msg_perm;
3931 abi_ulong msg_stime;
3932 #if TARGET_ABI_BITS == 32
3933 abi_ulong __unused1;
3934 #endif
3935 abi_ulong msg_rtime;
3936 #if TARGET_ABI_BITS == 32
3937 abi_ulong __unused2;
3938 #endif
3939 abi_ulong msg_ctime;
3940 #if TARGET_ABI_BITS == 32
3941 abi_ulong __unused3;
3942 #endif
3943 abi_ulong __msg_cbytes;
3944 abi_ulong msg_qnum;
3945 abi_ulong msg_qbytes;
3946 abi_ulong msg_lspid;
3947 abi_ulong msg_lrpid;
3948 abi_ulong __unused4;
3949 abi_ulong __unused5;
3952 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3953 abi_ulong target_addr)
3955 struct target_msqid_ds *target_md;
3957 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3958 return -TARGET_EFAULT;
3959 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3960 return -TARGET_EFAULT;
3961 host_md->msg_stime = tswapal(target_md->msg_stime);
3962 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3963 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3964 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3965 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3966 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3967 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3968 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3969 unlock_user_struct(target_md, target_addr, 0);
3970 return 0;
3973 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3974 struct msqid_ds *host_md)
3976 struct target_msqid_ds *target_md;
3978 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3979 return -TARGET_EFAULT;
3980 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3981 return -TARGET_EFAULT;
3982 target_md->msg_stime = tswapal(host_md->msg_stime);
3983 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3984 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3985 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3986 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3987 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3988 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3989 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3990 unlock_user_struct(target_md, target_addr, 1);
3991 return 0;
3994 struct target_msginfo {
3995 int msgpool;
3996 int msgmap;
3997 int msgmax;
3998 int msgmnb;
3999 int msgmni;
4000 int msgssz;
4001 int msgtql;
4002 unsigned short int msgseg;
4005 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4006 struct msginfo *host_msginfo)
4008 struct target_msginfo *target_msginfo;
4009 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4010 return -TARGET_EFAULT;
4011 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4012 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4013 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4014 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4015 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4016 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4017 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4018 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4019 unlock_user_struct(target_msginfo, target_addr, 1);
4020 return 0;
4023 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4025 struct msqid_ds dsarg;
4026 struct msginfo msginfo;
4027 abi_long ret = -TARGET_EINVAL;
4029 cmd &= 0xff;
4031 switch (cmd) {
4032 case IPC_STAT:
4033 case IPC_SET:
4034 case MSG_STAT:
4035 if (target_to_host_msqid_ds(&dsarg,ptr))
4036 return -TARGET_EFAULT;
4037 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4038 if (host_to_target_msqid_ds(ptr,&dsarg))
4039 return -TARGET_EFAULT;
4040 break;
4041 case IPC_RMID:
4042 ret = get_errno(msgctl(msgid, cmd, NULL));
4043 break;
4044 case IPC_INFO:
4045 case MSG_INFO:
4046 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4047 if (host_to_target_msginfo(ptr, &msginfo))
4048 return -TARGET_EFAULT;
4049 break;
4052 return ret;
4055 struct target_msgbuf {
4056 abi_long mtype;
4057 char mtext[1];
4060 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4061 ssize_t msgsz, int msgflg)
4063 struct target_msgbuf *target_mb;
4064 struct msgbuf *host_mb;
4065 abi_long ret = 0;
4067 if (msgsz < 0) {
4068 return -TARGET_EINVAL;
4071 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4072 return -TARGET_EFAULT;
4073 host_mb = g_try_malloc(msgsz + sizeof(long));
4074 if (!host_mb) {
4075 unlock_user_struct(target_mb, msgp, 0);
4076 return -TARGET_ENOMEM;
4078 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4079 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4080 ret = -TARGET_ENOSYS;
4081 #ifdef __NR_msgsnd
4082 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4083 #endif
4084 #ifdef __NR_ipc
4085 if (ret == -TARGET_ENOSYS) {
4086 #ifdef __s390x__
4087 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4088 host_mb));
4089 #else
4090 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4091 host_mb, 0));
4092 #endif
4094 #endif
4095 g_free(host_mb);
4096 unlock_user_struct(target_mb, msgp, 0);
4098 return ret;
4101 #ifdef __NR_ipc
4102 #if defined(__sparc__)
4103 /* For msgrcv, SPARC does not use the kludge on the final 2 arguments. */
4104 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4105 #elif defined(__s390x__)
4106 /* The s390 sys_ipc variant has only five parameters. */
4107 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4108 ((long int[]){(long int)__msgp, __msgtyp})
4109 #else
4110 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4111 ((long int[]){(long int)__msgp, __msgtyp}), 0
4112 #endif
4113 #endif
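/*
 * Expansion sketch (illustration only): in do_msgrcv() below,
 *   safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *            MSGRCV_ARGS(host_mb, msgtyp))
 * passes, in the default case, a two-element long array
 * { (long)host_mb, msgtyp } as the fifth argument plus a trailing 0;
 * on s390x the same array is passed without the trailing 0, and on
 * SPARC the pointer and type are passed directly as two arguments.
 */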
4115 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4116 ssize_t msgsz, abi_long msgtyp,
4117 int msgflg)
4119 struct target_msgbuf *target_mb;
4120 char *target_mtext;
4121 struct msgbuf *host_mb;
4122 abi_long ret = 0;
4124 if (msgsz < 0) {
4125 return -TARGET_EINVAL;
4128 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4129 return -TARGET_EFAULT;
4131 host_mb = g_try_malloc(msgsz + sizeof(long));
4132 if (!host_mb) {
4133 ret = -TARGET_ENOMEM;
4134 goto end;
4136 ret = -TARGET_ENOSYS;
4137 #ifdef __NR_msgrcv
4138 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4139 #endif
4140 #ifdef __NR_ipc
4141 if (ret == -TARGET_ENOSYS) {
4142 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4143 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4145 #endif
4147 if (ret > 0) {
4148 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4149 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4150 if (!target_mtext) {
4151 ret = -TARGET_EFAULT;
4152 goto end;
4154 memcpy(target_mb->mtext, host_mb->mtext, ret);
4155 unlock_user(target_mtext, target_mtext_addr, ret);
4158 target_mb->mtype = tswapal(host_mb->mtype);
4160 end:
4161 if (target_mb)
4162 unlock_user_struct(target_mb, msgp, 1);
4163 g_free(host_mb);
4164 return ret;
4167 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4168 abi_ulong target_addr)
4170 struct target_shmid_ds *target_sd;
4172 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4173 return -TARGET_EFAULT;
4174 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4175 return -TARGET_EFAULT;
4176 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4177 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4178 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4179 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4180 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4181 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4182 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4183 unlock_user_struct(target_sd, target_addr, 0);
4184 return 0;
4187 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4188 struct shmid_ds *host_sd)
4190 struct target_shmid_ds *target_sd;
4192 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4193 return -TARGET_EFAULT;
4194 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4195 return -TARGET_EFAULT;
4196 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4197 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4198 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4199 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4200 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4201 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4202 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4203 unlock_user_struct(target_sd, target_addr, 1);
4204 return 0;
4207 struct target_shminfo {
4208 abi_ulong shmmax;
4209 abi_ulong shmmin;
4210 abi_ulong shmmni;
4211 abi_ulong shmseg;
4212 abi_ulong shmall;
4215 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4216 struct shminfo *host_shminfo)
4218 struct target_shminfo *target_shminfo;
4219 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4220 return -TARGET_EFAULT;
4221 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4222 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4223 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4224 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4225 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4226 unlock_user_struct(target_shminfo, target_addr, 1);
4227 return 0;
4230 struct target_shm_info {
4231 int used_ids;
4232 abi_ulong shm_tot;
4233 abi_ulong shm_rss;
4234 abi_ulong shm_swp;
4235 abi_ulong swap_attempts;
4236 abi_ulong swap_successes;
4239 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4240 struct shm_info *host_shm_info)
4242 struct target_shm_info *target_shm_info;
4243 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4244 return -TARGET_EFAULT;
4245 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4246 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4247 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4248 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4249 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4250 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4251 unlock_user_struct(target_shm_info, target_addr, 1);
4252 return 0;
4255 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4257 struct shmid_ds dsarg;
4258 struct shminfo shminfo;
4259 struct shm_info shm_info;
4260 abi_long ret = -TARGET_EINVAL;
4262 cmd &= 0xff;
4264 switch(cmd) {
4265 case IPC_STAT:
4266 case IPC_SET:
4267 case SHM_STAT:
4268 if (target_to_host_shmid_ds(&dsarg, buf))
4269 return -TARGET_EFAULT;
4270 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4271 if (host_to_target_shmid_ds(buf, &dsarg))
4272 return -TARGET_EFAULT;
4273 break;
4274 case IPC_INFO:
4275 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4276 if (host_to_target_shminfo(buf, &shminfo))
4277 return -TARGET_EFAULT;
4278 break;
4279 case SHM_INFO:
4280 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4281 if (host_to_target_shm_info(buf, &shm_info))
4282 return -TARGET_EFAULT;
4283 break;
4284 case IPC_RMID:
4285 case SHM_LOCK:
4286 case SHM_UNLOCK:
4287 ret = get_errno(shmctl(shmid, cmd, NULL));
4288 break;
4291 return ret;
4294 #ifndef TARGET_FORCE_SHMLBA
4295 /* For most architectures, SHMLBA is the same as the page size;
4296 * some architectures have larger values, in which case they should
4297 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4298 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4299 * and defining its own value for SHMLBA.
4301 * The kernel also permits SHMLBA to be set by the architecture to a
4302 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4303 * this means that addresses are rounded to the large size if
4304 * SHM_RND is set but addresses not aligned to that size are not rejected
4305 * as long as they are at least page-aligned. Since the only architecture
4306 * which uses this is ia64, this code doesn't provide for that oddity.
4307 */
4308 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4310 return TARGET_PAGE_SIZE;
4312 #endif
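/*
 * Sketch (hypothetical, not from this file): a target needing a larger
 * alignment would define TARGET_FORCE_SHMLBA in its target headers and
 * supply its own target_shmlba(), for example a fixed 16 KiB SHMLBA:
 */
#if 0 /* example only */
#define TARGET_FORCE_SHMLBA 1
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return 0x4000; /* hypothetical 16 KiB alignment; real targets vary */
}
#endif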
4314 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4315 int shmid, abi_ulong shmaddr, int shmflg)
4317 abi_long raddr;
4318 void *host_raddr;
4319 struct shmid_ds shm_info;
4320 int i,ret;
4321 abi_ulong shmlba;
4323 /* find out the length of the shared memory segment */
4324 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4325 if (is_error(ret)) {
4326 /* can't get length, bail out */
4327 return ret;
4330 shmlba = target_shmlba(cpu_env);
4332 if (shmaddr & (shmlba - 1)) {
4333 if (shmflg & SHM_RND) {
4334 shmaddr &= ~(shmlba - 1);
4335 } else {
4336 return -TARGET_EINVAL;
4339 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4340 return -TARGET_EINVAL;
4343 mmap_lock();
4345 if (shmaddr)
4346 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4347 else {
4348 abi_ulong mmap_start;
4350 /* In order to use the host shmat, we need to honor host SHMLBA. */
4351 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4353 if (mmap_start == -1) {
4354 errno = ENOMEM;
4355 host_raddr = (void *)-1;
4356 } else
4357 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4360 if (host_raddr == (void *)-1) {
4361 mmap_unlock();
4362 return get_errno((long)host_raddr);
4364 raddr = h2g((unsigned long)host_raddr);
4366 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4367 PAGE_VALID | PAGE_READ |
4368 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4370 for (i = 0; i < N_SHM_REGIONS; i++) {
4371 if (!shm_regions[i].in_use) {
4372 shm_regions[i].in_use = true;
4373 shm_regions[i].start = raddr;
4374 shm_regions[i].size = shm_info.shm_segsz;
4375 break;
4379 mmap_unlock();
4380 return raddr;
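/*
 * Worked example for the alignment check above (illustration only):
 * with shmlba = 0x1000, a request for shmaddr = 0x40001234 is not
 * SHMLBA-aligned; with SHM_RND set it is rounded down to
 * 0x40001234 & ~0xfff = 0x40001000, and without SHM_RND the call
 * fails with -TARGET_EINVAL.
 */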
4384 static inline abi_long do_shmdt(abi_ulong shmaddr)
4386 int i;
4387 abi_long rv;
4389 mmap_lock();
4391 for (i = 0; i < N_SHM_REGIONS; ++i) {
4392 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4393 shm_regions[i].in_use = false;
4394 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4395 break;
4398 rv = get_errno(shmdt(g2h(shmaddr)));
4400 mmap_unlock();
4402 return rv;
4405 #ifdef TARGET_NR_ipc
4406 /* ??? This only works with linear mappings. */
4407 /* do_ipc() must return target values and target errnos. */
4408 static abi_long do_ipc(CPUArchState *cpu_env,
4409 unsigned int call, abi_long first,
4410 abi_long second, abi_long third,
4411 abi_long ptr, abi_long fifth)
4413 int version;
4414 abi_long ret = 0;
4416 version = call >> 16;
4417 call &= 0xffff;
4419 switch (call) {
4420 case IPCOP_semop:
4421 ret = do_semtimedop(first, ptr, second, 0);
4422 break;
4423 case IPCOP_semtimedop:
4424 /*
4425 * The s390 sys_ipc variant has only five parameters instead of six
4426 * (as in the default variant); the only difference is the handling of
4427 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4428 * to a struct timespec while the generic variant uses the fifth parameter.
4429 */
4430 #if defined(TARGET_S390X)
4431 ret = do_semtimedop(first, ptr, second, third);
4432 #else
4433 ret = do_semtimedop(first, ptr, second, fifth);
4434 #endif
4435 break;
4437 case IPCOP_semget:
4438 ret = get_errno(semget(first, second, third));
4439 break;
4441 case IPCOP_semctl: {
4442 /* The semun argument to semctl is passed by value, so dereference the
4443 * ptr argument. */
4444 abi_ulong atptr;
4445 get_user_ual(atptr, ptr);
4446 ret = do_semctl(first, second, third, atptr);
4447 break;
4450 case IPCOP_msgget:
4451 ret = get_errno(msgget(first, second));
4452 break;
4454 case IPCOP_msgsnd:
4455 ret = do_msgsnd(first, ptr, second, third);
4456 break;
4458 case IPCOP_msgctl:
4459 ret = do_msgctl(first, second, ptr);
4460 break;
4462 case IPCOP_msgrcv:
4463 switch (version) {
4464 case 0:
4466 struct target_ipc_kludge {
4467 abi_long msgp;
4468 abi_long msgtyp;
4469 } *tmp;
4471 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4472 ret = -TARGET_EFAULT;
4473 break;
4476 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4478 unlock_user_struct(tmp, ptr, 0);
4479 break;
4481 default:
4482 ret = do_msgrcv(first, ptr, second, fifth, third);
4484 break;
4486 case IPCOP_shmat:
4487 switch (version) {
4488 default:
4490 abi_ulong raddr;
4491 raddr = do_shmat(cpu_env, first, ptr, second);
4492 if (is_error(raddr))
4493 return get_errno(raddr);
4494 if (put_user_ual(raddr, third))
4495 return -TARGET_EFAULT;
4496 break;
4498 case 1:
4499 ret = -TARGET_EINVAL;
4500 break;
4502 break;
4503 case IPCOP_shmdt:
4504 ret = do_shmdt(ptr);
4505 break;
4507 case IPCOP_shmget:
4508 /* IPC_* flag values are the same on all linux platforms */
4509 ret = get_errno(shmget(first, second, third));
4510 break;
4512 /* IPC_* and SHM_* command values are the same on all linux platforms */
4513 case IPCOP_shmctl:
4514 ret = do_shmctl(first, second, ptr);
4515 break;
4516 default:
4517 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4518 call, version);
4519 ret = -TARGET_ENOSYS;
4520 break;
4522 return ret;
4524 #endif
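/*
 * Encoding sketch (an observation from the unpacking above, for
 * illustration): the guest packs a calling-convention version into the
 * upper 16 bits of the ipc() call number, so a request of
 * (1 << 16) | IPCOP_msgrcv selects msgrcv with version 1 (direct
 * msgp/msgtyp arguments), while a plain IPCOP_msgrcv is version 0 and
 * uses the kludge struct holding msgp and msgtyp.
 */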
4526 /* kernel structure types definitions */
4528 #define STRUCT(name, ...) STRUCT_ ## name,
4529 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4530 enum {
4531 #include "syscall_types.h"
4532 STRUCT_MAX
4534 #undef STRUCT
4535 #undef STRUCT_SPECIAL
4537 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4538 #define STRUCT_SPECIAL(name)
4539 #include "syscall_types.h"
4540 #undef STRUCT
4541 #undef STRUCT_SPECIAL
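/*
 * Expansion sketch (hypothetical entry, for illustration): a line in
 * syscall_types.h such as
 *   STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 * contributes the enum constant STRUCT_winsize in the first include
 * above, and in the second include becomes
 *   static const argtype struct_winsize_def[] =
 *       { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 * STRUCT_SPECIAL entries get the enum constant only; their conversion
 * is hand-written (as with the termios handling later in this file).
 */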
4543 #define MAX_STRUCT_SIZE 4096
4545 #ifdef CONFIG_FIEMAP
4546 /* So fiemap access checks don't overflow on 32 bit systems.
4547 * This is very slightly smaller than the limit imposed by
4548 * the underlying kernel.
4549 */
4550 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4551 / sizeof(struct fiemap_extent))
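/*
 * Worked example (sizes are typical 64-bit values, shown for
 * illustration only): with sizeof(struct fiemap) == 32 and
 * sizeof(struct fiemap_extent) == 56, the cap is
 * (0xffffffff - 32) / 56, roughly 76.7 million extents, so
 * sizeof(struct fiemap) + fm_extent_count * sizeof(struct fiemap_extent)
 * can never wrap a 32-bit size computation.
 */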
4553 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4554 int fd, int cmd, abi_long arg)
4556 /* The parameter for this ioctl is a struct fiemap followed
4557 * by an array of struct fiemap_extent whose size is set
4558 * in fiemap->fm_extent_count. The array is filled in by the
4559 * ioctl.
4560 */
4561 int target_size_in, target_size_out;
4562 struct fiemap *fm;
4563 const argtype *arg_type = ie->arg_type;
4564 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4565 void *argptr, *p;
4566 abi_long ret;
4567 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4568 uint32_t outbufsz;
4569 int free_fm = 0;
4571 assert(arg_type[0] == TYPE_PTR);
4572 assert(ie->access == IOC_RW);
4573 arg_type++;
4574 target_size_in = thunk_type_size(arg_type, 0);
4575 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4576 if (!argptr) {
4577 return -TARGET_EFAULT;
4579 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4580 unlock_user(argptr, arg, 0);
4581 fm = (struct fiemap *)buf_temp;
4582 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4583 return -TARGET_EINVAL;
4586 outbufsz = sizeof (*fm) +
4587 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4589 if (outbufsz > MAX_STRUCT_SIZE) {
4590 /* We can't fit all the extents into the fixed size buffer.
4591 * Allocate one that is large enough and use it instead.
4592 */
4593 fm = g_try_malloc(outbufsz);
4594 if (!fm) {
4595 return -TARGET_ENOMEM;
4597 memcpy(fm, buf_temp, sizeof(struct fiemap));
4598 free_fm = 1;
4600 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4601 if (!is_error(ret)) {
4602 target_size_out = target_size_in;
4603 /* An extent_count of 0 means we were only counting the extents
4604 * so there are no structs to copy.
4605 */
4606 if (fm->fm_extent_count != 0) {
4607 target_size_out += fm->fm_mapped_extents * extent_size;
4609 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4610 if (!argptr) {
4611 ret = -TARGET_EFAULT;
4612 } else {
4613 /* Convert the struct fiemap */
4614 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4615 if (fm->fm_extent_count != 0) {
4616 p = argptr + target_size_in;
4617 /* ...and then all the struct fiemap_extents */
4618 for (i = 0; i < fm->fm_mapped_extents; i++) {
4619 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4620 THUNK_TARGET);
4621 p += extent_size;
4624 unlock_user(argptr, arg, target_size_out);
4627 if (free_fm) {
4628 g_free(fm);
4630 return ret;
4632 #endif
4634 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4635 int fd, int cmd, abi_long arg)
4637 const argtype *arg_type = ie->arg_type;
4638 int target_size;
4639 void *argptr;
4640 int ret;
4641 struct ifconf *host_ifconf;
4642 uint32_t outbufsz;
4643 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4644 int target_ifreq_size;
4645 int nb_ifreq;
4646 int free_buf = 0;
4647 int i;
4648 int target_ifc_len;
4649 abi_long target_ifc_buf;
4650 int host_ifc_len;
4651 char *host_ifc_buf;
4653 assert(arg_type[0] == TYPE_PTR);
4654 assert(ie->access == IOC_RW);
4656 arg_type++;
4657 target_size = thunk_type_size(arg_type, 0);
4659 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4660 if (!argptr)
4661 return -TARGET_EFAULT;
4662 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4663 unlock_user(argptr, arg, 0);
4665 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4666 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4667 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4669 if (target_ifc_buf != 0) {
4670 target_ifc_len = host_ifconf->ifc_len;
4671 nb_ifreq = target_ifc_len / target_ifreq_size;
4672 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4674 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4675 if (outbufsz > MAX_STRUCT_SIZE) {
4676 /*
4677 * We can't fit all the ifreq entries into the fixed size buffer.
4678 * Allocate one that is large enough and use it instead.
4679 */
4680 host_ifconf = malloc(outbufsz);
4681 if (!host_ifconf) {
4682 return -TARGET_ENOMEM;
4684 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4685 free_buf = 1;
4687 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4689 host_ifconf->ifc_len = host_ifc_len;
4690 } else {
4691 host_ifc_buf = NULL;
4693 host_ifconf->ifc_buf = host_ifc_buf;
4695 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4696 if (!is_error(ret)) {
4697 /* convert host ifc_len to target ifc_len */
4699 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4700 target_ifc_len = nb_ifreq * target_ifreq_size;
4701 host_ifconf->ifc_len = target_ifc_len;
4703 /* restore target ifc_buf */
4705 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4707 /* copy struct ifconf to target user */
4709 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4710 if (!argptr)
4711 return -TARGET_EFAULT;
4712 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4713 unlock_user(argptr, arg, target_size);
4715 if (target_ifc_buf != 0) {
4716 /* copy ifreq[] to target user */
4717 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4718 for (i = 0; i < nb_ifreq ; i++) {
4719 thunk_convert(argptr + i * target_ifreq_size,
4720 host_ifc_buf + i * sizeof(struct ifreq),
4721 ifreq_arg_type, THUNK_TARGET);
4723 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4727 if (free_buf) {
4728 free(host_ifconf);
4731 return ret;
4734 #if defined(CONFIG_USBFS)
4735 #if HOST_LONG_BITS > 64
4736 #error USBDEVFS thunks do not support >64 bit hosts yet.
4737 #endif
4738 struct live_urb {
4739 uint64_t target_urb_adr;
4740 uint64_t target_buf_adr;
4741 char *target_buf_ptr;
4742 struct usbdevfs_urb host_urb;
4745 static GHashTable *usbdevfs_urb_hashtable(void)
4747 static GHashTable *urb_hashtable;
4749 if (!urb_hashtable) {
4750 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4752 return urb_hashtable;
4755 static void urb_hashtable_insert(struct live_urb *urb)
4757 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4758 g_hash_table_insert(urb_hashtable, urb, urb);
4761 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4763 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4764 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4767 static void urb_hashtable_remove(struct live_urb *urb)
4769 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4770 g_hash_table_remove(urb_hashtable, urb);
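/*
 * Lifecycle sketch of the table above (illustration only):
 *   USBDEVFS_SUBMITURB  -> allocate a live_urb, urb_hashtable_insert()
 *   USBDEVFS_DISCARDURB -> urb_hashtable_lookup() by guest URB address
 *   USBDEVFS_REAPURB    -> recover the live_urb from the host pointer,
 *                          urb_hashtable_remove(), free it
 * The key is the 64-bit guest address of the URB (the first field of
 * struct live_urb), which is why the table uses g_int64_hash and
 * g_int64_equal rather than pointer hashing.
 */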
4773 static abi_long
4774 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4775 int fd, int cmd, abi_long arg)
4777 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4778 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4779 struct live_urb *lurb;
4780 void *argptr;
4781 uint64_t hurb;
4782 int target_size;
4783 uintptr_t target_urb_adr;
4784 abi_long ret;
4786 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4788 memset(buf_temp, 0, sizeof(uint64_t));
4789 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4790 if (is_error(ret)) {
4791 return ret;
4794 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4795 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4796 if (!lurb->target_urb_adr) {
4797 return -TARGET_EFAULT;
4799 urb_hashtable_remove(lurb);
4800 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4801 lurb->host_urb.buffer_length);
4802 lurb->target_buf_ptr = NULL;
4804 /* restore the guest buffer pointer */
4805 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4807 /* update the guest urb struct */
4808 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4809 if (!argptr) {
4810 g_free(lurb);
4811 return -TARGET_EFAULT;
4813 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4814 unlock_user(argptr, lurb->target_urb_adr, target_size);
4816 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4817 /* write back the urb handle */
4818 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4819 if (!argptr) {
4820 g_free(lurb);
4821 return -TARGET_EFAULT;
4824 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4825 target_urb_adr = lurb->target_urb_adr;
4826 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4827 unlock_user(argptr, arg, target_size);
4829 g_free(lurb);
4830 return ret;
4833 static abi_long
4834 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4835 uint8_t *buf_temp __attribute__((unused)),
4836 int fd, int cmd, abi_long arg)
4838 struct live_urb *lurb;
4840 /* map target address back to host URB with metadata. */
4841 lurb = urb_hashtable_lookup(arg);
4842 if (!lurb) {
4843 return -TARGET_EFAULT;
4845 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4848 static abi_long
4849 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4850 int fd, int cmd, abi_long arg)
4852 const argtype *arg_type = ie->arg_type;
4853 int target_size;
4854 abi_long ret;
4855 void *argptr;
4856 int rw_dir;
4857 struct live_urb *lurb;
4859 /*
4860 * Each submitted URB needs to map to a unique ID for the
4861 * kernel, and that unique ID needs to be a pointer to
4862 * host memory. Hence, we need to malloc for each URB.
4863 * Isochronous transfers have a variable length struct.
4864 */
4865 arg_type++;
4866 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4868 /* construct host copy of urb and metadata */
4869 lurb = g_try_malloc0(sizeof(struct live_urb));
4870 if (!lurb) {
4871 return -TARGET_ENOMEM;
4874 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4875 if (!argptr) {
4876 g_free(lurb);
4877 return -TARGET_EFAULT;
4879 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4880 unlock_user(argptr, arg, 0);
4882 lurb->target_urb_adr = arg;
4883 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4885 /* buffer space used depends on endpoint type so lock the entire buffer */
4886 /* control type urbs should check the buffer contents for true direction */
4887 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4888 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4889 lurb->host_urb.buffer_length, 1);
4890 if (lurb->target_buf_ptr == NULL) {
4891 g_free(lurb);
4892 return -TARGET_EFAULT;
4895 /* update buffer pointer in host copy */
4896 lurb->host_urb.buffer = lurb->target_buf_ptr;
4898 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4899 if (is_error(ret)) {
4900 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4901 g_free(lurb);
4902 } else {
4903 urb_hashtable_insert(lurb);
4906 return ret;
4908 #endif /* CONFIG_USBFS */
4910 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4911 int cmd, abi_long arg)
4913 void *argptr;
4914 struct dm_ioctl *host_dm;
4915 abi_long guest_data;
4916 uint32_t guest_data_size;
4917 int target_size;
4918 const argtype *arg_type = ie->arg_type;
4919 abi_long ret;
4920 void *big_buf = NULL;
4921 char *host_data;
4923 arg_type++;
4924 target_size = thunk_type_size(arg_type, 0);
4925 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4926 if (!argptr) {
4927 ret = -TARGET_EFAULT;
4928 goto out;
4930 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4931 unlock_user(argptr, arg, 0);
4933 /* buf_temp is too small, so fetch things into a bigger buffer */
4934 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4935 memcpy(big_buf, buf_temp, target_size);
4936 buf_temp = big_buf;
4937 host_dm = big_buf;
4939 guest_data = arg + host_dm->data_start;
4940 if ((guest_data - arg) < 0) {
4941 ret = -TARGET_EINVAL;
4942 goto out;
4944 guest_data_size = host_dm->data_size - host_dm->data_start;
4945 host_data = (char*)host_dm + host_dm->data_start;
4947 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4948 if (!argptr) {
4949 ret = -TARGET_EFAULT;
4950 goto out;
4953 switch (ie->host_cmd) {
4954 case DM_REMOVE_ALL:
4955 case DM_LIST_DEVICES:
4956 case DM_DEV_CREATE:
4957 case DM_DEV_REMOVE:
4958 case DM_DEV_SUSPEND:
4959 case DM_DEV_STATUS:
4960 case DM_DEV_WAIT:
4961 case DM_TABLE_STATUS:
4962 case DM_TABLE_CLEAR:
4963 case DM_TABLE_DEPS:
4964 case DM_LIST_VERSIONS:
4965 /* no input data */
4966 break;
4967 case DM_DEV_RENAME:
4968 case DM_DEV_SET_GEOMETRY:
4969 /* data contains only strings */
4970 memcpy(host_data, argptr, guest_data_size);
4971 break;
4972 case DM_TARGET_MSG:
4973 memcpy(host_data, argptr, guest_data_size);
4974 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4975 break;
4976 case DM_TABLE_LOAD:
4978 void *gspec = argptr;
4979 void *cur_data = host_data;
4980 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4981 int spec_size = thunk_type_size(arg_type, 0);
4982 int i;
4984 for (i = 0; i < host_dm->target_count; i++) {
4985 struct dm_target_spec *spec = cur_data;
4986 uint32_t next;
4987 int slen;
4989 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4990 slen = strlen((char*)gspec + spec_size) + 1;
4991 next = spec->next;
4992 spec->next = sizeof(*spec) + slen;
4993 strcpy((char*)&spec[1], gspec + spec_size);
4994 gspec += next;
4995 cur_data += spec->next;
4997 break;
4999 default:
5000 ret = -TARGET_EINVAL;
5001 unlock_user(argptr, guest_data, 0);
5002 goto out;
5004 unlock_user(argptr, guest_data, 0);
5006 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5007 if (!is_error(ret)) {
5008 guest_data = arg + host_dm->data_start;
5009 guest_data_size = host_dm->data_size - host_dm->data_start;
5010 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5011 switch (ie->host_cmd) {
5012 case DM_REMOVE_ALL:
5013 case DM_DEV_CREATE:
5014 case DM_DEV_REMOVE:
5015 case DM_DEV_RENAME:
5016 case DM_DEV_SUSPEND:
5017 case DM_DEV_STATUS:
5018 case DM_TABLE_LOAD:
5019 case DM_TABLE_CLEAR:
5020 case DM_TARGET_MSG:
5021 case DM_DEV_SET_GEOMETRY:
5022 /* no return data */
5023 break;
5024 case DM_LIST_DEVICES:
5026 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5027 uint32_t remaining_data = guest_data_size;
5028 void *cur_data = argptr;
5029 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5030 int nl_size = 12; /* can't use thunk_size due to alignment */
5032 while (1) {
5033 uint32_t next = nl->next;
5034 if (next) {
5035 nl->next = nl_size + (strlen(nl->name) + 1);
5037 if (remaining_data < nl->next) {
5038 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5039 break;
5041 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5042 strcpy(cur_data + nl_size, nl->name);
5043 cur_data += nl->next;
5044 remaining_data -= nl->next;
5045 if (!next) {
5046 break;
5048 nl = (void*)nl + next;
5050 break;
5052 case DM_DEV_WAIT:
5053 case DM_TABLE_STATUS:
5055 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5056 void *cur_data = argptr;
5057 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5058 int spec_size = thunk_type_size(arg_type, 0);
5059 int i;
5061 for (i = 0; i < host_dm->target_count; i++) {
5062 uint32_t next = spec->next;
5063 int slen = strlen((char*)&spec[1]) + 1;
5064 spec->next = (cur_data - argptr) + spec_size + slen;
5065 if (guest_data_size < spec->next) {
5066 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5067 break;
5069 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5070 strcpy(cur_data + spec_size, (char*)&spec[1]);
5071 cur_data = argptr + spec->next;
5072 spec = (void*)host_dm + host_dm->data_start + next;
5074 break;
5076 case DM_TABLE_DEPS:
5078 void *hdata = (void*)host_dm + host_dm->data_start;
5079 int count = *(uint32_t*)hdata;
5080 uint64_t *hdev = hdata + 8;
5081 uint64_t *gdev = argptr + 8;
5082 int i;
5084 *(uint32_t*)argptr = tswap32(count);
5085 for (i = 0; i < count; i++) {
5086 *gdev = tswap64(*hdev);
5087 gdev++;
5088 hdev++;
5090 break;
5092 case DM_LIST_VERSIONS:
5094 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5095 uint32_t remaining_data = guest_data_size;
5096 void *cur_data = argptr;
5097 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5098 int vers_size = thunk_type_size(arg_type, 0);
5100 while (1) {
5101 uint32_t next = vers->next;
5102 if (next) {
5103 vers->next = vers_size + (strlen(vers->name) + 1);
5105 if (remaining_data < vers->next) {
5106 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5107 break;
5109 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5110 strcpy(cur_data + vers_size, vers->name);
5111 cur_data += vers->next;
5112 remaining_data -= vers->next;
5113 if (!next) {
5114 break;
5116 vers = (void*)vers + next;
5118 break;
5120 default:
5121 unlock_user(argptr, guest_data, 0);
5122 ret = -TARGET_EINVAL;
5123 goto out;
5125 unlock_user(argptr, guest_data, guest_data_size);
5127 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5128 if (!argptr) {
5129 ret = -TARGET_EFAULT;
5130 goto out;
5132 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5133 unlock_user(argptr, arg, target_size);
5135 out:
5136 g_free(big_buf);
5137 return ret;
5140 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5141 int cmd, abi_long arg)
5143 void *argptr;
5144 int target_size;
5145 const argtype *arg_type = ie->arg_type;
5146 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5147 abi_long ret;
5149 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5150 struct blkpg_partition host_part;
5152 /* Read and convert blkpg */
5153 arg_type++;
5154 target_size = thunk_type_size(arg_type, 0);
5155 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5156 if (!argptr) {
5157 ret = -TARGET_EFAULT;
5158 goto out;
5160 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5161 unlock_user(argptr, arg, 0);
5163 switch (host_blkpg->op) {
5164 case BLKPG_ADD_PARTITION:
5165 case BLKPG_DEL_PARTITION:
5166 /* payload is struct blkpg_partition */
5167 break;
5168 default:
5169 /* Unknown opcode */
5170 ret = -TARGET_EINVAL;
5171 goto out;
5174 /* Read and convert blkpg->data */
5175 arg = (abi_long)(uintptr_t)host_blkpg->data;
5176 target_size = thunk_type_size(part_arg_type, 0);
5177 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5178 if (!argptr) {
5179 ret = -TARGET_EFAULT;
5180 goto out;
5182 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5183 unlock_user(argptr, arg, 0);
5185 /* Swizzle the data pointer to our local copy and call! */
5186 host_blkpg->data = &host_part;
5187 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5189 out:
5190 return ret;
5193 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5194 int fd, int cmd, abi_long arg)
5196 const argtype *arg_type = ie->arg_type;
5197 const StructEntry *se;
5198 const argtype *field_types;
5199 const int *dst_offsets, *src_offsets;
5200 int target_size;
5201 void *argptr;
5202 abi_ulong *target_rt_dev_ptr = NULL;
5203 unsigned long *host_rt_dev_ptr = NULL;
5204 abi_long ret;
5205 int i;
5207 assert(ie->access == IOC_W);
5208 assert(*arg_type == TYPE_PTR);
5209 arg_type++;
5210 assert(*arg_type == TYPE_STRUCT);
5211 target_size = thunk_type_size(arg_type, 0);
5212 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5213 if (!argptr) {
5214 return -TARGET_EFAULT;
5216 arg_type++;
5217 assert(*arg_type == (int)STRUCT_rtentry);
5218 se = struct_entries + *arg_type++;
5219 assert(se->convert[0] == NULL);
5220 /* convert struct here to be able to catch rt_dev string */
5221 field_types = se->field_types;
5222 dst_offsets = se->field_offsets[THUNK_HOST];
5223 src_offsets = se->field_offsets[THUNK_TARGET];
5224 for (i = 0; i < se->nb_fields; i++) {
5225 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5226 assert(*field_types == TYPE_PTRVOID);
5227 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5228 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5229 if (*target_rt_dev_ptr != 0) {
5230 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5231 tswapal(*target_rt_dev_ptr));
5232 if (!*host_rt_dev_ptr) {
5233 unlock_user(argptr, arg, 0);
5234 return -TARGET_EFAULT;
5236 } else {
5237 *host_rt_dev_ptr = 0;
5239 field_types++;
5240 continue;
5242 field_types = thunk_convert(buf_temp + dst_offsets[i],
5243 argptr + src_offsets[i],
5244 field_types, THUNK_HOST);
5246 unlock_user(argptr, arg, 0);
5248 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5250 assert(host_rt_dev_ptr != NULL);
5251 assert(target_rt_dev_ptr != NULL);
5252 if (*host_rt_dev_ptr != 0) {
5253 unlock_user((void *)*host_rt_dev_ptr,
5254 *target_rt_dev_ptr, 0);
5256 return ret;
5259 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5260 int fd, int cmd, abi_long arg)
5262 int sig = target_to_host_signal(arg);
5263 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5266 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5267 int fd, int cmd, abi_long arg)
5269 struct timeval tv;
5270 abi_long ret;
5272 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5273 if (is_error(ret)) {
5274 return ret;
5277 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5278 if (copy_to_user_timeval(arg, &tv)) {
5279 return -TARGET_EFAULT;
5281 } else {
5282 if (copy_to_user_timeval64(arg, &tv)) {
5283 return -TARGET_EFAULT;
5287 return ret;
5290 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5291 int fd, int cmd, abi_long arg)
5293 struct timespec ts;
5294 abi_long ret;
5296 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5297 if (is_error(ret)) {
5298 return ret;
5301 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5302 if (host_to_target_timespec(arg, &ts)) {
5303 return -TARGET_EFAULT;
5305 } else {
5306 if (host_to_target_timespec64(arg, &ts)) {
5307 return -TARGET_EFAULT;
5311 return ret;
5314 #ifdef TIOCGPTPEER
5315 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5316 int fd, int cmd, abi_long arg)
5318 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5319 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5321 #endif
5323 #ifdef HAVE_DRM_H
5325 static void unlock_drm_version(struct drm_version *host_ver,
5326 struct target_drm_version *target_ver,
5327 bool copy)
5329 unlock_user(host_ver->name, target_ver->name,
5330 copy ? host_ver->name_len : 0);
5331 unlock_user(host_ver->date, target_ver->date,
5332 copy ? host_ver->date_len : 0);
5333 unlock_user(host_ver->desc, target_ver->desc,
5334 copy ? host_ver->desc_len : 0);
5337 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5338 struct target_drm_version *target_ver)
5340 memset(host_ver, 0, sizeof(*host_ver));
5342 __get_user(host_ver->name_len, &target_ver->name_len);
5343 if (host_ver->name_len) {
5344 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5345 target_ver->name_len, 0);
5346 if (!host_ver->name) {
5347 return -EFAULT;
5351 __get_user(host_ver->date_len, &target_ver->date_len);
5352 if (host_ver->date_len) {
5353 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5354 target_ver->date_len, 0);
5355 if (!host_ver->date) {
5356 goto err;
5360 __get_user(host_ver->desc_len, &target_ver->desc_len);
5361 if (host_ver->desc_len) {
5362 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5363 target_ver->desc_len, 0);
5364 if (!host_ver->desc) {
5365 goto err;
5369 return 0;
5370 err:
5371 unlock_drm_version(host_ver, target_ver, false);
5372 return -EFAULT;
5375 static inline void host_to_target_drmversion(
5376 struct target_drm_version *target_ver,
5377 struct drm_version *host_ver)
5379 __put_user(host_ver->version_major, &target_ver->version_major);
5380 __put_user(host_ver->version_minor, &target_ver->version_minor);
5381 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5382 __put_user(host_ver->name_len, &target_ver->name_len);
5383 __put_user(host_ver->date_len, &target_ver->date_len);
5384 __put_user(host_ver->desc_len, &target_ver->desc_len);
5385 unlock_drm_version(host_ver, target_ver, true);
5388 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5389 int fd, int cmd, abi_long arg)
5391 struct drm_version *ver;
5392 struct target_drm_version *target_ver;
5393 abi_long ret;
5395 switch (ie->host_cmd) {
5396 case DRM_IOCTL_VERSION:
5397 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5398 return -TARGET_EFAULT;
5400 ver = (struct drm_version *)buf_temp;
5401 ret = target_to_host_drmversion(ver, target_ver);
5402 if (!is_error(ret)) {
5403 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5404 if (is_error(ret)) {
5405 unlock_drm_version(ver, target_ver, false);
5406 } else {
5407 host_to_target_drmversion(target_ver, ver);
5410 unlock_user_struct(target_ver, arg, 0);
5411 return ret;
5413 return -TARGET_ENOSYS;
5416 #endif
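/*
 * Guest-side usage sketch (illustration only; this is the usual libdrm
 * pattern, not code from this file): DRM_IOCTL_VERSION is typically
 * issued twice, first with name_len/date_len/desc_len set to 0 to learn
 * the string sizes, then again with buffers allocated. That is why
 * target_to_host_drmversion() locks a guest buffer only when the
 * corresponding length is non-zero, and why the strings are copied
 * back only on success.
 */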
5418 IOCTLEntry ioctl_entries[] = {
5419 #define IOCTL(cmd, access, ...) \
5420 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5421 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5422 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5423 #define IOCTL_IGNORE(cmd) \
5424 { TARGET_ ## cmd, 0, #cmd },
5425 #include "ioctls.h"
5426 { 0, 0, },
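/*
 * Expansion sketch (hypothetical entry, for illustration): an ioctls.h
 * line such as
 *   IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * becomes the table entry
 *   { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 * while IOCTL_SPECIAL additionally fills the do_ioctl callback and
 * IOCTL_IGNORE leaves host_cmd 0, so do_ioctl() below returns
 * -TARGET_ENOSYS for it.
 */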
5429 /* ??? Implement proper locking for ioctls. */
5430 /* do_ioctl() must return target values and target errnos. */
5431 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5433 const IOCTLEntry *ie;
5434 const argtype *arg_type;
5435 abi_long ret;
5436 uint8_t buf_temp[MAX_STRUCT_SIZE];
5437 int target_size;
5438 void *argptr;
5440 ie = ioctl_entries;
5441 for(;;) {
5442 if (ie->target_cmd == 0) {
5443 qemu_log_mask(
5444 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5445 return -TARGET_ENOSYS;
5447 if (ie->target_cmd == cmd)
5448 break;
5449 ie++;
5451 arg_type = ie->arg_type;
5452 if (ie->do_ioctl) {
5453 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5454 } else if (!ie->host_cmd) {
5455 /* Some architectures define BSD ioctls in their headers
5456 that are not implemented in Linux. */
5457 return -TARGET_ENOSYS;
5460 switch(arg_type[0]) {
5461 case TYPE_NULL:
5462 /* no argument */
5463 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5464 break;
5465 case TYPE_PTRVOID:
5466 case TYPE_INT:
5467 case TYPE_LONG:
5468 case TYPE_ULONG:
5469 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5470 break;
5471 case TYPE_PTR:
5472 arg_type++;
5473 target_size = thunk_type_size(arg_type, 0);
5474 switch(ie->access) {
5475 case IOC_R:
5476 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5477 if (!is_error(ret)) {
5478 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5479 if (!argptr)
5480 return -TARGET_EFAULT;
5481 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5482 unlock_user(argptr, arg, target_size);
5484 break;
5485 case IOC_W:
5486 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5487 if (!argptr)
5488 return -TARGET_EFAULT;
5489 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5490 unlock_user(argptr, arg, 0);
5491 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5492 break;
5493 default:
5494 case IOC_RW:
5495 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5496 if (!argptr)
5497 return -TARGET_EFAULT;
5498 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5499 unlock_user(argptr, arg, 0);
5500 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5501 if (!is_error(ret)) {
5502 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5503 if (!argptr)
5504 return -TARGET_EFAULT;
5505 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5506 unlock_user(argptr, arg, target_size);
5508 break;
5510 break;
5511 default:
5512 qemu_log_mask(LOG_UNIMP,
5513 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5514 (long)cmd, arg_type[0]);
5515 ret = -TARGET_ENOSYS;
5516 break;
5518 return ret;
5521 static const bitmask_transtbl iflag_tbl[] = {
5522 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5523 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5524 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5525 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5526 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5527 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5528 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5529 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5530 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5531 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5532 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5533 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5534 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5535 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5536 { 0, 0, 0, 0 }
5539 static const bitmask_transtbl oflag_tbl[] = {
5540 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5541 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5542 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5543 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5544 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5545 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5546 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5547 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5548 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5549 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5550 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5551 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5552 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5553 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5554 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5555 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5556 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5557 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5558 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5559 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5560 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5561 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5562 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5563 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5564 { 0, 0, 0, 0 }
5567 static const bitmask_transtbl cflag_tbl[] = {
5568 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5569 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5570 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5571 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5572 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5573 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5574 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5575 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5576 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5577 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5578 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5579 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5580 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5581 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5582 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5583 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5584 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5585 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5586 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5587 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5588 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5589 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5590 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5591 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5592 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5593 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5594 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5595 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5596 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5597 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5598 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5599 { 0, 0, 0, 0 }
5602 static const bitmask_transtbl lflag_tbl[] = {
5603 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5604 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5605 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5606 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5607 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5608 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5609 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5610 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5611 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5612 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5613 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5614 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5615 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5616 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5617 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5618 { 0, 0, 0, 0 }
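/*
 * Sketch of how these quadruples are consumed (an assumption about
 * target_to_host_bitmask(), shown for illustration): each entry means
 * "if (value & target_mask) == target_bits, set host_bits in the
 * result", with a zero target_mask terminating the table.
 */
#if 0 /* example only */
static unsigned int example_target_to_host_bitmask(
    unsigned int target_val, const bitmask_transtbl *tbl)
{
    unsigned int host_val = 0;

    for (; tbl->target_mask; tbl++) {
        if ((target_val & tbl->target_mask) == tbl->target_bits) {
            host_val |= tbl->host_bits;
        }
    }
    return host_val;
}
#endif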
5621 static void target_to_host_termios (void *dst, const void *src)
5623 struct host_termios *host = dst;
5624 const struct target_termios *target = src;
5626 host->c_iflag =
5627 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5628 host->c_oflag =
5629 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5630 host->c_cflag =
5631 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5632 host->c_lflag =
5633 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5634 host->c_line = target->c_line;
5636 memset(host->c_cc, 0, sizeof(host->c_cc));
5637 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5638 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5639 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5640 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5641 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5642 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5643 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5644 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5645 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5646 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5647 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5648 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5649 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5650 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5651 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5652 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5653 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5656 static void host_to_target_termios (void *dst, const void *src)
5658 struct target_termios *target = dst;
5659 const struct host_termios *host = src;
5661 target->c_iflag =
5662 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5663 target->c_oflag =
5664 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5665 target->c_cflag =
5666 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5667 target->c_lflag =
5668 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5669 target->c_line = host->c_line;
5671 memset(target->c_cc, 0, sizeof(target->c_cc));
5672 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5673 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5674 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5675 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5676 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5677 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5678 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5679 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5680 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5681 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5682 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5683 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5684 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5685 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5686 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5687 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5688 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5691 static const StructEntry struct_termios_def = {
5692 .convert = { host_to_target_termios, target_to_host_termios },
5693 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5694 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5697 static bitmask_transtbl mmap_flags_tbl[] = {
5698 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5699 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5700 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5701 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5702 MAP_ANONYMOUS, MAP_ANONYMOUS },
5703 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5704 MAP_GROWSDOWN, MAP_GROWSDOWN },
5705 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5706 MAP_DENYWRITE, MAP_DENYWRITE },
5707 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5708 MAP_EXECUTABLE, MAP_EXECUTABLE },
5709 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5710 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5711 MAP_NORESERVE, MAP_NORESERVE },
5712 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5713 /* MAP_STACK had been ignored by the kernel for quite some time.
5714 Recognize it for the target insofar as we do not want to pass
5715 it through to the host. */
5716 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5717 { 0, 0, 0, 0 }
5720 /*
5721 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5722 * TARGET_I386 is defined if TARGET_X86_64 is defined.
5723 */
5724 #if defined(TARGET_I386)
5726 /* NOTE: there is really one LDT for all the threads */
5727 static uint8_t *ldt_table;
5729 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5731 int size;
5732 void *p;
5734 if (!ldt_table)
5735 return 0;
5736 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5737 if (size > bytecount)
5738 size = bytecount;
5739 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5740 if (!p)
5741 return -TARGET_EFAULT;
5742 /* ??? Should this be byteswapped? */
5743 memcpy(p, ldt_table, size);
5744 unlock_user(p, ptr, size);
5745 return size;
5748 /* XXX: add locking support */
5749 static abi_long write_ldt(CPUX86State *env,
5750 abi_ulong ptr, unsigned long bytecount, int oldmode)
5752 struct target_modify_ldt_ldt_s ldt_info;
5753 struct target_modify_ldt_ldt_s *target_ldt_info;
5754 int seg_32bit, contents, read_exec_only, limit_in_pages;
5755 int seg_not_present, useable, lm;
5756 uint32_t *lp, entry_1, entry_2;
5758 if (bytecount != sizeof(ldt_info))
5759 return -TARGET_EINVAL;
5760 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5761 return -TARGET_EFAULT;
5762 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5763 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5764 ldt_info.limit = tswap32(target_ldt_info->limit);
5765 ldt_info.flags = tswap32(target_ldt_info->flags);
5766 unlock_user_struct(target_ldt_info, ptr, 0);
5768 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5769 return -TARGET_EINVAL;
5770 seg_32bit = ldt_info.flags & 1;
5771 contents = (ldt_info.flags >> 1) & 3;
5772 read_exec_only = (ldt_info.flags >> 3) & 1;
5773 limit_in_pages = (ldt_info.flags >> 4) & 1;
5774 seg_not_present = (ldt_info.flags >> 5) & 1;
5775 useable = (ldt_info.flags >> 6) & 1;
5776 #ifdef TARGET_ABI32
5777 lm = 0;
5778 #else
5779 lm = (ldt_info.flags >> 7) & 1;
5780 #endif
5781 if (contents == 3) {
5782 if (oldmode)
5783 return -TARGET_EINVAL;
5784 if (seg_not_present == 0)
5785 return -TARGET_EINVAL;
5787 /* allocate the LDT */
5788 if (!ldt_table) {
5789 env->ldt.base = target_mmap(0,
5790 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5791 PROT_READ|PROT_WRITE,
5792 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5793 if (env->ldt.base == -1)
5794 return -TARGET_ENOMEM;
5795 memset(g2h(env->ldt.base), 0,
5796 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5797 env->ldt.limit = 0xffff;
5798 ldt_table = g2h(env->ldt.base);
5801 /* NOTE: same code as Linux kernel */
5802 /* Allow LDTs to be cleared by the user. */
5803 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5804 if (oldmode ||
5805 (contents == 0 &&
5806 read_exec_only == 1 &&
5807 seg_32bit == 0 &&
5808 limit_in_pages == 0 &&
5809 seg_not_present == 1 &&
5810 useable == 0 )) {
5811 entry_1 = 0;
5812 entry_2 = 0;
5813 goto install;
5817 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5818 (ldt_info.limit & 0x0ffff);
5819 entry_2 = (ldt_info.base_addr & 0xff000000) |
5820 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5821 (ldt_info.limit & 0xf0000) |
5822 ((read_exec_only ^ 1) << 9) |
5823 (contents << 10) |
5824 ((seg_not_present ^ 1) << 15) |
5825 (seg_32bit << 22) |
5826 (limit_in_pages << 23) |
5827 (lm << 21) |
5828 0x7000;
5829 if (!oldmode)
5830 entry_2 |= (useable << 20);
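/* Worked example (illustrative): with !oldmode, base_addr 0x12345678,
 * limit 0xabcde, seg_32bit=1, limit_in_pages=1, useable=1 and all other
 * fields 0:
 *   entry_1 = (0x5678 << 16) | 0xbcde = 0x5678bcde
 *   entry_2 = 0x12000000 | 0x34 | 0xa0000 | (1 << 9) | (1 << 15)
 *           | (1 << 20) | (1 << 22) | (1 << 23) | 0x7000 = 0x12daf234
 * i.e. the usual x86 split of base and limit across the two descriptor
 * words, with the access bits packed into entry_2. */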
5832 /* Install the new entry ... */
5833 install:
5834 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5835 lp[0] = tswap32(entry_1);
5836 lp[1] = tswap32(entry_2);
5837 return 0;
5840 /* specific and weird i386 syscalls */
5841 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5842 unsigned long bytecount)
5844 abi_long ret;
5846 switch (func) {
5847 case 0:
5848 ret = read_ldt(ptr, bytecount);
5849 break;
5850 case 1:
5851 ret = write_ldt(env, ptr, bytecount, 1);
5852 break;
5853 case 0x11:
5854 ret = write_ldt(env, ptr, bytecount, 0);
5855 break;
5856 default:
5857 ret = -TARGET_ENOSYS;
5858 break;
5860 return ret;
5863 #if defined(TARGET_ABI32)
5864 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5866 uint64_t *gdt_table = g2h(env->gdt.base);
5867 struct target_modify_ldt_ldt_s ldt_info;
5868 struct target_modify_ldt_ldt_s *target_ldt_info;
5869 int seg_32bit, contents, read_exec_only, limit_in_pages;
5870 int seg_not_present, useable, lm;
5871 uint32_t *lp, entry_1, entry_2;
5872 int i;
5874 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5875 if (!target_ldt_info)
5876 return -TARGET_EFAULT;
5877 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5878 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5879 ldt_info.limit = tswap32(target_ldt_info->limit);
5880 ldt_info.flags = tswap32(target_ldt_info->flags);
5881 if (ldt_info.entry_number == -1) {
5882 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5883 if (gdt_table[i] == 0) {
5884 ldt_info.entry_number = i;
5885 target_ldt_info->entry_number = tswap32(i);
5886 break;
5890 unlock_user_struct(target_ldt_info, ptr, 1);
5892 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5893 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5894 return -TARGET_EINVAL;
5895 seg_32bit = ldt_info.flags & 1;
5896 contents = (ldt_info.flags >> 1) & 3;
5897 read_exec_only = (ldt_info.flags >> 3) & 1;
5898 limit_in_pages = (ldt_info.flags >> 4) & 1;
5899 seg_not_present = (ldt_info.flags >> 5) & 1;
5900 useable = (ldt_info.flags >> 6) & 1;
5901 #ifdef TARGET_ABI32
5902 lm = 0;
5903 #else
5904 lm = (ldt_info.flags >> 7) & 1;
5905 #endif
5907 if (contents == 3) {
5908 if (seg_not_present == 0)
5909 return -TARGET_EINVAL;
5912 /* NOTE: same code as Linux kernel */
5913 /* Allow LDTs to be cleared by the user. */
5914 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5915 if ((contents == 0 &&
5916 read_exec_only == 1 &&
5917 seg_32bit == 0 &&
5918 limit_in_pages == 0 &&
5919 seg_not_present == 1 &&
5920 useable == 0 )) {
5921 entry_1 = 0;
5922 entry_2 = 0;
5923 goto install;
5927 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5928 (ldt_info.limit & 0x0ffff);
5929 entry_2 = (ldt_info.base_addr & 0xff000000) |
5930 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5931 (ldt_info.limit & 0xf0000) |
5932 ((read_exec_only ^ 1) << 9) |
5933 (contents << 10) |
5934 ((seg_not_present ^ 1) << 15) |
5935 (seg_32bit << 22) |
5936 (limit_in_pages << 23) |
5937 (useable << 20) |
5938 (lm << 21) |
5939 0x7000;
5941 /* Install the new entry ... */
5942 install:
5943 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5944 lp[0] = tswap32(entry_1);
5945 lp[1] = tswap32(entry_2);
5946 return 0;
5949 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5951 struct target_modify_ldt_ldt_s *target_ldt_info;
5952 uint64_t *gdt_table = g2h(env->gdt.base);
5953 uint32_t base_addr, limit, flags;
5954 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5955 int seg_not_present, useable, lm;
5956 uint32_t *lp, entry_1, entry_2;
5958 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5959 if (!target_ldt_info)
5960 return -TARGET_EFAULT;
5961 idx = tswap32(target_ldt_info->entry_number);
5962 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5963 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5964 unlock_user_struct(target_ldt_info, ptr, 1);
5965 return -TARGET_EINVAL;
5967 lp = (uint32_t *)(gdt_table + idx);
5968 entry_1 = tswap32(lp[0]);
5969 entry_2 = tswap32(lp[1]);
5971 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5972 contents = (entry_2 >> 10) & 3;
5973 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5974 seg_32bit = (entry_2 >> 22) & 1;
5975 limit_in_pages = (entry_2 >> 23) & 1;
5976 useable = (entry_2 >> 20) & 1;
5977 #ifdef TARGET_ABI32
5978 lm = 0;
5979 #else
5980 lm = (entry_2 >> 21) & 1;
5981 #endif
5982 flags = (seg_32bit << 0) | (contents << 1) |
5983 (read_exec_only << 3) | (limit_in_pages << 4) |
5984 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5985 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5986 base_addr = (entry_1 >> 16) |
5987 (entry_2 & 0xff000000) |
5988 ((entry_2 & 0xff) << 16);
5989 target_ldt_info->base_addr = tswapal(base_addr);
5990 target_ldt_info->limit = tswap32(limit);
5991 target_ldt_info->flags = tswap32(flags);
5992 unlock_user_struct(target_ldt_info, ptr, 1);
5993 return 0;
5996 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5998 return -TARGET_ENOSYS;
6000 #else
6001 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6003 abi_long ret = 0;
6004 abi_ulong val;
6005 int idx;
6007 switch(code) {
6008 case TARGET_ARCH_SET_GS:
6009 case TARGET_ARCH_SET_FS:
6010 if (code == TARGET_ARCH_SET_GS)
6011 idx = R_GS;
6012 else
6013 idx = R_FS;
6014 cpu_x86_load_seg(env, idx, 0);
6015 env->segs[idx].base = addr;
6016 break;
6017 case TARGET_ARCH_GET_GS:
6018 case TARGET_ARCH_GET_FS:
6019 if (code == TARGET_ARCH_GET_GS)
6020 idx = R_GS;
6021 else
6022 idx = R_FS;
6023 val = env->segs[idx].base;
6024 if (put_user(val, addr, abi_ulong))
6025 ret = -TARGET_EFAULT;
6026 break;
6027 default:
6028 ret = -TARGET_EINVAL;
6029 break;
6031 return ret;
6033 #endif /* defined(TARGET_ABI32) */
6035 #endif /* defined(TARGET_I386) */
6037 #define NEW_STACK_SIZE 0x40000
6040 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6041 typedef struct {
6042 CPUArchState *env;
6043 pthread_mutex_t mutex;
6044 pthread_cond_t cond;
6045 pthread_t thread;
6046 uint32_t tid;
6047 abi_ulong child_tidptr;
6048 abi_ulong parent_tidptr;
6049 sigset_t sigmask;
6050 } new_thread_info;
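/* Handshake between do_fork() and clone_func() (descriptive note): the
 * parent holds clone_lock and info.mutex across pthread_create(); the
 * child publishes its tid, signals info.cond, then blocks on clone_lock
 * until the parent has finished setting up the TLS state, and only then
 * enters cpu_loop(). */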
6052 static void * QEMU_NORETURN clone_func(void *arg)
6054 new_thread_info *info = arg;
6055 CPUArchState *env;
6056 CPUState *cpu;
6057 TaskState *ts;
6059 rcu_register_thread();
6060 tcg_register_thread();
6061 env = info->env;
6062 cpu = env_cpu(env);
6063 thread_cpu = cpu;
6064 ts = (TaskState *)cpu->opaque;
6065 info->tid = sys_gettid();
6066 task_settid(ts);
6067 if (info->child_tidptr)
6068 put_user_u32(info->tid, info->child_tidptr);
6069 if (info->parent_tidptr)
6070 put_user_u32(info->tid, info->parent_tidptr);
6071 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6072 /* Enable signals. */
6073 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6074 /* Signal to the parent that we're ready. */
6075 pthread_mutex_lock(&info->mutex);
6076 pthread_cond_broadcast(&info->cond);
6077 pthread_mutex_unlock(&info->mutex);
6078 /* Wait until the parent has finished initializing the tls state. */
6079 pthread_mutex_lock(&clone_lock);
6080 pthread_mutex_unlock(&clone_lock);
6081 cpu_loop(env);
6082 /* never exits */
6085 /* do_fork() must return host values and target errnos (unlike most
6086 do_*() functions). */
6087 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6088 abi_ulong parent_tidptr, target_ulong newtls,
6089 abi_ulong child_tidptr)
6091 CPUState *cpu = env_cpu(env);
6092 int ret;
6093 TaskState *ts;
6094 CPUState *new_cpu;
6095 CPUArchState *new_env;
6096 sigset_t sigmask;
6098 flags &= ~CLONE_IGNORED_FLAGS;
6100 /* Emulate vfork() with fork() */
6101 if (flags & CLONE_VFORK)
6102 flags &= ~(CLONE_VFORK | CLONE_VM);
6104 if (flags & CLONE_VM) {
6105 TaskState *parent_ts = (TaskState *)cpu->opaque;
6106 new_thread_info info;
6107 pthread_attr_t attr;
6109 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6110 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6111 return -TARGET_EINVAL;
6114 ts = g_new0(TaskState, 1);
6115 init_task_state(ts);
6117 /* Grab a mutex so that thread setup appears atomic. */
6118 pthread_mutex_lock(&clone_lock);
6120 /* we create a new CPU instance. */
6121 new_env = cpu_copy(env);
6122 /* Init regs that differ from the parent. */
6123 cpu_clone_regs_child(new_env, newsp, flags);
6124 cpu_clone_regs_parent(env, flags);
6125 new_cpu = env_cpu(new_env);
6126 new_cpu->opaque = ts;
6127 ts->bprm = parent_ts->bprm;
6128 ts->info = parent_ts->info;
6129 ts->signal_mask = parent_ts->signal_mask;
6131 if (flags & CLONE_CHILD_CLEARTID) {
6132 ts->child_tidptr = child_tidptr;
6135 if (flags & CLONE_SETTLS) {
6136 cpu_set_tls (new_env, newtls);
6139 memset(&info, 0, sizeof(info));
6140 pthread_mutex_init(&info.mutex, NULL);
6141 pthread_mutex_lock(&info.mutex);
6142 pthread_cond_init(&info.cond, NULL);
6143 info.env = new_env;
6144 if (flags & CLONE_CHILD_SETTID) {
6145 info.child_tidptr = child_tidptr;
6147 if (flags & CLONE_PARENT_SETTID) {
6148 info.parent_tidptr = parent_tidptr;
6151 ret = pthread_attr_init(&attr);
6152 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6153 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6154 /* It is not safe to deliver signals until the child has finished
6155 initializing, so temporarily block all signals. */
6156 sigfillset(&sigmask);
6157 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6158 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6160 /* If this is our first additional thread, we need to ensure we
6161 * generate code for parallel execution and flush old translations.
6163 if (!parallel_cpus) {
6164 parallel_cpus = true;
6165 tb_flush(cpu);
6168 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6169 /* TODO: Free new CPU state if thread creation failed. */
6171 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6172 pthread_attr_destroy(&attr);
6173 if (ret == 0) {
6174 /* Wait for the child to initialize. */
6175 pthread_cond_wait(&info.cond, &info.mutex);
6176 ret = info.tid;
6177 } else {
6178 ret = -1;
6180 pthread_mutex_unlock(&info.mutex);
6181 pthread_cond_destroy(&info.cond);
6182 pthread_mutex_destroy(&info.mutex);
6183 pthread_mutex_unlock(&clone_lock);
6184 } else {
6185 /* if CLONE_VM is not set, we consider it a fork */
6186 if (flags & CLONE_INVALID_FORK_FLAGS) {
6187 return -TARGET_EINVAL;
6190 /* We can't support custom termination signals */
6191 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6192 return -TARGET_EINVAL;
6195 if (block_signals()) {
6196 return -TARGET_ERESTARTSYS;
6199 fork_start();
6200 ret = fork();
6201 if (ret == 0) {
6202 /* Child Process. */
6203 cpu_clone_regs_child(env, newsp, flags);
6204 fork_end(1);
6205 /* There is a race condition here. The parent process could
6206 theoretically read the TID in the child process before the child
6207 tid is set. This would require using either ptrace
6208 (not implemented) or having *_tidptr point at a shared memory
6209 mapping. We can't repeat the spinlock hack used above because
6210 the child process gets its own copy of the lock. */
6211 if (flags & CLONE_CHILD_SETTID)
6212 put_user_u32(sys_gettid(), child_tidptr);
6213 if (flags & CLONE_PARENT_SETTID)
6214 put_user_u32(sys_gettid(), parent_tidptr);
6215 ts = (TaskState *)cpu->opaque;
6216 if (flags & CLONE_SETTLS)
6217 cpu_set_tls (env, newtls);
6218 if (flags & CLONE_CHILD_CLEARTID)
6219 ts->child_tidptr = child_tidptr;
6220 } else {
6221 cpu_clone_regs_parent(env, flags);
6222 fork_end(0);
6225 return ret;
6228 /* warning: doesn't handle Linux-specific flags... */
6229 static int target_to_host_fcntl_cmd(int cmd)
6231 int ret;
6233 switch(cmd) {
6234 case TARGET_F_DUPFD:
6235 case TARGET_F_GETFD:
6236 case TARGET_F_SETFD:
6237 case TARGET_F_GETFL:
6238 case TARGET_F_SETFL:
6239 case TARGET_F_OFD_GETLK:
6240 case TARGET_F_OFD_SETLK:
6241 case TARGET_F_OFD_SETLKW:
6242 ret = cmd;
6243 break;
6244 case TARGET_F_GETLK:
6245 ret = F_GETLK64;
6246 break;
6247 case TARGET_F_SETLK:
6248 ret = F_SETLK64;
6249 break;
6250 case TARGET_F_SETLKW:
6251 ret = F_SETLKW64;
6252 break;
6253 case TARGET_F_GETOWN:
6254 ret = F_GETOWN;
6255 break;
6256 case TARGET_F_SETOWN:
6257 ret = F_SETOWN;
6258 break;
6259 case TARGET_F_GETSIG:
6260 ret = F_GETSIG;
6261 break;
6262 case TARGET_F_SETSIG:
6263 ret = F_SETSIG;
6264 break;
6265 #if TARGET_ABI_BITS == 32
6266 case TARGET_F_GETLK64:
6267 ret = F_GETLK64;
6268 break;
6269 case TARGET_F_SETLK64:
6270 ret = F_SETLK64;
6271 break;
6272 case TARGET_F_SETLKW64:
6273 ret = F_SETLKW64;
6274 break;
6275 #endif
6276 case TARGET_F_SETLEASE:
6277 ret = F_SETLEASE;
6278 break;
6279 case TARGET_F_GETLEASE:
6280 ret = F_GETLEASE;
6281 break;
6282 #ifdef F_DUPFD_CLOEXEC
6283 case TARGET_F_DUPFD_CLOEXEC:
6284 ret = F_DUPFD_CLOEXEC;
6285 break;
6286 #endif
6287 case TARGET_F_NOTIFY:
6288 ret = F_NOTIFY;
6289 break;
6290 #ifdef F_GETOWN_EX
6291 case TARGET_F_GETOWN_EX:
6292 ret = F_GETOWN_EX;
6293 break;
6294 #endif
6295 #ifdef F_SETOWN_EX
6296 case TARGET_F_SETOWN_EX:
6297 ret = F_SETOWN_EX;
6298 break;
6299 #endif
6300 #ifdef F_SETPIPE_SZ
6301 case TARGET_F_SETPIPE_SZ:
6302 ret = F_SETPIPE_SZ;
6303 break;
6304 case TARGET_F_GETPIPE_SZ:
6305 ret = F_GETPIPE_SZ;
6306 break;
6307 #endif
6308 default:
6309 ret = -TARGET_EINVAL;
6310 break;
6313 #if defined(__powerpc64__)
6314 /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6315  * 14, values the kernel does not support. The glibc fcntl wrapper
6316  * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6317  * the syscall directly, adjust likewise (12/13/14 become 5/6/7).
6319 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6320 ret -= F_GETLK64 - 5;
6322 #endif
6324 return ret;
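/* FLOCK_TRANSTBL below is an X-macro: each converter redefines
 * TRANSTBL_CONVERT before expanding it, so the same list yields
 * "case TARGET_F_RDLCK: return F_RDLCK;" in target_to_host_flock() and
 * the reverse mapping in host_to_target_flock(). (Descriptive note.) */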
6327 #define FLOCK_TRANSTBL \
6328 switch (type) { \
6329 TRANSTBL_CONVERT(F_RDLCK); \
6330 TRANSTBL_CONVERT(F_WRLCK); \
6331 TRANSTBL_CONVERT(F_UNLCK); \
6332 TRANSTBL_CONVERT(F_EXLCK); \
6333 TRANSTBL_CONVERT(F_SHLCK); \
6336 static int target_to_host_flock(int type)
6338 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6339 FLOCK_TRANSTBL
6340 #undef TRANSTBL_CONVERT
6341 return -TARGET_EINVAL;
6344 static int host_to_target_flock(int type)
6346 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6347 FLOCK_TRANSTBL
6348 #undef TRANSTBL_CONVERT
6349 /* if we don't know how to convert the value coming
6350  * from the host, we copy it to the target field as-is
6352 return type;
6355 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6356 abi_ulong target_flock_addr)
6358 struct target_flock *target_fl;
6359 int l_type;
6361 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6362 return -TARGET_EFAULT;
6365 __get_user(l_type, &target_fl->l_type);
6366 l_type = target_to_host_flock(l_type);
6367 if (l_type < 0) {
6368 return l_type;
6370 fl->l_type = l_type;
6371 __get_user(fl->l_whence, &target_fl->l_whence);
6372 __get_user(fl->l_start, &target_fl->l_start);
6373 __get_user(fl->l_len, &target_fl->l_len);
6374 __get_user(fl->l_pid, &target_fl->l_pid);
6375 unlock_user_struct(target_fl, target_flock_addr, 0);
6376 return 0;
6379 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6380 const struct flock64 *fl)
6382 struct target_flock *target_fl;
6383 short l_type;
6385 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6386 return -TARGET_EFAULT;
6389 l_type = host_to_target_flock(fl->l_type);
6390 __put_user(l_type, &target_fl->l_type);
6391 __put_user(fl->l_whence, &target_fl->l_whence);
6392 __put_user(fl->l_start, &target_fl->l_start);
6393 __put_user(fl->l_len, &target_fl->l_len);
6394 __put_user(fl->l_pid, &target_fl->l_pid);
6395 unlock_user_struct(target_fl, target_flock_addr, 1);
6396 return 0;
6399 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6400 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6402 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6403 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6404 abi_ulong target_flock_addr)
6406 struct target_oabi_flock64 *target_fl;
6407 int l_type;
6409 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6410 return -TARGET_EFAULT;
6413 __get_user(l_type, &target_fl->l_type);
6414 l_type = target_to_host_flock(l_type);
6415 if (l_type < 0) {
6416 return l_type;
6418 fl->l_type = l_type;
6419 __get_user(fl->l_whence, &target_fl->l_whence);
6420 __get_user(fl->l_start, &target_fl->l_start);
6421 __get_user(fl->l_len, &target_fl->l_len);
6422 __get_user(fl->l_pid, &target_fl->l_pid);
6423 unlock_user_struct(target_fl, target_flock_addr, 0);
6424 return 0;
6427 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6428 const struct flock64 *fl)
6430 struct target_oabi_flock64 *target_fl;
6431 short l_type;
6433 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6434 return -TARGET_EFAULT;
6437 l_type = host_to_target_flock(fl->l_type);
6438 __put_user(l_type, &target_fl->l_type);
6439 __put_user(fl->l_whence, &target_fl->l_whence);
6440 __put_user(fl->l_start, &target_fl->l_start);
6441 __put_user(fl->l_len, &target_fl->l_len);
6442 __put_user(fl->l_pid, &target_fl->l_pid);
6443 unlock_user_struct(target_fl, target_flock_addr, 1);
6444 return 0;
6446 #endif
6448 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6449 abi_ulong target_flock_addr)
6451 struct target_flock64 *target_fl;
6452 int l_type;
6454 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6455 return -TARGET_EFAULT;
6458 __get_user(l_type, &target_fl->l_type);
6459 l_type = target_to_host_flock(l_type);
6460 if (l_type < 0) {
6461 return l_type;
6463 fl->l_type = l_type;
6464 __get_user(fl->l_whence, &target_fl->l_whence);
6465 __get_user(fl->l_start, &target_fl->l_start);
6466 __get_user(fl->l_len, &target_fl->l_len);
6467 __get_user(fl->l_pid, &target_fl->l_pid);
6468 unlock_user_struct(target_fl, target_flock_addr, 0);
6469 return 0;
6472 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6473 const struct flock64 *fl)
6475 struct target_flock64 *target_fl;
6476 short l_type;
6478 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6479 return -TARGET_EFAULT;
6482 l_type = host_to_target_flock(fl->l_type);
6483 __put_user(l_type, &target_fl->l_type);
6484 __put_user(fl->l_whence, &target_fl->l_whence);
6485 __put_user(fl->l_start, &target_fl->l_start);
6486 __put_user(fl->l_len, &target_fl->l_len);
6487 __put_user(fl->l_pid, &target_fl->l_pid);
6488 unlock_user_struct(target_fl, target_flock_addr, 1);
6489 return 0;
6492 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6494 struct flock64 fl64;
6495 #ifdef F_GETOWN_EX
6496 struct f_owner_ex fox;
6497 struct target_f_owner_ex *target_fox;
6498 #endif
6499 abi_long ret;
6500 int host_cmd = target_to_host_fcntl_cmd(cmd);
6502 if (host_cmd == -TARGET_EINVAL)
6503 return host_cmd;
6505 switch(cmd) {
6506 case TARGET_F_GETLK:
6507 ret = copy_from_user_flock(&fl64, arg);
6508 if (ret) {
6509 return ret;
6511 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6512 if (ret == 0) {
6513 ret = copy_to_user_flock(arg, &fl64);
6515 break;
6517 case TARGET_F_SETLK:
6518 case TARGET_F_SETLKW:
6519 ret = copy_from_user_flock(&fl64, arg);
6520 if (ret) {
6521 return ret;
6523 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6524 break;
6526 case TARGET_F_GETLK64:
6527 case TARGET_F_OFD_GETLK:
6528 ret = copy_from_user_flock64(&fl64, arg);
6529 if (ret) {
6530 return ret;
6532 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6533 if (ret == 0) {
6534 ret = copy_to_user_flock64(arg, &fl64);
6536 break;
6537 case TARGET_F_SETLK64:
6538 case TARGET_F_SETLKW64:
6539 case TARGET_F_OFD_SETLK:
6540 case TARGET_F_OFD_SETLKW:
6541 ret = copy_from_user_flock64(&fl64, arg);
6542 if (ret) {
6543 return ret;
6545 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6546 break;
6548 case TARGET_F_GETFL:
6549 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6550 if (ret >= 0) {
6551 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6553 break;
6555 case TARGET_F_SETFL:
6556 ret = get_errno(safe_fcntl(fd, host_cmd,
6557 target_to_host_bitmask(arg,
6558 fcntl_flags_tbl)));
6559 break;
6561 #ifdef F_GETOWN_EX
6562 case TARGET_F_GETOWN_EX:
6563 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6564 if (ret >= 0) {
6565 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6566 return -TARGET_EFAULT;
6567 target_fox->type = tswap32(fox.type);
6568 target_fox->pid = tswap32(fox.pid);
6569 unlock_user_struct(target_fox, arg, 1);
6571 break;
6572 #endif
6574 #ifdef F_SETOWN_EX
6575 case TARGET_F_SETOWN_EX:
6576 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6577 return -TARGET_EFAULT;
6578 fox.type = tswap32(target_fox->type);
6579 fox.pid = tswap32(target_fox->pid);
6580 unlock_user_struct(target_fox, arg, 0);
6581 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6582 break;
6583 #endif
6585 case TARGET_F_SETOWN:
6586 case TARGET_F_GETOWN:
6587 case TARGET_F_SETSIG:
6588 case TARGET_F_GETSIG:
6589 case TARGET_F_SETLEASE:
6590 case TARGET_F_GETLEASE:
6591 case TARGET_F_SETPIPE_SZ:
6592 case TARGET_F_GETPIPE_SZ:
6593 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6594 break;
6596 default:
6597 ret = get_errno(safe_fcntl(fd, cmd, arg));
6598 break;
6600 return ret;
6603 #ifdef USE_UID16
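/* With 16-bit IDs, anything above 65535 is clamped to 65534 (the kernel's
 * overflowuid/overflowgid convention), while (uint16_t)-1 must survive the
 * round trip because -1 means "leave unchanged" to the set*id() calls.
 * E.g. high2lowuid(100000) == 65534 and low2highuid(0xffff) == -1.
 * (Descriptive note on the helpers below.) */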
6605 static inline int high2lowuid(int uid)
6607 if (uid > 65535)
6608 return 65534;
6609 else
6610 return uid;
6613 static inline int high2lowgid(int gid)
6615 if (gid > 65535)
6616 return 65534;
6617 else
6618 return gid;
6621 static inline int low2highuid(int uid)
6623 if ((int16_t)uid == -1)
6624 return -1;
6625 else
6626 return uid;
6629 static inline int low2highgid(int gid)
6631 if ((int16_t)gid == -1)
6632 return -1;
6633 else
6634 return gid;
6636 static inline int tswapid(int id)
6638 return tswap16(id);
6641 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6643 #else /* !USE_UID16 */
6644 static inline int high2lowuid(int uid)
6646 return uid;
6648 static inline int high2lowgid(int gid)
6650 return gid;
6652 static inline int low2highuid(int uid)
6654 return uid;
6656 static inline int low2highgid(int gid)
6658 return gid;
6660 static inline int tswapid(int id)
6662 return tswap32(id);
6665 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6667 #endif /* USE_UID16 */
6669 /* We must do direct syscalls for setting UID/GID, because we want to
6670 * implement the Linux system call semantics of "change only for this thread",
6671 * not the libc/POSIX semantics of "change for all threads in process".
6672 * (See http://ewontfix.com/17/ for more details.)
6673 * We use the 32-bit version of the syscalls if present; if it is not
6674 * then either the host architecture supports 32-bit UIDs natively with
6675 * the standard syscall, or the 16-bit UID is the best we can do.
6677 #ifdef __NR_setuid32
6678 #define __NR_sys_setuid __NR_setuid32
6679 #else
6680 #define __NR_sys_setuid __NR_setuid
6681 #endif
6682 #ifdef __NR_setgid32
6683 #define __NR_sys_setgid __NR_setgid32
6684 #else
6685 #define __NR_sys_setgid __NR_setgid
6686 #endif
6687 #ifdef __NR_setresuid32
6688 #define __NR_sys_setresuid __NR_setresuid32
6689 #else
6690 #define __NR_sys_setresuid __NR_setresuid
6691 #endif
6692 #ifdef __NR_setresgid32
6693 #define __NR_sys_setresgid __NR_setresgid32
6694 #else
6695 #define __NR_sys_setresgid __NR_setresgid
6696 #endif
6698 _syscall1(int, sys_setuid, uid_t, uid)
6699 _syscall1(int, sys_setgid, gid_t, gid)
6700 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6701 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6703 void syscall_init(void)
6705 IOCTLEntry *ie;
6706 const argtype *arg_type;
6707 int size;
6708 int i;
6710 thunk_init(STRUCT_MAX);
6712 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6713 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6714 #include "syscall_types.h"
6715 #undef STRUCT
6716 #undef STRUCT_SPECIAL
6718 /* Build target_to_host_errno_table[] table from
6719 * host_to_target_errno_table[]. */
6720 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6721 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6724 /* We patch the ioctl size if necessary. We rely on the fact that
6725    no ioctl has all the bits at '1' in the size field */
6726 ie = ioctl_entries;
6727 while (ie->target_cmd != 0) {
6728 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6729 TARGET_IOC_SIZEMASK) {
6730 arg_type = ie->arg_type;
6731 if (arg_type[0] != TYPE_PTR) {
6732 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6733 ie->target_cmd);
6734 exit(1);
6736 arg_type++;
6737 size = thunk_type_size(arg_type, 0);
6738 ie->target_cmd = (ie->target_cmd &
6739 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6740 (size << TARGET_IOC_SIZESHIFT);
6743 /* automatic consistency check if same arch */
6744 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6745 (defined(__x86_64__) && defined(TARGET_X86_64))
6746 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6747 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6748 ie->name, ie->target_cmd, ie->host_cmd);
6750 #endif
6751 ie++;
6755 #ifdef TARGET_NR_truncate64
6756 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6757 abi_long arg2,
6758 abi_long arg3,
6759 abi_long arg4)
6761 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6762 arg2 = arg3;
6763 arg3 = arg4;
6765 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6767 #endif
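/* regpairs_aligned() (used by target_truncate64() above and
 * target_ftruncate64() below) reflects ABIs such as 32-bit ARM EABI that
 * require a 64-bit syscall argument to start in an even register pair;
 * the guest then inserts a padding argument and the low/high words arrive
 * one slot later, hence the shuffling of arg2/arg3/arg4.
 * (Descriptive note.) */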
6769 #ifdef TARGET_NR_ftruncate64
6770 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6771 abi_long arg2,
6772 abi_long arg3,
6773 abi_long arg4)
6775 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6776 arg2 = arg3;
6777 arg3 = arg4;
6779 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6781 #endif
6783 #if defined(TARGET_NR_timer_settime) || \
6784 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6785 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6786 abi_ulong target_addr)
6788 struct target_itimerspec *target_itspec;
6790 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6791 return -TARGET_EFAULT;
6794 host_itspec->it_interval.tv_sec =
6795 tswapal(target_itspec->it_interval.tv_sec);
6796 host_itspec->it_interval.tv_nsec =
6797 tswapal(target_itspec->it_interval.tv_nsec);
6798 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6799 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6801 unlock_user_struct(target_itspec, target_addr, 1);
6802 return 0;
6804 #endif
6806 #if ((defined(TARGET_NR_timerfd_gettime) || \
6807 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6808 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6809 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6810 struct itimerspec *host_its)
6812 struct target_itimerspec *target_itspec;
6814 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6815 return -TARGET_EFAULT;
6818 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6819 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6821 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6822 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6824 unlock_user_struct(target_itspec, target_addr, 0);
6825 return 0;
6827 #endif
6829 #if defined(TARGET_NR_adjtimex) || \
6830 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6831 static inline abi_long target_to_host_timex(struct timex *host_tx,
6832 abi_long target_addr)
6834 struct target_timex *target_tx;
6836 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6837 return -TARGET_EFAULT;
6840 __get_user(host_tx->modes, &target_tx->modes);
6841 __get_user(host_tx->offset, &target_tx->offset);
6842 __get_user(host_tx->freq, &target_tx->freq);
6843 __get_user(host_tx->maxerror, &target_tx->maxerror);
6844 __get_user(host_tx->esterror, &target_tx->esterror);
6845 __get_user(host_tx->status, &target_tx->status);
6846 __get_user(host_tx->constant, &target_tx->constant);
6847 __get_user(host_tx->precision, &target_tx->precision);
6848 __get_user(host_tx->tolerance, &target_tx->tolerance);
6849 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6850 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6851 __get_user(host_tx->tick, &target_tx->tick);
6852 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6853 __get_user(host_tx->jitter, &target_tx->jitter);
6854 __get_user(host_tx->shift, &target_tx->shift);
6855 __get_user(host_tx->stabil, &target_tx->stabil);
6856 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6857 __get_user(host_tx->calcnt, &target_tx->calcnt);
6858 __get_user(host_tx->errcnt, &target_tx->errcnt);
6859 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6860 __get_user(host_tx->tai, &target_tx->tai);
6862 unlock_user_struct(target_tx, target_addr, 0);
6863 return 0;
6866 static inline abi_long host_to_target_timex(abi_long target_addr,
6867 struct timex *host_tx)
6869 struct target_timex *target_tx;
6871 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6872 return -TARGET_EFAULT;
6875 __put_user(host_tx->modes, &target_tx->modes);
6876 __put_user(host_tx->offset, &target_tx->offset);
6877 __put_user(host_tx->freq, &target_tx->freq);
6878 __put_user(host_tx->maxerror, &target_tx->maxerror);
6879 __put_user(host_tx->esterror, &target_tx->esterror);
6880 __put_user(host_tx->status, &target_tx->status);
6881 __put_user(host_tx->constant, &target_tx->constant);
6882 __put_user(host_tx->precision, &target_tx->precision);
6883 __put_user(host_tx->tolerance, &target_tx->tolerance);
6884 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6885 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6886 __put_user(host_tx->tick, &target_tx->tick);
6887 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6888 __put_user(host_tx->jitter, &target_tx->jitter);
6889 __put_user(host_tx->shift, &target_tx->shift);
6890 __put_user(host_tx->stabil, &target_tx->stabil);
6891 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6892 __put_user(host_tx->calcnt, &target_tx->calcnt);
6893 __put_user(host_tx->errcnt, &target_tx->errcnt);
6894 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6895 __put_user(host_tx->tai, &target_tx->tai);
6897 unlock_user_struct(target_tx, target_addr, 1);
6898 return 0;
6900 #endif
6902 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6903 abi_ulong target_addr)
6905 struct target_sigevent *target_sevp;
6907 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6908 return -TARGET_EFAULT;
6911 /* This union is awkward on 64 bit systems because it has a 32 bit
6912 * integer and a pointer in it; we follow the conversion approach
6913 * used for handling sigval types in signal.c so the guest should get
6914 * the correct value back even if we did a 64 bit byteswap and it's
6915 * using the 32 bit integer.
6917 host_sevp->sigev_value.sival_ptr =
6918 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6919 host_sevp->sigev_signo =
6920 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6921 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6922 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6924 unlock_user_struct(target_sevp, target_addr, 1);
6925 return 0;
6928 #if defined(TARGET_NR_mlockall)
6929 static inline int target_to_host_mlockall_arg(int arg)
6931 int result = 0;
6933 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6934 result |= MCL_CURRENT;
6936 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6937 result |= MCL_FUTURE;
6939 return result;
6941 #endif
6943 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6944 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6945 defined(TARGET_NR_newfstatat))
6946 static inline abi_long host_to_target_stat64(void *cpu_env,
6947 abi_ulong target_addr,
6948 struct stat *host_st)
6950 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6951 if (((CPUARMState *)cpu_env)->eabi) {
6952 struct target_eabi_stat64 *target_st;
6954 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6955 return -TARGET_EFAULT;
6956 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6957 __put_user(host_st->st_dev, &target_st->st_dev);
6958 __put_user(host_st->st_ino, &target_st->st_ino);
6959 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6960 __put_user(host_st->st_ino, &target_st->__st_ino);
6961 #endif
6962 __put_user(host_st->st_mode, &target_st->st_mode);
6963 __put_user(host_st->st_nlink, &target_st->st_nlink);
6964 __put_user(host_st->st_uid, &target_st->st_uid);
6965 __put_user(host_st->st_gid, &target_st->st_gid);
6966 __put_user(host_st->st_rdev, &target_st->st_rdev);
6967 __put_user(host_st->st_size, &target_st->st_size);
6968 __put_user(host_st->st_blksize, &target_st->st_blksize);
6969 __put_user(host_st->st_blocks, &target_st->st_blocks);
6970 __put_user(host_st->st_atime, &target_st->target_st_atime);
6971 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6972 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6973 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6974 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6975 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6976 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6977 #endif
6978 unlock_user_struct(target_st, target_addr, 1);
6979 } else
6980 #endif
6982 #if defined(TARGET_HAS_STRUCT_STAT64)
6983 struct target_stat64 *target_st;
6984 #else
6985 struct target_stat *target_st;
6986 #endif
6988 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6989 return -TARGET_EFAULT;
6990 memset(target_st, 0, sizeof(*target_st));
6991 __put_user(host_st->st_dev, &target_st->st_dev);
6992 __put_user(host_st->st_ino, &target_st->st_ino);
6993 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6994 __put_user(host_st->st_ino, &target_st->__st_ino);
6995 #endif
6996 __put_user(host_st->st_mode, &target_st->st_mode);
6997 __put_user(host_st->st_nlink, &target_st->st_nlink);
6998 __put_user(host_st->st_uid, &target_st->st_uid);
6999 __put_user(host_st->st_gid, &target_st->st_gid);
7000 __put_user(host_st->st_rdev, &target_st->st_rdev);
7001 /* XXX: better use of kernel struct */
7002 __put_user(host_st->st_size, &target_st->st_size);
7003 __put_user(host_st->st_blksize, &target_st->st_blksize);
7004 __put_user(host_st->st_blocks, &target_st->st_blocks);
7005 __put_user(host_st->st_atime, &target_st->target_st_atime);
7006 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7007 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7008 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7009 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7010 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7011 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7012 #endif
7013 unlock_user_struct(target_st, target_addr, 1);
7016 return 0;
7018 #endif
7020 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7021 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7022 abi_ulong target_addr)
7024 struct target_statx *target_stx;
7026 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7027 return -TARGET_EFAULT;
7029 memset(target_stx, 0, sizeof(*target_stx));
7031 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7032 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7033 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7034 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7035 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7036 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7037 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7038 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7039 __put_user(host_stx->stx_size, &target_stx->stx_size);
7040 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7041 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7042 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7043 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7044 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7045 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7046 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7047 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7048 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7049 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7050 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7051 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7052 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7053 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7055 unlock_user_struct(target_stx, target_addr, 1);
7057 return 0;
7059 #endif
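/* The futex wrappers below pick the host syscall at build time
 * (descriptive note): on 64-bit hosts __NR_futex already takes a 64-bit
 * time_t; on 32-bit hosts __NR_futex_time64 is preferred when the host
 * timespec carries 64-bit seconds, with the old __NR_futex as fallback. */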
7061 static int do_sys_futex(int *uaddr, int op, int val,
7062 const struct timespec *timeout, int *uaddr2,
7063 int val3)
7065 #if HOST_LONG_BITS == 64
7066 #if defined(__NR_futex)
7067 /* always a 64-bit time_t; no _time64 variant is defined */
7068 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7070 #endif
7071 #else /* HOST_LONG_BITS == 64 */
7072 #if defined(__NR_futex_time64)
7073 if (sizeof(timeout->tv_sec) == 8) {
7074 /* _time64 function on 32bit arch */
7075 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7077 #endif
7078 #if defined(__NR_futex)
7079 /* old function on 32bit arch */
7080 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7081 #endif
7082 #endif /* HOST_LONG_BITS == 64 */
7083 g_assert_not_reached();
7086 static int do_safe_futex(int *uaddr, int op, int val,
7087 const struct timespec *timeout, int *uaddr2,
7088 int val3)
7090 #if HOST_LONG_BITS == 64
7091 #if defined(__NR_futex)
7092 /* always a 64-bit time_t; no _time64 variant is defined */
7093 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7094 #endif
7095 #else /* HOST_LONG_BITS == 64 */
7096 #if defined(__NR_futex_time64)
7097 if (sizeof(timeout->tv_sec) == 8) {
7098 /* _time64 function on 32bit arch */
7099 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7100 val3));
7102 #endif
7103 #if defined(__NR_futex)
7104 /* old function on 32bit arch */
7105 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7106 #endif
7107 #endif /* HOST_LONG_BITS == 64 */
7108 return -TARGET_ENOSYS;
7111 /* ??? Using host futex calls even when target atomic operations
7112    are not really atomic probably breaks things. However, implementing
7113    futexes locally would make futexes shared between multiple processes
7114    tricky. In that case they're probably useless anyway, because guest
7115    atomic operations won't work either. */
7116 #if defined(TARGET_NR_futex)
7117 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7118 target_ulong uaddr2, int val3)
7120 struct timespec ts, *pts;
7121 int base_op;
7123 /* ??? We assume FUTEX_* constants are the same on both host
7124 and target. */
7125 #ifdef FUTEX_CMD_MASK
7126 base_op = op & FUTEX_CMD_MASK;
7127 #else
7128 base_op = op;
7129 #endif
7130 switch (base_op) {
7131 case FUTEX_WAIT:
7132 case FUTEX_WAIT_BITSET:
7133 if (timeout) {
7134 pts = &ts;
7135 target_to_host_timespec(pts, timeout);
7136 } else {
7137 pts = NULL;
7139 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7140 case FUTEX_WAKE:
7141 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7142 case FUTEX_FD:
7143 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7144 case FUTEX_REQUEUE:
7145 case FUTEX_CMP_REQUEUE:
7146 case FUTEX_WAKE_OP:
7147 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7148 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7149 But the prototype takes a `struct timespec *'; insert casts
7150 to satisfy the compiler. We do not need to tswap TIMEOUT
7151 since it's not compared to guest memory. */
7152 pts = (struct timespec *)(uintptr_t) timeout;
7153 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7154 (base_op == FUTEX_CMP_REQUEUE
7155 ? tswap32(val3)
7156 : val3));
7157 default:
7158 return -TARGET_ENOSYS;
7161 #endif
7163 #if defined(TARGET_NR_futex_time64)
7164 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7165 target_ulong uaddr2, int val3)
7167 struct timespec ts, *pts;
7168 int base_op;
7170 /* ??? We assume FUTEX_* constants are the same on both host
7171 and target. */
7172 #ifdef FUTEX_CMD_MASK
7173 base_op = op & FUTEX_CMD_MASK;
7174 #else
7175 base_op = op;
7176 #endif
7177 switch (base_op) {
7178 case FUTEX_WAIT:
7179 case FUTEX_WAIT_BITSET:
7180 if (timeout) {
7181 pts = &ts;
7182 target_to_host_timespec64(pts, timeout);
7183 } else {
7184 pts = NULL;
7186 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7187 case FUTEX_WAKE:
7188 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7189 case FUTEX_FD:
7190 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7191 case FUTEX_REQUEUE:
7192 case FUTEX_CMP_REQUEUE:
7193 case FUTEX_WAKE_OP:
7194 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7195 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7196 But the prototype takes a `struct timespec *'; insert casts
7197 to satisfy the compiler. We do not need to tswap TIMEOUT
7198 since it's not compared to guest memory. */
7199 pts = (struct timespec *)(uintptr_t) timeout;
7200 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7201 (base_op == FUTEX_CMP_REQUEUE
7202 ? tswap32(val3)
7203 : val3));
7204 default:
7205 return -TARGET_ENOSYS;
7208 #endif
7210 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7211 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7212 abi_long handle, abi_long mount_id,
7213 abi_long flags)
7215 struct file_handle *target_fh;
7216 struct file_handle *fh;
7217 int mid = 0;
7218 abi_long ret;
7219 char *name;
7220 unsigned int size, total_size;
7222 if (get_user_s32(size, handle)) {
7223 return -TARGET_EFAULT;
7226 name = lock_user_string(pathname);
7227 if (!name) {
7228 return -TARGET_EFAULT;
7231 total_size = sizeof(struct file_handle) + size;
7232 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7233 if (!target_fh) {
7234 unlock_user(name, pathname, 0);
7235 return -TARGET_EFAULT;
7238 fh = g_malloc0(total_size);
7239 fh->handle_bytes = size;
7241 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7242 unlock_user(name, pathname, 0);
7244 /* man name_to_handle_at(2):
7245 * Other than the use of the handle_bytes field, the caller should treat
7246 * the file_handle structure as an opaque data type
7249 memcpy(target_fh, fh, total_size);
7250 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7251 target_fh->handle_type = tswap32(fh->handle_type);
7252 g_free(fh);
7253 unlock_user(target_fh, handle, total_size);
7255 if (put_user_s32(mid, mount_id)) {
7256 return -TARGET_EFAULT;
7259 return ret;
7262 #endif
7264 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7265 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7266 abi_long flags)
7268 struct file_handle *target_fh;
7269 struct file_handle *fh;
7270 unsigned int size, total_size;
7271 abi_long ret;
7273 if (get_user_s32(size, handle)) {
7274 return -TARGET_EFAULT;
7277 total_size = sizeof(struct file_handle) + size;
7278 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7279 if (!target_fh) {
7280 return -TARGET_EFAULT;
7283 fh = g_memdup(target_fh, total_size);
7284 fh->handle_bytes = size;
7285 fh->handle_type = tswap32(target_fh->handle_type);
7287 ret = get_errno(open_by_handle_at(mount_fd, fh,
7288 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7290 g_free(fh);
7292 unlock_user(target_fh, handle, total_size);
7294 return ret;
7296 #endif
7298 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7300 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7302 int host_flags;
7303 target_sigset_t *target_mask;
7304 sigset_t host_mask;
7305 abi_long ret;
7307 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7308 return -TARGET_EINVAL;
7310 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7311 return -TARGET_EFAULT;
7314 target_to_host_sigset(&host_mask, target_mask);
7316 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7318 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7319 if (ret >= 0) {
7320 fd_trans_register(ret, &target_signalfd_trans);
7323 unlock_user_struct(target_mask, mask, 0);
7325 return ret;
7327 #endif
7329 /* Map host to target signal numbers for the wait family of syscalls.
7330 Assume all other status bits are the same. */
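/* Layout reminder (descriptive note): in a wait() status word the low
 * 7 bits hold the terminating signal (0x7f meaning "stopped"), bit 7 is
 * the core-dump flag, and bits 8-15 hold the exit code or stop signal;
 * hence the 0x7f and 0xff masks and the << 8 shift below. */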
7331 int host_to_target_waitstatus(int status)
7333 if (WIFSIGNALED(status)) {
7334 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7336 if (WIFSTOPPED(status)) {
7337 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7338 | (status & 0xff);
7340 return status;
7343 static int open_self_cmdline(void *cpu_env, int fd)
7345 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7346 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7347 int i;
7349 for (i = 0; i < bprm->argc; i++) {
7350 size_t len = strlen(bprm->argv[i]) + 1;
7352 if (write(fd, bprm->argv[i], len) != len) {
7353 return -1;
7357 return 0;
7360 static int open_self_maps(void *cpu_env, int fd)
7362 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7363 TaskState *ts = cpu->opaque;
7364 GSList *map_info = read_self_maps();
7365 GSList *s;
7366 int count;
7368 for (s = map_info; s; s = g_slist_next(s)) {
7369 MapInfo *e = (MapInfo *) s->data;
7371 if (h2g_valid(e->start)) {
7372 unsigned long min = e->start;
7373 unsigned long max = e->end;
7374 int flags = page_get_flags(h2g(min));
7375 const char *path;
7377 max = h2g_valid(max - 1) ?
7378 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7380 if (page_check_range(h2g(min), max - min, flags) == -1) {
7381 continue;
7384 if (h2g(min) == ts->info->stack_limit) {
7385 path = "[stack]";
7386 } else {
7387 path = e->path;
7390 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7391 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7392 h2g(min), h2g(max - 1) + 1,
7393 e->is_read ? 'r' : '-',
7394 e->is_write ? 'w' : '-',
7395 e->is_exec ? 'x' : '-',
7396 e->is_priv ? 'p' : '-',
7397 (uint64_t) e->offset, e->dev, e->inode);
7398 if (path) {
7399 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7400 } else {
7401 dprintf(fd, "\n");
7406 free_self_maps(map_info);
7408 #ifdef TARGET_VSYSCALL_PAGE
7410 * We only support execution from the vsyscall page.
7411 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7413 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7414 " --xp 00000000 00:00 0",
7415 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7416 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
7417 #endif
7419 return 0;
7422 static int open_self_stat(void *cpu_env, int fd)
7424 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7425 TaskState *ts = cpu->opaque;
7426 g_autoptr(GString) buf = g_string_new(NULL);
7427 int i;
7429 for (i = 0; i < 44; i++) {
7430 if (i == 0) {
7431 /* pid */
7432 g_string_printf(buf, FMT_pid " ", getpid());
7433 } else if (i == 1) {
7434 /* app name */
7435 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7436 bin = bin ? bin + 1 : ts->bprm->argv[0];
7437 g_string_printf(buf, "(%.15s) ", bin);
7438 } else if (i == 27) {
7439 /* stack bottom */
7440 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7441 } else {
7442 /* for the rest, there is MasterCard */
7443 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7446 if (write(fd, buf->str, buf->len) != buf->len) {
7447 return -1;
7451 return 0;
7454 static int open_self_auxv(void *cpu_env, int fd)
7456 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7457 TaskState *ts = cpu->opaque;
7458 abi_ulong auxv = ts->info->saved_auxv;
7459 abi_ulong len = ts->info->auxv_len;
7460 char *ptr;
7463 * The auxiliary vector is stored on the target process stack.
7464 * Read in the whole auxv vector and copy it to the file.
7466 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7467 if (ptr != NULL) {
7468 while (len > 0) {
7469 ssize_t r;
7470 r = write(fd, ptr, len);
7471 if (r <= 0) {
7472 break;
7474 len -= r;
7475 ptr += r;
7477 lseek(fd, 0, SEEK_SET);
7478 unlock_user(ptr, auxv, len);
7481 return 0;
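/* For example (illustrative): with getpid() == 1234,
 * is_proc_myself("/proc/self/maps", "maps") and
 * is_proc_myself("/proc/1234/maps", "maps") both return 1, while
 * is_proc_myself("/proc/4321/maps", "maps") returns 0. */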
7484 static int is_proc_myself(const char *filename, const char *entry)
7486 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7487 filename += strlen("/proc/");
7488 if (!strncmp(filename, "self/", strlen("self/"))) {
7489 filename += strlen("self/");
7490 } else if (*filename >= '1' && *filename <= '9') {
7491 char myself[80];
7492 snprintf(myself, sizeof(myself), "%d/", getpid());
7493 if (!strncmp(filename, myself, strlen(myself))) {
7494 filename += strlen(myself);
7495 } else {
7496 return 0;
7498 } else {
7499 return 0;
7501 if (!strcmp(filename, entry)) {
7502 return 1;
7505 return 0;
7508 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7509 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7510 static int is_proc(const char *filename, const char *entry)
7512 return strcmp(filename, entry) == 0;
7514 #endif
7516 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7517 static int open_net_route(void *cpu_env, int fd)
7519 FILE *fp;
7520 char *line = NULL;
7521 size_t len = 0;
7522 ssize_t read;
7524 fp = fopen("/proc/net/route", "r");
7525 if (fp == NULL) {
7526 return -1;
7529 /* read header */
7531 read = getline(&line, &len, fp);
7532 dprintf(fd, "%s", line);
7534 /* read routes */
7536 while ((read = getline(&line, &len, fp)) != -1) {
7537 char iface[16];
7538 uint32_t dest, gw, mask;
7539 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7540 int fields;
7542 fields = sscanf(line,
7543 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7544 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7545 &mask, &mtu, &window, &irtt);
7546 if (fields != 11) {
7547 continue;
7549 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7550 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7551 metric, tswap32(mask), mtu, window, irtt);
7554 free(line);
7555 fclose(fp);
7557 return 0;
7559 #endif
7561 #if defined(TARGET_SPARC)
7562 static int open_cpuinfo(void *cpu_env, int fd)
7564 dprintf(fd, "type\t\t: sun4u\n");
7565 return 0;
7567 #endif
7569 #if defined(TARGET_HPPA)
7570 static int open_cpuinfo(void *cpu_env, int fd)
7572 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7573 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7574 dprintf(fd, "capabilities\t: os32\n");
7575 dprintf(fd, "model\t\t: 9000/778/B160L\n");
7576 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7577 return 0;
7579 #endif
7581 #if defined(TARGET_M68K)
7582 static int open_hardware(void *cpu_env, int fd)
7584 dprintf(fd, "Model:\t\tqemu-m68k\n");
7585 return 0;
7587 #endif
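/* do_openat() below intercepts the handful of /proc paths that must
 * describe the guest rather than the host: each fakes[] entry pairs a
 * path (or /proc/self-relative name) with a fill() callback that writes
 * synthesized contents into an unlinked temporary file, whose fd is then
 * returned to the guest in place of the real file. (Descriptive note.) */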
7589 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7591 struct fake_open {
7592 const char *filename;
7593 int (*fill)(void *cpu_env, int fd);
7594 int (*cmp)(const char *s1, const char *s2);
7596 const struct fake_open *fake_open;
7597 static const struct fake_open fakes[] = {
7598 { "maps", open_self_maps, is_proc_myself },
7599 { "stat", open_self_stat, is_proc_myself },
7600 { "auxv", open_self_auxv, is_proc_myself },
7601 { "cmdline", open_self_cmdline, is_proc_myself },
7602 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7603 { "/proc/net/route", open_net_route, is_proc },
7604 #endif
7605 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7606 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7607 #endif
7608 #if defined(TARGET_M68K)
7609 { "/proc/hardware", open_hardware, is_proc },
7610 #endif
7611 { NULL, NULL, NULL }
7614 if (is_proc_myself(pathname, "exe")) {
7615 int execfd = qemu_getauxval(AT_EXECFD);
7616 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7619 for (fake_open = fakes; fake_open->filename; fake_open++) {
7620 if (fake_open->cmp(pathname, fake_open->filename)) {
7621 break;
7625 if (fake_open->filename) {
7626 const char *tmpdir;
7627 char filename[PATH_MAX];
7628 int fd, r;
7630 /* create a temporary file to hold the faked contents */
7631 tmpdir = getenv("TMPDIR");
7632 if (!tmpdir)
7633 tmpdir = "/tmp";
7634 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7635 fd = mkstemp(filename);
7636 if (fd < 0) {
7637 return fd;
7639 unlink(filename);
7641 if ((r = fake_open->fill(cpu_env, fd))) {
7642 int e = errno;
7643 close(fd);
7644 errno = e;
7645 return r;
7647 lseek(fd, 0, SEEK_SET);
7649 return fd;
7652 return safe_openat(dirfd, path(pathname), flags, mode);
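/* Guest-visible POSIX timer IDs are synthesized as TIMER_MAGIC | index;
 * e.g. the timer stored at g_posix_timers[3] is exposed to the guest as
 * 0x0caf0003, and get_timer_id() below checks the magic and recovers the
 * index. (Descriptive note.) */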
7655 #define TIMER_MAGIC 0x0caf0000
7656 #define TIMER_MAGIC_MASK 0xffff0000
7658 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7659 static target_timer_t get_timer_id(abi_long arg)
7661 target_timer_t timerid = arg;
7663 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7664 return -TARGET_EINVAL;
7667 timerid &= 0xffff;
7669 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7670 return -TARGET_EINVAL;
7673 return timerid;
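/* Example (illustrative): for a 32-bit guest on a 64-bit host, target mask
 * words i = 0 and 1 both fold into host_mask[0]; a guest setting bit 5 of
 * word 1 sets bit 37 (1 * 32 + 5) of the host mask, and the reverse
 * conversion below splits host words back into abi_ulong-sized chunks. */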
7676 static int target_to_host_cpu_mask(unsigned long *host_mask,
7677 size_t host_size,
7678 abi_ulong target_addr,
7679 size_t target_size)
7681 unsigned target_bits = sizeof(abi_ulong) * 8;
7682 unsigned host_bits = sizeof(*host_mask) * 8;
7683 abi_ulong *target_mask;
7684 unsigned i, j;
7686 assert(host_size >= target_size);
7688 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7689 if (!target_mask) {
7690 return -TARGET_EFAULT;
7692 memset(host_mask, 0, host_size);
7694 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7695 unsigned bit = i * target_bits;
7696 abi_ulong val;
7698 __get_user(val, &target_mask[i]);
7699 for (j = 0; j < target_bits; j++, bit++) {
7700 if (val & (1UL << j)) {
7701 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7706 unlock_user(target_mask, target_addr, 0);
7707 return 0;
7710 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7711 size_t host_size,
7712 abi_ulong target_addr,
7713 size_t target_size)
7714 {
7715 unsigned target_bits = sizeof(abi_ulong) * 8;
7716 unsigned host_bits = sizeof(*host_mask) * 8;
7717 abi_ulong *target_mask;
7718 unsigned i, j;
7720 assert(host_size >= target_size);
7722 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7723 if (!target_mask) {
7724 return -TARGET_EFAULT;
7725 }
7727 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7728 unsigned bit = i * target_bits;
7729 abi_ulong val = 0;
7731 for (j = 0; j < target_bits; j++, bit++) {
7732 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7733 val |= 1UL << j;
7734 }
7735 }
7736 __put_user(val, &target_mask[i]);
7737 }
7739 unlock_user(target_mask, target_addr, target_size);
7740 return 0;
7741 }
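/*
 * Worked example (illustrative): for a 32-bit guest on a 64-bit host,
 * target_bits == 32 and host_bits == 64, so two consecutive target words
 * pack into one host word in target_to_host_cpu_mask():
 *     target_mask[0] == 0x00000001, target_mask[1] == 0x80000000
 *  -> host_mask[0]   == 0x8000000000000001
 * host_to_target_cpu_mask() performs the exact inverse split.
 */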
7743 /* This is an internal helper for do_syscall so that it is easier
7744 * to have a single return point, so that actions, such as logging
7745 * of syscall results, can be performed.
7746 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7747 */
7748 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7749 abi_long arg2, abi_long arg3, abi_long arg4,
7750 abi_long arg5, abi_long arg6, abi_long arg7,
7751 abi_long arg8)
7752 {
7753 CPUState *cpu = env_cpu(cpu_env);
7754 abi_long ret;
7755 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7756 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7757 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7758 || defined(TARGET_NR_statx)
7759 struct stat st;
7760 #endif
7761 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7762 || defined(TARGET_NR_fstatfs)
7763 struct statfs stfs;
7764 #endif
7765 void *p;
7767 switch(num) {
7768 case TARGET_NR_exit:
7769 /* In old applications this may be used to implement _exit(2).
7770 However in threaded applications it is used for thread termination,
7771 and _exit_group is used for application termination.
7772 Do thread termination if we have more than one thread. */
7774 if (block_signals()) {
7775 return -TARGET_ERESTARTSYS;
7778 pthread_mutex_lock(&clone_lock);
7780 if (CPU_NEXT(first_cpu)) {
7781 TaskState *ts = cpu->opaque;
7783 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7784 object_unref(OBJECT(cpu));
7785 /*
7786 * At this point the CPU should be unrealized and removed
7787 * from cpu lists. We can clean-up the rest of the thread
7788 * data without the lock held.
7789 */
7791 pthread_mutex_unlock(&clone_lock);
7793 if (ts->child_tidptr) {
7794 put_user_u32(0, ts->child_tidptr);
7795 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7796 NULL, NULL, 0);
7798 thread_cpu = NULL;
7799 g_free(ts);
7800 rcu_unregister_thread();
7801 pthread_exit(NULL);
7804 pthread_mutex_unlock(&clone_lock);
7805 preexit_cleanup(cpu_env, arg1);
7806 _exit(arg1);
7807 return 0; /* avoid warning */
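/*
 * Guest-visible contract (illustrative): in a multi-threaded guest,
 * syscall(__NR_exit, code) handled above tears down only the calling
 * thread's CPU and TaskState via pthread_exit(), while
 * syscall(__NR_exit_group, code), handled further down, ends the whole
 * emulated process with _exit()/exit_group().
 */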
7808 case TARGET_NR_read:
7809 if (arg2 == 0 && arg3 == 0) {
7810 return get_errno(safe_read(arg1, 0, 0));
7811 } else {
7812 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7813 return -TARGET_EFAULT;
7814 ret = get_errno(safe_read(arg1, p, arg3));
7815 if (ret >= 0 &&
7816 fd_trans_host_to_target_data(arg1)) {
7817 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7819 unlock_user(p, arg2, ret);
7821 return ret;
7822 case TARGET_NR_write:
7823 if (arg2 == 0 && arg3 == 0) {
7824 return get_errno(safe_write(arg1, 0, 0));
7826 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7827 return -TARGET_EFAULT;
7828 if (fd_trans_target_to_host_data(arg1)) {
7829 void *copy = g_malloc(arg3);
7830 memcpy(copy, p, arg3);
7831 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7832 if (ret >= 0) {
7833 ret = get_errno(safe_write(arg1, copy, ret));
7835 g_free(copy);
7836 } else {
7837 ret = get_errno(safe_write(arg1, p, arg3));
7839 unlock_user(p, arg2, 0);
7840 return ret;
7842 #ifdef TARGET_NR_open
7843 case TARGET_NR_open:
7844 if (!(p = lock_user_string(arg1)))
7845 return -TARGET_EFAULT;
7846 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7847 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7848 arg3));
7849 fd_trans_unregister(ret);
7850 unlock_user(p, arg1, 0);
7851 return ret;
7852 #endif
7853 case TARGET_NR_openat:
7854 if (!(p = lock_user_string(arg2)))
7855 return -TARGET_EFAULT;
7856 ret = get_errno(do_openat(cpu_env, arg1, p,
7857 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7858 arg4));
7859 fd_trans_unregister(ret);
7860 unlock_user(p, arg2, 0);
7861 return ret;
7862 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7863 case TARGET_NR_name_to_handle_at:
7864 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7865 return ret;
7866 #endif
7867 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7868 case TARGET_NR_open_by_handle_at:
7869 ret = do_open_by_handle_at(arg1, arg2, arg3);
7870 fd_trans_unregister(ret);
7871 return ret;
7872 #endif
7873 case TARGET_NR_close:
7874 fd_trans_unregister(arg1);
7875 return get_errno(close(arg1));
7877 case TARGET_NR_brk:
7878 return do_brk(arg1);
7879 #ifdef TARGET_NR_fork
7880 case TARGET_NR_fork:
7881 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7882 #endif
7883 #ifdef TARGET_NR_waitpid
7884 case TARGET_NR_waitpid:
7886 int status;
7887 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7888 if (!is_error(ret) && arg2 && ret
7889 && put_user_s32(host_to_target_waitstatus(status), arg2))
7890 return -TARGET_EFAULT;
7892 return ret;
7893 #endif
7894 #ifdef TARGET_NR_waitid
7895 case TARGET_NR_waitid:
7897 siginfo_t info;
7898 info.si_pid = 0;
7899 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7900 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7901 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7902 return -TARGET_EFAULT;
7903 host_to_target_siginfo(p, &info);
7904 unlock_user(p, arg3, sizeof(target_siginfo_t));
7907 return ret;
7908 #endif
7909 #ifdef TARGET_NR_creat /* not on alpha */
7910 case TARGET_NR_creat:
7911 if (!(p = lock_user_string(arg1)))
7912 return -TARGET_EFAULT;
7913 ret = get_errno(creat(p, arg2));
7914 fd_trans_unregister(ret);
7915 unlock_user(p, arg1, 0);
7916 return ret;
7917 #endif
7918 #ifdef TARGET_NR_link
7919 case TARGET_NR_link:
7921 void * p2;
7922 p = lock_user_string(arg1);
7923 p2 = lock_user_string(arg2);
7924 if (!p || !p2)
7925 ret = -TARGET_EFAULT;
7926 else
7927 ret = get_errno(link(p, p2));
7928 unlock_user(p2, arg2, 0);
7929 unlock_user(p, arg1, 0);
7931 return ret;
7932 #endif
7933 #if defined(TARGET_NR_linkat)
7934 case TARGET_NR_linkat:
7936 void * p2 = NULL;
7937 if (!arg2 || !arg4)
7938 return -TARGET_EFAULT;
7939 p = lock_user_string(arg2);
7940 p2 = lock_user_string(arg4);
7941 if (!p || !p2)
7942 ret = -TARGET_EFAULT;
7943 else
7944 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7945 unlock_user(p, arg2, 0);
7946 unlock_user(p2, arg4, 0);
7948 return ret;
7949 #endif
7950 #ifdef TARGET_NR_unlink
7951 case TARGET_NR_unlink:
7952 if (!(p = lock_user_string(arg1)))
7953 return -TARGET_EFAULT;
7954 ret = get_errno(unlink(p));
7955 unlock_user(p, arg1, 0);
7956 return ret;
7957 #endif
7958 #if defined(TARGET_NR_unlinkat)
7959 case TARGET_NR_unlinkat:
7960 if (!(p = lock_user_string(arg2)))
7961 return -TARGET_EFAULT;
7962 ret = get_errno(unlinkat(arg1, p, arg3));
7963 unlock_user(p, arg2, 0);
7964 return ret;
7965 #endif
7966 case TARGET_NR_execve:
7968 char **argp, **envp;
7969 int argc, envc;
7970 abi_ulong gp;
7971 abi_ulong guest_argp;
7972 abi_ulong guest_envp;
7973 abi_ulong addr;
7974 char **q;
7975 int total_size = 0;
7977 argc = 0;
7978 guest_argp = arg2;
7979 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7980 if (get_user_ual(addr, gp))
7981 return -TARGET_EFAULT;
7982 if (!addr)
7983 break;
7984 argc++;
7986 envc = 0;
7987 guest_envp = arg3;
7988 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7989 if (get_user_ual(addr, gp))
7990 return -TARGET_EFAULT;
7991 if (!addr)
7992 break;
7993 envc++;
7996 argp = g_new0(char *, argc + 1);
7997 envp = g_new0(char *, envc + 1);
7999 for (gp = guest_argp, q = argp; gp;
8000 gp += sizeof(abi_ulong), q++) {
8001 if (get_user_ual(addr, gp))
8002 goto execve_efault;
8003 if (!addr)
8004 break;
8005 if (!(*q = lock_user_string(addr)))
8006 goto execve_efault;
8007 total_size += strlen(*q) + 1;
8009 *q = NULL;
8011 for (gp = guest_envp, q = envp; gp;
8012 gp += sizeof(abi_ulong), q++) {
8013 if (get_user_ual(addr, gp))
8014 goto execve_efault;
8015 if (!addr)
8016 break;
8017 if (!(*q = lock_user_string(addr)))
8018 goto execve_efault;
8019 total_size += strlen(*q) + 1;
8021 *q = NULL;
8023 if (!(p = lock_user_string(arg1)))
8024 goto execve_efault;
8025 /* Although execve() is not an interruptible syscall it is
8026 * a special case where we must use the safe_syscall wrapper:
8027 * if we allow a signal to happen before we make the host
8028 * syscall then we will 'lose' it, because at the point of
8029 * execve the process leaves QEMU's control. So we use the
8030 * safe syscall wrapper to ensure that we either take the
8031 * signal as a guest signal, or else it does not happen
8032 * before the execve completes and makes it the other
8033 * program's problem.
8034 */
8035 ret = get_errno(safe_execve(p, argp, envp));
8036 unlock_user(p, arg1, 0);
8038 goto execve_end;
8040 execve_efault:
8041 ret = -TARGET_EFAULT;
8043 execve_end:
8044 for (gp = guest_argp, q = argp; *q;
8045 gp += sizeof(abi_ulong), q++) {
8046 if (get_user_ual(addr, gp)
8047 || !addr)
8048 break;
8049 unlock_user(*q, addr, 0);
8051 for (gp = guest_envp, q = envp; *q;
8052 gp += sizeof(abi_ulong), q++) {
8053 if (get_user_ual(addr, gp)
8054 || !addr)
8055 break;
8056 unlock_user(*q, addr, 0);
8059 g_free(argp);
8060 g_free(envp);
8062 return ret;
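/*
 * Sketch of the race the comment above describes (illustrative):
 *   1. a signal destined for the guest arrives; QEMU's host handler
 *      queues it for guest delivery;
 *   2. a plain execve() succeeds and replaces the process image;
 *   3. the queued guest signal vanishes - the new image never sees it.
 * The safe_syscall wrapper, roughly, lets the host signal handler detect
 * that the syscall has not yet entered the kernel and restart with
 * -TARGET_ERESTARTSYS, so the signal is either delivered as a guest
 * signal first or survives past the execve.
 */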
8063 case TARGET_NR_chdir:
8064 if (!(p = lock_user_string(arg1)))
8065 return -TARGET_EFAULT;
8066 ret = get_errno(chdir(p));
8067 unlock_user(p, arg1, 0);
8068 return ret;
8069 #ifdef TARGET_NR_time
8070 case TARGET_NR_time:
8072 time_t host_time;
8073 ret = get_errno(time(&host_time));
8074 if (!is_error(ret)
8075 && arg1
8076 && put_user_sal(host_time, arg1))
8077 return -TARGET_EFAULT;
8079 return ret;
8080 #endif
8081 #ifdef TARGET_NR_mknod
8082 case TARGET_NR_mknod:
8083 if (!(p = lock_user_string(arg1)))
8084 return -TARGET_EFAULT;
8085 ret = get_errno(mknod(p, arg2, arg3));
8086 unlock_user(p, arg1, 0);
8087 return ret;
8088 #endif
8089 #if defined(TARGET_NR_mknodat)
8090 case TARGET_NR_mknodat:
8091 if (!(p = lock_user_string(arg2)))
8092 return -TARGET_EFAULT;
8093 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8094 unlock_user(p, arg2, 0);
8095 return ret;
8096 #endif
8097 #ifdef TARGET_NR_chmod
8098 case TARGET_NR_chmod:
8099 if (!(p = lock_user_string(arg1)))
8100 return -TARGET_EFAULT;
8101 ret = get_errno(chmod(p, arg2));
8102 unlock_user(p, arg1, 0);
8103 return ret;
8104 #endif
8105 #ifdef TARGET_NR_lseek
8106 case TARGET_NR_lseek:
8107 return get_errno(lseek(arg1, arg2, arg3));
8108 #endif
8109 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8110 /* Alpha specific */
8111 case TARGET_NR_getxpid:
8112 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8113 return get_errno(getpid());
8114 #endif
8115 #ifdef TARGET_NR_getpid
8116 case TARGET_NR_getpid:
8117 return get_errno(getpid());
8118 #endif
8119 case TARGET_NR_mount:
8121 /* need to look at the data field */
8122 void *p2, *p3;
8124 if (arg1) {
8125 p = lock_user_string(arg1);
8126 if (!p) {
8127 return -TARGET_EFAULT;
8129 } else {
8130 p = NULL;
8133 p2 = lock_user_string(arg2);
8134 if (!p2) {
8135 if (arg1) {
8136 unlock_user(p, arg1, 0);
8138 return -TARGET_EFAULT;
8141 if (arg3) {
8142 p3 = lock_user_string(arg3);
8143 if (!p3) {
8144 if (arg1) {
8145 unlock_user(p, arg1, 0);
8147 unlock_user(p2, arg2, 0);
8148 return -TARGET_EFAULT;
8150 } else {
8151 p3 = NULL;
8154 /* FIXME - arg5 should be locked, but it isn't clear how to
8155 * do that since it's not guaranteed to be a NULL-terminated
8156 * string.
8157 */
8158 if (!arg5) {
8159 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8160 } else {
8161 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8163 ret = get_errno(ret);
8165 if (arg1) {
8166 unlock_user(p, arg1, 0);
8168 unlock_user(p2, arg2, 0);
8169 if (arg3) {
8170 unlock_user(p3, arg3, 0);
8173 return ret;
8174 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8175 #if defined(TARGET_NR_umount)
8176 case TARGET_NR_umount:
8177 #endif
8178 #if defined(TARGET_NR_oldumount)
8179 case TARGET_NR_oldumount:
8180 #endif
8181 if (!(p = lock_user_string(arg1)))
8182 return -TARGET_EFAULT;
8183 ret = get_errno(umount(p));
8184 unlock_user(p, arg1, 0);
8185 return ret;
8186 #endif
8187 #ifdef TARGET_NR_stime /* not on alpha */
8188 case TARGET_NR_stime:
8190 struct timespec ts;
8191 ts.tv_nsec = 0;
8192 if (get_user_sal(ts.tv_sec, arg1)) {
8193 return -TARGET_EFAULT;
8195 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8197 #endif
8198 #ifdef TARGET_NR_alarm /* not on alpha */
8199 case TARGET_NR_alarm:
8200 return alarm(arg1);
8201 #endif
8202 #ifdef TARGET_NR_pause /* not on alpha */
8203 case TARGET_NR_pause:
8204 if (!block_signals()) {
8205 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8207 return -TARGET_EINTR;
8208 #endif
8209 #ifdef TARGET_NR_utime
8210 case TARGET_NR_utime:
8212 struct utimbuf tbuf, *host_tbuf;
8213 struct target_utimbuf *target_tbuf;
8214 if (arg2) {
8215 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8216 return -TARGET_EFAULT;
8217 tbuf.actime = tswapal(target_tbuf->actime);
8218 tbuf.modtime = tswapal(target_tbuf->modtime);
8219 unlock_user_struct(target_tbuf, arg2, 0);
8220 host_tbuf = &tbuf;
8221 } else {
8222 host_tbuf = NULL;
8224 if (!(p = lock_user_string(arg1)))
8225 return -TARGET_EFAULT;
8226 ret = get_errno(utime(p, host_tbuf));
8227 unlock_user(p, arg1, 0);
8229 return ret;
8230 #endif
8231 #ifdef TARGET_NR_utimes
8232 case TARGET_NR_utimes:
8234 struct timeval *tvp, tv[2];
8235 if (arg2) {
8236 if (copy_from_user_timeval(&tv[0], arg2)
8237 || copy_from_user_timeval(&tv[1],
8238 arg2 + sizeof(struct target_timeval)))
8239 return -TARGET_EFAULT;
8240 tvp = tv;
8241 } else {
8242 tvp = NULL;
8244 if (!(p = lock_user_string(arg1)))
8245 return -TARGET_EFAULT;
8246 ret = get_errno(utimes(p, tvp));
8247 unlock_user(p, arg1, 0);
8249 return ret;
8250 #endif
8251 #if defined(TARGET_NR_futimesat)
8252 case TARGET_NR_futimesat:
8254 struct timeval *tvp, tv[2];
8255 if (arg3) {
8256 if (copy_from_user_timeval(&tv[0], arg3)
8257 || copy_from_user_timeval(&tv[1],
8258 arg3 + sizeof(struct target_timeval)))
8259 return -TARGET_EFAULT;
8260 tvp = tv;
8261 } else {
8262 tvp = NULL;
8264 if (!(p = lock_user_string(arg2))) {
8265 return -TARGET_EFAULT;
8267 ret = get_errno(futimesat(arg1, path(p), tvp));
8268 unlock_user(p, arg2, 0);
8270 return ret;
8271 #endif
8272 #ifdef TARGET_NR_access
8273 case TARGET_NR_access:
8274 if (!(p = lock_user_string(arg1))) {
8275 return -TARGET_EFAULT;
8277 ret = get_errno(access(path(p), arg2));
8278 unlock_user(p, arg1, 0);
8279 return ret;
8280 #endif
8281 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8282 case TARGET_NR_faccessat:
8283 if (!(p = lock_user_string(arg2))) {
8284 return -TARGET_EFAULT;
8286 ret = get_errno(faccessat(arg1, p, arg3, 0));
8287 unlock_user(p, arg2, 0);
8288 return ret;
8289 #endif
8290 #ifdef TARGET_NR_nice /* not on alpha */
8291 case TARGET_NR_nice:
8292 return get_errno(nice(arg1));
8293 #endif
8294 case TARGET_NR_sync:
8295 sync();
8296 return 0;
8297 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8298 case TARGET_NR_syncfs:
8299 return get_errno(syncfs(arg1));
8300 #endif
8301 case TARGET_NR_kill:
8302 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8303 #ifdef TARGET_NR_rename
8304 case TARGET_NR_rename:
8306 void *p2;
8307 p = lock_user_string(arg1);
8308 p2 = lock_user_string(arg2);
8309 if (!p || !p2)
8310 ret = -TARGET_EFAULT;
8311 else
8312 ret = get_errno(rename(p, p2));
8313 unlock_user(p2, arg2, 0);
8314 unlock_user(p, arg1, 0);
8316 return ret;
8317 #endif
8318 #if defined(TARGET_NR_renameat)
8319 case TARGET_NR_renameat:
8321 void *p2;
8322 p = lock_user_string(arg2);
8323 p2 = lock_user_string(arg4);
8324 if (!p || !p2)
8325 ret = -TARGET_EFAULT;
8326 else
8327 ret = get_errno(renameat(arg1, p, arg3, p2));
8328 unlock_user(p2, arg4, 0);
8329 unlock_user(p, arg2, 0);
8331 return ret;
8332 #endif
8333 #if defined(TARGET_NR_renameat2)
8334 case TARGET_NR_renameat2:
8336 void *p2;
8337 p = lock_user_string(arg2);
8338 p2 = lock_user_string(arg4);
8339 if (!p || !p2) {
8340 ret = -TARGET_EFAULT;
8341 } else {
8342 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8344 unlock_user(p2, arg4, 0);
8345 unlock_user(p, arg2, 0);
8347 return ret;
8348 #endif
8349 #ifdef TARGET_NR_mkdir
8350 case TARGET_NR_mkdir:
8351 if (!(p = lock_user_string(arg1)))
8352 return -TARGET_EFAULT;
8353 ret = get_errno(mkdir(p, arg2));
8354 unlock_user(p, arg1, 0);
8355 return ret;
8356 #endif
8357 #if defined(TARGET_NR_mkdirat)
8358 case TARGET_NR_mkdirat:
8359 if (!(p = lock_user_string(arg2)))
8360 return -TARGET_EFAULT;
8361 ret = get_errno(mkdirat(arg1, p, arg3));
8362 unlock_user(p, arg2, 0);
8363 return ret;
8364 #endif
8365 #ifdef TARGET_NR_rmdir
8366 case TARGET_NR_rmdir:
8367 if (!(p = lock_user_string(arg1)))
8368 return -TARGET_EFAULT;
8369 ret = get_errno(rmdir(p));
8370 unlock_user(p, arg1, 0);
8371 return ret;
8372 #endif
8373 case TARGET_NR_dup:
8374 ret = get_errno(dup(arg1));
8375 if (ret >= 0) {
8376 fd_trans_dup(arg1, ret);
8378 return ret;
8379 #ifdef TARGET_NR_pipe
8380 case TARGET_NR_pipe:
8381 return do_pipe(cpu_env, arg1, 0, 0);
8382 #endif
8383 #ifdef TARGET_NR_pipe2
8384 case TARGET_NR_pipe2:
8385 return do_pipe(cpu_env, arg1,
8386 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8387 #endif
8388 case TARGET_NR_times:
8390 struct target_tms *tmsp;
8391 struct tms tms;
8392 ret = get_errno(times(&tms));
8393 if (arg1) {
8394 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8395 if (!tmsp)
8396 return -TARGET_EFAULT;
8397 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8398 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8399 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8400 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8402 if (!is_error(ret))
8403 ret = host_to_target_clock_t(ret);
8405 return ret;
8406 case TARGET_NR_acct:
8407 if (arg1 == 0) {
8408 ret = get_errno(acct(NULL));
8409 } else {
8410 if (!(p = lock_user_string(arg1))) {
8411 return -TARGET_EFAULT;
8413 ret = get_errno(acct(path(p)));
8414 unlock_user(p, arg1, 0);
8416 return ret;
8417 #ifdef TARGET_NR_umount2
8418 case TARGET_NR_umount2:
8419 if (!(p = lock_user_string(arg1)))
8420 return -TARGET_EFAULT;
8421 ret = get_errno(umount2(p, arg2));
8422 unlock_user(p, arg1, 0);
8423 return ret;
8424 #endif
8425 case TARGET_NR_ioctl:
8426 return do_ioctl(arg1, arg2, arg3);
8427 #ifdef TARGET_NR_fcntl
8428 case TARGET_NR_fcntl:
8429 return do_fcntl(arg1, arg2, arg3);
8430 #endif
8431 case TARGET_NR_setpgid:
8432 return get_errno(setpgid(arg1, arg2));
8433 case TARGET_NR_umask:
8434 return get_errno(umask(arg1));
8435 case TARGET_NR_chroot:
8436 if (!(p = lock_user_string(arg1)))
8437 return -TARGET_EFAULT;
8438 ret = get_errno(chroot(p));
8439 unlock_user(p, arg1, 0);
8440 return ret;
8441 #ifdef TARGET_NR_dup2
8442 case TARGET_NR_dup2:
8443 ret = get_errno(dup2(arg1, arg2));
8444 if (ret >= 0) {
8445 fd_trans_dup(arg1, arg2);
8447 return ret;
8448 #endif
8449 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8450 case TARGET_NR_dup3:
8452 int host_flags;
8454 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8455 return -EINVAL;
8457 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8458 ret = get_errno(dup3(arg1, arg2, host_flags));
8459 if (ret >= 0) {
8460 fd_trans_dup(arg1, arg2);
8462 return ret;
8464 #endif
8465 #ifdef TARGET_NR_getppid /* not on alpha */
8466 case TARGET_NR_getppid:
8467 return get_errno(getppid());
8468 #endif
8469 #ifdef TARGET_NR_getpgrp
8470 case TARGET_NR_getpgrp:
8471 return get_errno(getpgrp());
8472 #endif
8473 case TARGET_NR_setsid:
8474 return get_errno(setsid());
8475 #ifdef TARGET_NR_sigaction
8476 case TARGET_NR_sigaction:
8478 #if defined(TARGET_ALPHA)
8479 struct target_sigaction act, oact, *pact = 0;
8480 struct target_old_sigaction *old_act;
8481 if (arg2) {
8482 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8483 return -TARGET_EFAULT;
8484 act._sa_handler = old_act->_sa_handler;
8485 target_siginitset(&act.sa_mask, old_act->sa_mask);
8486 act.sa_flags = old_act->sa_flags;
8487 act.sa_restorer = 0;
8488 unlock_user_struct(old_act, arg2, 0);
8489 pact = &act;
8491 ret = get_errno(do_sigaction(arg1, pact, &oact));
8492 if (!is_error(ret) && arg3) {
8493 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8494 return -TARGET_EFAULT;
8495 old_act->_sa_handler = oact._sa_handler;
8496 old_act->sa_mask = oact.sa_mask.sig[0];
8497 old_act->sa_flags = oact.sa_flags;
8498 unlock_user_struct(old_act, arg3, 1);
8500 #elif defined(TARGET_MIPS)
8501 struct target_sigaction act, oact, *pact, *old_act;
8503 if (arg2) {
8504 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8505 return -TARGET_EFAULT;
8506 act._sa_handler = old_act->_sa_handler;
8507 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8508 act.sa_flags = old_act->sa_flags;
8509 unlock_user_struct(old_act, arg2, 0);
8510 pact = &act;
8511 } else {
8512 pact = NULL;
8515 ret = get_errno(do_sigaction(arg1, pact, &oact));
8517 if (!is_error(ret) && arg3) {
8518 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8519 return -TARGET_EFAULT;
8520 old_act->_sa_handler = oact._sa_handler;
8521 old_act->sa_flags = oact.sa_flags;
8522 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8523 old_act->sa_mask.sig[1] = 0;
8524 old_act->sa_mask.sig[2] = 0;
8525 old_act->sa_mask.sig[3] = 0;
8526 unlock_user_struct(old_act, arg3, 1);
8528 #else
8529 struct target_old_sigaction *old_act;
8530 struct target_sigaction act, oact, *pact;
8531 if (arg2) {
8532 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8533 return -TARGET_EFAULT;
8534 act._sa_handler = old_act->_sa_handler;
8535 target_siginitset(&act.sa_mask, old_act->sa_mask);
8536 act.sa_flags = old_act->sa_flags;
8537 act.sa_restorer = old_act->sa_restorer;
8538 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8539 act.ka_restorer = 0;
8540 #endif
8541 unlock_user_struct(old_act, arg2, 0);
8542 pact = &act;
8543 } else {
8544 pact = NULL;
8546 ret = get_errno(do_sigaction(arg1, pact, &oact));
8547 if (!is_error(ret) && arg3) {
8548 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8549 return -TARGET_EFAULT;
8550 old_act->_sa_handler = oact._sa_handler;
8551 old_act->sa_mask = oact.sa_mask.sig[0];
8552 old_act->sa_flags = oact.sa_flags;
8553 old_act->sa_restorer = oact.sa_restorer;
8554 unlock_user_struct(old_act, arg3, 1);
8556 #endif
8558 return ret;
8559 #endif
8560 case TARGET_NR_rt_sigaction:
8562 #if defined(TARGET_ALPHA)
8563 /* For Alpha and SPARC this is a 5 argument syscall, with
8564 * a 'restorer' parameter which must be copied into the
8565 * sa_restorer field of the sigaction struct.
8566 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8567 * and arg5 is the sigsetsize.
8568 * Alpha also has a separate rt_sigaction struct that it uses
8569 * here; SPARC uses the usual sigaction struct.
8570 */
8571 struct target_rt_sigaction *rt_act;
8572 struct target_sigaction act, oact, *pact = 0;
8574 if (arg4 != sizeof(target_sigset_t)) {
8575 return -TARGET_EINVAL;
8577 if (arg2) {
8578 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8579 return -TARGET_EFAULT;
8580 act._sa_handler = rt_act->_sa_handler;
8581 act.sa_mask = rt_act->sa_mask;
8582 act.sa_flags = rt_act->sa_flags;
8583 act.sa_restorer = arg5;
8584 unlock_user_struct(rt_act, arg2, 0);
8585 pact = &act;
8587 ret = get_errno(do_sigaction(arg1, pact, &oact));
8588 if (!is_error(ret) && arg3) {
8589 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8590 return -TARGET_EFAULT;
8591 rt_act->_sa_handler = oact._sa_handler;
8592 rt_act->sa_mask = oact.sa_mask;
8593 rt_act->sa_flags = oact.sa_flags;
8594 unlock_user_struct(rt_act, arg3, 1);
8596 #else
8597 #ifdef TARGET_SPARC
8598 target_ulong restorer = arg4;
8599 target_ulong sigsetsize = arg5;
8600 #else
8601 target_ulong sigsetsize = arg4;
8602 #endif
8603 struct target_sigaction *act;
8604 struct target_sigaction *oact;
8606 if (sigsetsize != sizeof(target_sigset_t)) {
8607 return -TARGET_EINVAL;
8609 if (arg2) {
8610 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8611 return -TARGET_EFAULT;
8613 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8614 act->ka_restorer = restorer;
8615 #endif
8616 } else {
8617 act = NULL;
8619 if (arg3) {
8620 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8621 ret = -TARGET_EFAULT;
8622 goto rt_sigaction_fail;
8624 } else
8625 oact = NULL;
8626 ret = get_errno(do_sigaction(arg1, act, oact));
8627 rt_sigaction_fail:
8628 if (act)
8629 unlock_user_struct(act, arg2, 0);
8630 if (oact)
8631 unlock_user_struct(oact, arg3, 1);
8632 #endif
8634 return ret;
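/*
 * Per-architecture argument layout, summarised from the cases above
 * (illustrative):
 *   generic: rt_sigaction(sig, act, oact, sigsetsize)
 *   SPARC:   rt_sigaction(sig, act, oact, restorer, sigsetsize)
 *   Alpha:   rt_sigaction(sig, act, oact, sigsetsize, restorer)
 * with Alpha additionally using its own target_rt_sigaction struct.
 */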
8635 #ifdef TARGET_NR_sgetmask /* not on alpha */
8636 case TARGET_NR_sgetmask:
8638 sigset_t cur_set;
8639 abi_ulong target_set;
8640 ret = do_sigprocmask(0, NULL, &cur_set);
8641 if (!ret) {
8642 host_to_target_old_sigset(&target_set, &cur_set);
8643 ret = target_set;
8646 return ret;
8647 #endif
8648 #ifdef TARGET_NR_ssetmask /* not on alpha */
8649 case TARGET_NR_ssetmask:
8651 sigset_t set, oset;
8652 abi_ulong target_set = arg1;
8653 target_to_host_old_sigset(&set, &target_set);
8654 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8655 if (!ret) {
8656 host_to_target_old_sigset(&target_set, &oset);
8657 ret = target_set;
8660 return ret;
8661 #endif
8662 #ifdef TARGET_NR_sigprocmask
8663 case TARGET_NR_sigprocmask:
8665 #if defined(TARGET_ALPHA)
8666 sigset_t set, oldset;
8667 abi_ulong mask;
8668 int how;
8670 switch (arg1) {
8671 case TARGET_SIG_BLOCK:
8672 how = SIG_BLOCK;
8673 break;
8674 case TARGET_SIG_UNBLOCK:
8675 how = SIG_UNBLOCK;
8676 break;
8677 case TARGET_SIG_SETMASK:
8678 how = SIG_SETMASK;
8679 break;
8680 default:
8681 return -TARGET_EINVAL;
8683 mask = arg2;
8684 target_to_host_old_sigset(&set, &mask);
8686 ret = do_sigprocmask(how, &set, &oldset);
8687 if (!is_error(ret)) {
8688 host_to_target_old_sigset(&mask, &oldset);
8689 ret = mask;
8690 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8692 #else
8693 sigset_t set, oldset, *set_ptr;
8694 int how;
8696 if (arg2) {
8697 switch (arg1) {
8698 case TARGET_SIG_BLOCK:
8699 how = SIG_BLOCK;
8700 break;
8701 case TARGET_SIG_UNBLOCK:
8702 how = SIG_UNBLOCK;
8703 break;
8704 case TARGET_SIG_SETMASK:
8705 how = SIG_SETMASK;
8706 break;
8707 default:
8708 return -TARGET_EINVAL;
8710 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8711 return -TARGET_EFAULT;
8712 target_to_host_old_sigset(&set, p);
8713 unlock_user(p, arg2, 0);
8714 set_ptr = &set;
8715 } else {
8716 how = 0;
8717 set_ptr = NULL;
8719 ret = do_sigprocmask(how, set_ptr, &oldset);
8720 if (!is_error(ret) && arg3) {
8721 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8722 return -TARGET_EFAULT;
8723 host_to_target_old_sigset(p, &oldset);
8724 unlock_user(p, arg3, sizeof(target_sigset_t));
8726 #endif
8728 return ret;
8729 #endif
8730 case TARGET_NR_rt_sigprocmask:
8732 int how = arg1;
8733 sigset_t set, oldset, *set_ptr;
8735 if (arg4 != sizeof(target_sigset_t)) {
8736 return -TARGET_EINVAL;
8739 if (arg2) {
8740 switch(how) {
8741 case TARGET_SIG_BLOCK:
8742 how = SIG_BLOCK;
8743 break;
8744 case TARGET_SIG_UNBLOCK:
8745 how = SIG_UNBLOCK;
8746 break;
8747 case TARGET_SIG_SETMASK:
8748 how = SIG_SETMASK;
8749 break;
8750 default:
8751 return -TARGET_EINVAL;
8753 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8754 return -TARGET_EFAULT;
8755 target_to_host_sigset(&set, p);
8756 unlock_user(p, arg2, 0);
8757 set_ptr = &set;
8758 } else {
8759 how = 0;
8760 set_ptr = NULL;
8762 ret = do_sigprocmask(how, set_ptr, &oldset);
8763 if (!is_error(ret) && arg3) {
8764 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8765 return -TARGET_EFAULT;
8766 host_to_target_sigset(p, &oldset);
8767 unlock_user(p, arg3, sizeof(target_sigset_t));
8770 return ret;
8771 #ifdef TARGET_NR_sigpending
8772 case TARGET_NR_sigpending:
8774 sigset_t set;
8775 ret = get_errno(sigpending(&set));
8776 if (!is_error(ret)) {
8777 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8778 return -TARGET_EFAULT;
8779 host_to_target_old_sigset(p, &set);
8780 unlock_user(p, arg1, sizeof(target_sigset_t));
8783 return ret;
8784 #endif
8785 case TARGET_NR_rt_sigpending:
8787 sigset_t set;
8789 /* Yes, this check is >, not != like most. We follow the kernel's
8790 * logic here, which uses > because it implements
8791 * NR_sigpending through the same code path, and in that case
8792 * the old_sigset_t is smaller in size.
8793 */
8794 if (arg2 > sizeof(target_sigset_t)) {
8795 return -TARGET_EINVAL;
8798 ret = get_errno(sigpending(&set));
8799 if (!is_error(ret)) {
8800 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8801 return -TARGET_EFAULT;
8802 host_to_target_sigset(p, &set);
8803 unlock_user(p, arg1, sizeof(target_sigset_t));
8806 return ret;
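/*
 * Example of the size rule above (illustrative): a guest libc that routes
 * the old sigpending() through rt_sigpending may pass an old_sigset_t
 * size smaller than sizeof(target_sigset_t); that is accepted, while
 * anything larger than target_sigset_t is rejected with -TARGET_EINVAL,
 * matching the kernel's '>' check.
 */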
8807 #ifdef TARGET_NR_sigsuspend
8808 case TARGET_NR_sigsuspend:
8810 TaskState *ts = cpu->opaque;
8811 #if defined(TARGET_ALPHA)
8812 abi_ulong mask = arg1;
8813 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8814 #else
8815 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8816 return -TARGET_EFAULT;
8817 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8818 unlock_user(p, arg1, 0);
8819 #endif
8820 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8821 SIGSET_T_SIZE));
8822 if (ret != -TARGET_ERESTARTSYS) {
8823 ts->in_sigsuspend = 1;
8826 return ret;
8827 #endif
8828 case TARGET_NR_rt_sigsuspend:
8830 TaskState *ts = cpu->opaque;
8832 if (arg2 != sizeof(target_sigset_t)) {
8833 return -TARGET_EINVAL;
8835 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8836 return -TARGET_EFAULT;
8837 target_to_host_sigset(&ts->sigsuspend_mask, p);
8838 unlock_user(p, arg1, 0);
8839 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8840 SIGSET_T_SIZE));
8841 if (ret != -TARGET_ERESTARTSYS) {
8842 ts->in_sigsuspend = 1;
8845 return ret;
8846 #ifdef TARGET_NR_rt_sigtimedwait
8847 case TARGET_NR_rt_sigtimedwait:
8849 sigset_t set;
8850 struct timespec uts, *puts;
8851 siginfo_t uinfo;
8853 if (arg4 != sizeof(target_sigset_t)) {
8854 return -TARGET_EINVAL;
8857 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8858 return -TARGET_EFAULT;
8859 target_to_host_sigset(&set, p);
8860 unlock_user(p, arg1, 0);
8861 if (arg3) {
8862 puts = &uts;
8863 if (target_to_host_timespec(puts, arg3)) {
8864 return -TARGET_EFAULT;
8866 } else {
8867 puts = NULL;
8869 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8870 SIGSET_T_SIZE));
8871 if (!is_error(ret)) {
8872 if (arg2) {
8873 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8875 if (!p) {
8876 return -TARGET_EFAULT;
8878 host_to_target_siginfo(p, &uinfo);
8879 unlock_user(p, arg2, sizeof(target_siginfo_t));
8881 ret = host_to_target_signal(ret);
8884 return ret;
8885 #endif
8886 case TARGET_NR_rt_sigqueueinfo:
8888 siginfo_t uinfo;
8890 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8891 if (!p) {
8892 return -TARGET_EFAULT;
8894 target_to_host_siginfo(&uinfo, p);
8895 unlock_user(p, arg3, 0);
8896 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8898 return ret;
8899 case TARGET_NR_rt_tgsigqueueinfo:
8901 siginfo_t uinfo;
8903 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8904 if (!p) {
8905 return -TARGET_EFAULT;
8907 target_to_host_siginfo(&uinfo, p);
8908 unlock_user(p, arg4, 0);
8909 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8911 return ret;
8912 #ifdef TARGET_NR_sigreturn
8913 case TARGET_NR_sigreturn:
8914 if (block_signals()) {
8915 return -TARGET_ERESTARTSYS;
8917 return do_sigreturn(cpu_env);
8918 #endif
8919 case TARGET_NR_rt_sigreturn:
8920 if (block_signals()) {
8921 return -TARGET_ERESTARTSYS;
8923 return do_rt_sigreturn(cpu_env);
8924 case TARGET_NR_sethostname:
8925 if (!(p = lock_user_string(arg1)))
8926 return -TARGET_EFAULT;
8927 ret = get_errno(sethostname(p, arg2));
8928 unlock_user(p, arg1, 0);
8929 return ret;
8930 #ifdef TARGET_NR_setrlimit
8931 case TARGET_NR_setrlimit:
8933 int resource = target_to_host_resource(arg1);
8934 struct target_rlimit *target_rlim;
8935 struct rlimit rlim;
8936 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8937 return -TARGET_EFAULT;
8938 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8939 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8940 unlock_user_struct(target_rlim, arg2, 0);
8941 /*
8942 * If we just passed through resource limit settings for memory then
8943 * they would also apply to QEMU's own allocations, and QEMU will
8944 * crash or hang or die if its allocations fail. Ideally we would
8945 * track the guest allocations in QEMU and apply the limits ourselves.
8946 * For now, just tell the guest the call succeeded but don't actually
8947 * limit anything.
8948 */
8949 if (resource != RLIMIT_AS &&
8950 resource != RLIMIT_DATA &&
8951 resource != RLIMIT_STACK) {
8952 return get_errno(setrlimit(resource, &rlim));
8953 } else {
8954 return 0;
8955 }
8956 }
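/*
 * Example of the pass-through policy above (illustrative): a guest
 * running 'ulimit -v 65536' sets RLIMIT_AS, which is reported back as
 * success but deliberately not applied, since it would also cap QEMU's
 * own allocations; RLIMIT_NOFILE, by contrast, is forwarded for real.
 */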
8957 #endif
8958 #ifdef TARGET_NR_getrlimit
8959 case TARGET_NR_getrlimit:
8961 int resource = target_to_host_resource(arg1);
8962 struct target_rlimit *target_rlim;
8963 struct rlimit rlim;
8965 ret = get_errno(getrlimit(resource, &rlim));
8966 if (!is_error(ret)) {
8967 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8968 return -TARGET_EFAULT;
8969 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8970 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8971 unlock_user_struct(target_rlim, arg2, 1);
8974 return ret;
8975 #endif
8976 case TARGET_NR_getrusage:
8978 struct rusage rusage;
8979 ret = get_errno(getrusage(arg1, &rusage));
8980 if (!is_error(ret)) {
8981 ret = host_to_target_rusage(arg2, &rusage);
8984 return ret;
8985 #if defined(TARGET_NR_gettimeofday)
8986 case TARGET_NR_gettimeofday:
8988 struct timeval tv;
8989 struct timezone tz;
8991 ret = get_errno(gettimeofday(&tv, &tz));
8992 if (!is_error(ret)) {
8993 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
8994 return -TARGET_EFAULT;
8996 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
8997 return -TARGET_EFAULT;
9001 return ret;
9002 #endif
9003 #if defined(TARGET_NR_settimeofday)
9004 case TARGET_NR_settimeofday:
9006 struct timeval tv, *ptv = NULL;
9007 struct timezone tz, *ptz = NULL;
9009 if (arg1) {
9010 if (copy_from_user_timeval(&tv, arg1)) {
9011 return -TARGET_EFAULT;
9013 ptv = &tv;
9016 if (arg2) {
9017 if (copy_from_user_timezone(&tz, arg2)) {
9018 return -TARGET_EFAULT;
9020 ptz = &tz;
9023 return get_errno(settimeofday(ptv, ptz));
9025 #endif
9026 #if defined(TARGET_NR_select)
9027 case TARGET_NR_select:
9028 #if defined(TARGET_WANT_NI_OLD_SELECT)
9029 /* some architectures used to have old_select here
9030 * but now return ENOSYS for it.
9031 */
9032 ret = -TARGET_ENOSYS;
9033 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9034 ret = do_old_select(arg1);
9035 #else
9036 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9037 #endif
9038 return ret;
9039 #endif
9040 #ifdef TARGET_NR_pselect6
9041 case TARGET_NR_pselect6:
9043 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9044 fd_set rfds, wfds, efds;
9045 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9046 struct timespec ts, *ts_ptr;
9048 /*
9049 * The 6th arg is actually two args smashed together,
9050 * so we cannot use the C library.
9051 */
9052 sigset_t set;
9053 struct {
9054 sigset_t *set;
9055 size_t size;
9056 } sig, *sig_ptr;
9058 abi_ulong arg_sigset, arg_sigsize, *arg7;
9059 target_sigset_t *target_sigset;
9061 n = arg1;
9062 rfd_addr = arg2;
9063 wfd_addr = arg3;
9064 efd_addr = arg4;
9065 ts_addr = arg5;
9067 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9068 if (ret) {
9069 return ret;
9071 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9072 if (ret) {
9073 return ret;
9075 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9076 if (ret) {
9077 return ret;
9078 }
9080 /*
9081 * This takes a timespec, and not a timeval, so we cannot
9082 * use the do_select() helper ...
9083 */
9084 if (ts_addr) {
9085 if (target_to_host_timespec(&ts, ts_addr)) {
9086 return -TARGET_EFAULT;
9088 ts_ptr = &ts;
9089 } else {
9090 ts_ptr = NULL;
9093 /* Extract the two packed args for the sigset */
9094 if (arg6) {
9095 sig_ptr = &sig;
9096 sig.size = SIGSET_T_SIZE;
9098 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9099 if (!arg7) {
9100 return -TARGET_EFAULT;
9102 arg_sigset = tswapal(arg7[0]);
9103 arg_sigsize = tswapal(arg7[1]);
9104 unlock_user(arg7, arg6, 0);
9106 if (arg_sigset) {
9107 sig.set = &set;
9108 if (arg_sigsize != sizeof(*target_sigset)) {
9109 /* Like the kernel, we enforce correct size sigsets */
9110 return -TARGET_EINVAL;
9112 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9113 sizeof(*target_sigset), 1);
9114 if (!target_sigset) {
9115 return -TARGET_EFAULT;
9117 target_to_host_sigset(&set, target_sigset);
9118 unlock_user(target_sigset, arg_sigset, 0);
9119 } else {
9120 sig.set = NULL;
9122 } else {
9123 sig_ptr = NULL;
9126 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9127 ts_ptr, sig_ptr));
9129 if (!is_error(ret)) {
9130 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9131 return -TARGET_EFAULT;
9132 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9133 return -TARGET_EFAULT;
9134 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9135 return -TARGET_EFAULT;
9137 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9138 return -TARGET_EFAULT;
9141 return ret;
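/*
 * Shape of the packed 6th argument (illustrative, mirroring the kernel):
 *     struct {
 *         abi_ulong sigset_addr;   - guest pointer to the sigset
 *         abi_ulong sigset_size;   - must equal sizeof(target_sigset_t)
 *     };
 * arg6 points at this pair, which is why both abi_ulongs are fetched and
 * byte-swapped above instead of calling the libc pselect() wrapper.
 */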
9142 #endif
9143 #ifdef TARGET_NR_symlink
9144 case TARGET_NR_symlink:
9146 void *p2;
9147 p = lock_user_string(arg1);
9148 p2 = lock_user_string(arg2);
9149 if (!p || !p2)
9150 ret = -TARGET_EFAULT;
9151 else
9152 ret = get_errno(symlink(p, p2));
9153 unlock_user(p2, arg2, 0);
9154 unlock_user(p, arg1, 0);
9156 return ret;
9157 #endif
9158 #if defined(TARGET_NR_symlinkat)
9159 case TARGET_NR_symlinkat:
9161 void *p2;
9162 p = lock_user_string(arg1);
9163 p2 = lock_user_string(arg3);
9164 if (!p || !p2)
9165 ret = -TARGET_EFAULT;
9166 else
9167 ret = get_errno(symlinkat(p, arg2, p2));
9168 unlock_user(p2, arg3, 0);
9169 unlock_user(p, arg1, 0);
9171 return ret;
9172 #endif
9173 #ifdef TARGET_NR_readlink
9174 case TARGET_NR_readlink:
9176 void *p2;
9177 p = lock_user_string(arg1);
9178 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9179 if (!p || !p2) {
9180 ret = -TARGET_EFAULT;
9181 } else if (!arg3) {
9182 /* Short circuit this for the magic exe check. */
9183 ret = -TARGET_EINVAL;
9184 } else if (is_proc_myself((const char *)p, "exe")) {
9185 char real[PATH_MAX], *temp;
9186 temp = realpath(exec_path, real);
9187 /* Return value is # of bytes that we wrote to the buffer. */
9188 if (temp == NULL) {
9189 ret = get_errno(-1);
9190 } else {
9191 /* Don't worry about sign mismatch as earlier mapping
9192 * logic would have thrown a bad address error. */
9193 ret = MIN(strlen(real), arg3);
9194 /* We cannot NUL terminate the string. */
9195 memcpy(p2, real, ret);
9197 } else {
9198 ret = get_errno(readlink(path(p), p2, arg3));
9200 unlock_user(p2, arg2, ret);
9201 unlock_user(p, arg1, 0);
9203 return ret;
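/*
 * Example of the special case above (illustrative): a guest running
 *     readlink("/proc/self/exe", buf, sizeof(buf))
 * receives the realpath of the emulated binary (exec_path), not the
 * path of the QEMU binary itself, and like the kernel the result is
 * not NUL-terminated.
 */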
9204 #endif
9205 #if defined(TARGET_NR_readlinkat)
9206 case TARGET_NR_readlinkat:
9208 void *p2;
9209 p = lock_user_string(arg2);
9210 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9211 if (!p || !p2) {
9212 ret = -TARGET_EFAULT;
9213 } else if (is_proc_myself((const char *)p, "exe")) {
9214 char real[PATH_MAX], *temp;
9215 temp = realpath(exec_path, real);
9216 ret = temp == NULL ? get_errno(-1) : strlen(real);
9217 snprintf((char *)p2, arg4, "%s", real);
9218 } else {
9219 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9221 unlock_user(p2, arg3, ret);
9222 unlock_user(p, arg2, 0);
9224 return ret;
9225 #endif
9226 #ifdef TARGET_NR_swapon
9227 case TARGET_NR_swapon:
9228 if (!(p = lock_user_string(arg1)))
9229 return -TARGET_EFAULT;
9230 ret = get_errno(swapon(p, arg2));
9231 unlock_user(p, arg1, 0);
9232 return ret;
9233 #endif
9234 case TARGET_NR_reboot:
9235 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9236 /* arg4 must be ignored in all other cases */
9237 p = lock_user_string(arg4);
9238 if (!p) {
9239 return -TARGET_EFAULT;
9241 ret = get_errno(reboot(arg1, arg2, arg3, p));
9242 unlock_user(p, arg4, 0);
9243 } else {
9244 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9246 return ret;
9247 #ifdef TARGET_NR_mmap
9248 case TARGET_NR_mmap:
9249 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9250 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9251 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9252 || defined(TARGET_S390X)
9254 abi_ulong *v;
9255 abi_ulong v1, v2, v3, v4, v5, v6;
9256 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9257 return -TARGET_EFAULT;
9258 v1 = tswapal(v[0]);
9259 v2 = tswapal(v[1]);
9260 v3 = tswapal(v[2]);
9261 v4 = tswapal(v[3]);
9262 v5 = tswapal(v[4]);
9263 v6 = tswapal(v[5]);
9264 unlock_user(v, arg1, 0);
9265 ret = get_errno(target_mmap(v1, v2, v3,
9266 target_to_host_bitmask(v4, mmap_flags_tbl),
9267 v5, v6));
9269 #else
9270 ret = get_errno(target_mmap(arg1, arg2, arg3,
9271 target_to_host_bitmask(arg4, mmap_flags_tbl),
9272 arg5,
9273 arg6));
9274 #endif
9275 return ret;
9276 #endif
9277 #ifdef TARGET_NR_mmap2
9278 case TARGET_NR_mmap2:
9279 #ifndef MMAP_SHIFT
9280 #define MMAP_SHIFT 12
9281 #endif
9282 ret = target_mmap(arg1, arg2, arg3,
9283 target_to_host_bitmask(arg4, mmap_flags_tbl),
9284 arg5, arg6 << MMAP_SHIFT);
9285 return get_errno(ret);
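/* Worked example (illustrative): with MMAP_SHIFT == 12 a guest offset
 * argument of 0x100000 maps file offset 0x100000 << 12 == 4GiB, which a
 * 32-bit byte offset could not express - the point of mmap2. */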
9286 #endif
9287 case TARGET_NR_munmap:
9288 return get_errno(target_munmap(arg1, arg2));
9289 case TARGET_NR_mprotect:
9291 TaskState *ts = cpu->opaque;
9292 /* Special hack to detect libc making the stack executable. */
9293 if ((arg3 & PROT_GROWSDOWN)
9294 && arg1 >= ts->info->stack_limit
9295 && arg1 <= ts->info->start_stack) {
9296 arg3 &= ~PROT_GROWSDOWN;
9297 arg2 = arg2 + arg1 - ts->info->stack_limit;
9298 arg1 = ts->info->stack_limit;
9301 return get_errno(target_mprotect(arg1, arg2, arg3));
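/*
 * Sketch of the rewrite above (illustrative): if a guest libc issues
 *     mprotect(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC|PROT_GROWSDOWN)
 * on an address inside [stack_limit, start_stack], the range is widened
 * to start at ts->info->stack_limit and PROT_GROWSDOWN is dropped, so
 * the whole guest stack mapping becomes executable in one call.
 */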
9302 #ifdef TARGET_NR_mremap
9303 case TARGET_NR_mremap:
9304 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9305 #endif
9306 /* ??? msync/mlock/munlock are broken for softmmu. */
9307 #ifdef TARGET_NR_msync
9308 case TARGET_NR_msync:
9309 return get_errno(msync(g2h(arg1), arg2, arg3));
9310 #endif
9311 #ifdef TARGET_NR_mlock
9312 case TARGET_NR_mlock:
9313 return get_errno(mlock(g2h(arg1), arg2));
9314 #endif
9315 #ifdef TARGET_NR_munlock
9316 case TARGET_NR_munlock:
9317 return get_errno(munlock(g2h(arg1), arg2));
9318 #endif
9319 #ifdef TARGET_NR_mlockall
9320 case TARGET_NR_mlockall:
9321 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9322 #endif
9323 #ifdef TARGET_NR_munlockall
9324 case TARGET_NR_munlockall:
9325 return get_errno(munlockall());
9326 #endif
9327 #ifdef TARGET_NR_truncate
9328 case TARGET_NR_truncate:
9329 if (!(p = lock_user_string(arg1)))
9330 return -TARGET_EFAULT;
9331 ret = get_errno(truncate(p, arg2));
9332 unlock_user(p, arg1, 0);
9333 return ret;
9334 #endif
9335 #ifdef TARGET_NR_ftruncate
9336 case TARGET_NR_ftruncate:
9337 return get_errno(ftruncate(arg1, arg2));
9338 #endif
9339 case TARGET_NR_fchmod:
9340 return get_errno(fchmod(arg1, arg2));
9341 #if defined(TARGET_NR_fchmodat)
9342 case TARGET_NR_fchmodat:
9343 if (!(p = lock_user_string(arg2)))
9344 return -TARGET_EFAULT;
9345 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9346 unlock_user(p, arg2, 0);
9347 return ret;
9348 #endif
9349 case TARGET_NR_getpriority:
9350 /* Note that negative values are valid for getpriority, so we must
9351 differentiate based on errno settings. */
9352 errno = 0;
9353 ret = getpriority(arg1, arg2);
9354 if (ret == -1 && errno != 0) {
9355 return -host_to_target_errno(errno);
9357 #ifdef TARGET_ALPHA
9358 /* Return value is the unbiased priority. Signal no error. */
9359 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9360 #else
9361 /* Return value is a biased priority to avoid negative numbers. */
9362 ret = 20 - ret;
9363 #endif
9364 return ret;
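/* Worked example (illustrative): a host nice value of -5 is returned to
 * most guests as 20 - (-5) == 25, keeping the kernel ABI's biased,
 * non-negative encoding; Alpha instead returns the unbiased -5 and
 * clears IR_V0 to signal success. */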
9365 case TARGET_NR_setpriority:
9366 return get_errno(setpriority(arg1, arg2, arg3));
9367 #ifdef TARGET_NR_statfs
9368 case TARGET_NR_statfs:
9369 if (!(p = lock_user_string(arg1))) {
9370 return -TARGET_EFAULT;
9372 ret = get_errno(statfs(path(p), &stfs));
9373 unlock_user(p, arg1, 0);
9374 convert_statfs:
9375 if (!is_error(ret)) {
9376 struct target_statfs *target_stfs;
9378 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9379 return -TARGET_EFAULT;
9380 __put_user(stfs.f_type, &target_stfs->f_type);
9381 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9382 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9383 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9384 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9385 __put_user(stfs.f_files, &target_stfs->f_files);
9386 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9387 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9388 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9389 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9390 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9391 #ifdef _STATFS_F_FLAGS
9392 __put_user(stfs.f_flags, &target_stfs->f_flags);
9393 #else
9394 __put_user(0, &target_stfs->f_flags);
9395 #endif
9396 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9397 unlock_user_struct(target_stfs, arg2, 1);
9399 return ret;
9400 #endif
9401 #ifdef TARGET_NR_fstatfs
9402 case TARGET_NR_fstatfs:
9403 ret = get_errno(fstatfs(arg1, &stfs));
9404 goto convert_statfs;
9405 #endif
9406 #ifdef TARGET_NR_statfs64
9407 case TARGET_NR_statfs64:
9408 if (!(p = lock_user_string(arg1))) {
9409 return -TARGET_EFAULT;
9411 ret = get_errno(statfs(path(p), &stfs));
9412 unlock_user(p, arg1, 0);
9413 convert_statfs64:
9414 if (!is_error(ret)) {
9415 struct target_statfs64 *target_stfs;
9417 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9418 return -TARGET_EFAULT;
9419 __put_user(stfs.f_type, &target_stfs->f_type);
9420 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9421 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9422 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9423 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9424 __put_user(stfs.f_files, &target_stfs->f_files);
9425 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9426 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9427 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9428 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9429 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9430 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9431 unlock_user_struct(target_stfs, arg3, 1);
9433 return ret;
9434 case TARGET_NR_fstatfs64:
9435 ret = get_errno(fstatfs(arg1, &stfs));
9436 goto convert_statfs64;
9437 #endif
9438 #ifdef TARGET_NR_socketcall
9439 case TARGET_NR_socketcall:
9440 return do_socketcall(arg1, arg2);
9441 #endif
9442 #ifdef TARGET_NR_accept
9443 case TARGET_NR_accept:
9444 return do_accept4(arg1, arg2, arg3, 0);
9445 #endif
9446 #ifdef TARGET_NR_accept4
9447 case TARGET_NR_accept4:
9448 return do_accept4(arg1, arg2, arg3, arg4);
9449 #endif
9450 #ifdef TARGET_NR_bind
9451 case TARGET_NR_bind:
9452 return do_bind(arg1, arg2, arg3);
9453 #endif
9454 #ifdef TARGET_NR_connect
9455 case TARGET_NR_connect:
9456 return do_connect(arg1, arg2, arg3);
9457 #endif
9458 #ifdef TARGET_NR_getpeername
9459 case TARGET_NR_getpeername:
9460 return do_getpeername(arg1, arg2, arg3);
9461 #endif
9462 #ifdef TARGET_NR_getsockname
9463 case TARGET_NR_getsockname:
9464 return do_getsockname(arg1, arg2, arg3);
9465 #endif
9466 #ifdef TARGET_NR_getsockopt
9467 case TARGET_NR_getsockopt:
9468 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9469 #endif
9470 #ifdef TARGET_NR_listen
9471 case TARGET_NR_listen:
9472 return get_errno(listen(arg1, arg2));
9473 #endif
9474 #ifdef TARGET_NR_recv
9475 case TARGET_NR_recv:
9476 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9477 #endif
9478 #ifdef TARGET_NR_recvfrom
9479 case TARGET_NR_recvfrom:
9480 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9481 #endif
9482 #ifdef TARGET_NR_recvmsg
9483 case TARGET_NR_recvmsg:
9484 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9485 #endif
9486 #ifdef TARGET_NR_send
9487 case TARGET_NR_send:
9488 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9489 #endif
9490 #ifdef TARGET_NR_sendmsg
9491 case TARGET_NR_sendmsg:
9492 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9493 #endif
9494 #ifdef TARGET_NR_sendmmsg
9495 case TARGET_NR_sendmmsg:
9496 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9497 #endif
9498 #ifdef TARGET_NR_recvmmsg
9499 case TARGET_NR_recvmmsg:
9500 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9501 #endif
9502 #ifdef TARGET_NR_sendto
9503 case TARGET_NR_sendto:
9504 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9505 #endif
9506 #ifdef TARGET_NR_shutdown
9507 case TARGET_NR_shutdown:
9508 return get_errno(shutdown(arg1, arg2));
9509 #endif
9510 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9511 case TARGET_NR_getrandom:
9512 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9513 if (!p) {
9514 return -TARGET_EFAULT;
9516 ret = get_errno(getrandom(p, arg2, arg3));
9517 unlock_user(p, arg1, ret);
9518 return ret;
9519 #endif
9520 #ifdef TARGET_NR_socket
9521 case TARGET_NR_socket:
9522 return do_socket(arg1, arg2, arg3);
9523 #endif
9524 #ifdef TARGET_NR_socketpair
9525 case TARGET_NR_socketpair:
9526 return do_socketpair(arg1, arg2, arg3, arg4);
9527 #endif
9528 #ifdef TARGET_NR_setsockopt
9529 case TARGET_NR_setsockopt:
9530 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9531 #endif
9532 #if defined(TARGET_NR_syslog)
9533 case TARGET_NR_syslog:
9535 int len = arg2;
9537 switch (arg1) {
9538 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9539 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9540 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9541 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9542 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9543 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9544 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9545 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9546 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9547 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9548 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9549 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9551 if (len < 0) {
9552 return -TARGET_EINVAL;
9554 if (len == 0) {
9555 return 0;
9557 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9558 if (!p) {
9559 return -TARGET_EFAULT;
9561 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9562 unlock_user(p, arg2, arg3);
9564 return ret;
9565 default:
9566 return -TARGET_EINVAL;
9569 break;
9570 #endif
9571 case TARGET_NR_setitimer:
9573 struct itimerval value, ovalue, *pvalue;
9575 if (arg2) {
9576 pvalue = &value;
9577 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9578 || copy_from_user_timeval(&pvalue->it_value,
9579 arg2 + sizeof(struct target_timeval)))
9580 return -TARGET_EFAULT;
9581 } else {
9582 pvalue = NULL;
9584 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9585 if (!is_error(ret) && arg3) {
9586 if (copy_to_user_timeval(arg3,
9587 &ovalue.it_interval)
9588 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9589 &ovalue.it_value))
9590 return -TARGET_EFAULT;
9593 return ret;
9594 case TARGET_NR_getitimer:
9596 struct itimerval value;
9598 ret = get_errno(getitimer(arg1, &value));
9599 if (!is_error(ret) && arg2) {
9600 if (copy_to_user_timeval(arg2,
9601 &value.it_interval)
9602 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9603 &value.it_value))
9604 return -TARGET_EFAULT;
9607 return ret;
9608 #ifdef TARGET_NR_stat
9609 case TARGET_NR_stat:
9610 if (!(p = lock_user_string(arg1))) {
9611 return -TARGET_EFAULT;
9613 ret = get_errno(stat(path(p), &st));
9614 unlock_user(p, arg1, 0);
9615 goto do_stat;
9616 #endif
9617 #ifdef TARGET_NR_lstat
9618 case TARGET_NR_lstat:
9619 if (!(p = lock_user_string(arg1))) {
9620 return -TARGET_EFAULT;
9622 ret = get_errno(lstat(path(p), &st));
9623 unlock_user(p, arg1, 0);
9624 goto do_stat;
9625 #endif
9626 #ifdef TARGET_NR_fstat
9627 case TARGET_NR_fstat:
9629 ret = get_errno(fstat(arg1, &st));
9630 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9631 do_stat:
9632 #endif
9633 if (!is_error(ret)) {
9634 struct target_stat *target_st;
9636 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9637 return -TARGET_EFAULT;
9638 memset(target_st, 0, sizeof(*target_st));
9639 __put_user(st.st_dev, &target_st->st_dev);
9640 __put_user(st.st_ino, &target_st->st_ino);
9641 __put_user(st.st_mode, &target_st->st_mode);
9642 __put_user(st.st_uid, &target_st->st_uid);
9643 __put_user(st.st_gid, &target_st->st_gid);
9644 __put_user(st.st_nlink, &target_st->st_nlink);
9645 __put_user(st.st_rdev, &target_st->st_rdev);
9646 __put_user(st.st_size, &target_st->st_size);
9647 __put_user(st.st_blksize, &target_st->st_blksize);
9648 __put_user(st.st_blocks, &target_st->st_blocks);
9649 __put_user(st.st_atime, &target_st->target_st_atime);
9650 __put_user(st.st_mtime, &target_st->target_st_mtime);
9651 __put_user(st.st_ctime, &target_st->target_st_ctime);
9652 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9653 defined(TARGET_STAT_HAVE_NSEC)
9654 __put_user(st.st_atim.tv_nsec,
9655 &target_st->target_st_atime_nsec);
9656 __put_user(st.st_mtim.tv_nsec,
9657 &target_st->target_st_mtime_nsec);
9658 __put_user(st.st_ctim.tv_nsec,
9659 &target_st->target_st_ctime_nsec);
9660 #endif
9661 unlock_user_struct(target_st, arg2, 1);
9664 return ret;
9665 #endif
9666 case TARGET_NR_vhangup:
9667 return get_errno(vhangup());
9668 #ifdef TARGET_NR_syscall
9669 case TARGET_NR_syscall:
9670 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9671 arg6, arg7, arg8, 0);
9672 #endif
9673 #if defined(TARGET_NR_wait4)
9674 case TARGET_NR_wait4:
9676 int status;
9677 abi_long status_ptr = arg2;
9678 struct rusage rusage, *rusage_ptr;
9679 abi_ulong target_rusage = arg4;
9680 abi_long rusage_err;
9681 if (target_rusage)
9682 rusage_ptr = &rusage;
9683 else
9684 rusage_ptr = NULL;
9685 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9686 if (!is_error(ret)) {
9687 if (status_ptr && ret) {
9688 status = host_to_target_waitstatus(status);
9689 if (put_user_s32(status, status_ptr))
9690 return -TARGET_EFAULT;
9692 if (target_rusage) {
9693 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9694 if (rusage_err) {
9695 ret = rusage_err;
9700 return ret;
9701 #endif
9702 #ifdef TARGET_NR_swapoff
9703 case TARGET_NR_swapoff:
9704 if (!(p = lock_user_string(arg1)))
9705 return -TARGET_EFAULT;
9706 ret = get_errno(swapoff(p));
9707 unlock_user(p, arg1, 0);
9708 return ret;
9709 #endif
9710 case TARGET_NR_sysinfo:
9712 struct target_sysinfo *target_value;
9713 struct sysinfo value;
9714 ret = get_errno(sysinfo(&value));
9715 if (!is_error(ret) && arg1)
9717 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9718 return -TARGET_EFAULT;
9719 __put_user(value.uptime, &target_value->uptime);
9720 __put_user(value.loads[0], &target_value->loads[0]);
9721 __put_user(value.loads[1], &target_value->loads[1]);
9722 __put_user(value.loads[2], &target_value->loads[2]);
9723 __put_user(value.totalram, &target_value->totalram);
9724 __put_user(value.freeram, &target_value->freeram);
9725 __put_user(value.sharedram, &target_value->sharedram);
9726 __put_user(value.bufferram, &target_value->bufferram);
9727 __put_user(value.totalswap, &target_value->totalswap);
9728 __put_user(value.freeswap, &target_value->freeswap);
9729 __put_user(value.procs, &target_value->procs);
9730 __put_user(value.totalhigh, &target_value->totalhigh);
9731 __put_user(value.freehigh, &target_value->freehigh);
9732 __put_user(value.mem_unit, &target_value->mem_unit);
9733 unlock_user_struct(target_value, arg1, 1);
9736 return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname *buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
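    /*
     * Three getdents strategies follow: converting host records into the
     * narrower target layout, byte-swapping records in place when the
     * layouts match, and rebuilding target records from host getdents64
     * results when that is all the host offers.
     */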
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent. We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
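                    /* The target record holds the name, its NUL terminator
                     * and a trailing d_type byte (hence namelen + 2),
                     * rounded up to abi_long alignment. */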
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a
                     * padding byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
    {
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        struct pollfd *pfd;
        unsigned int i;

        pfd = NULL;
        target_pfd = NULL;
        if (nfds) {
            if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                return -TARGET_EINVAL;
            }

            target_pfd = lock_user(VERIFY_WRITE, arg1,
                                   sizeof(struct target_pollfd) * nfds, 1);
            if (!target_pfd) {
                return -TARGET_EFAULT;
            }

            pfd = alloca(sizeof(struct pollfd) * nfds);
            for (i = 0; i < nfds; i++) {
                pfd[i].fd = tswap32(target_pfd[i].fd);
                pfd[i].events = tswap16(target_pfd[i].events);
            }
        }

        switch (num) {
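        /*
         * ppoll also converts its timeout back to the guest on return,
         * because the host updates the timespec with the time remaining;
         * plain poll has no such out-parameter.
         */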
# ifdef TARGET_NR_ppoll
        case TARGET_NR_ppoll:
        {
            struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg3) {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                timeout_ts = NULL;
            }

            if (arg4) {
                if (arg5 != sizeof(target_sigset_t)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EINVAL;
                }

                target_set = lock_user(VERIFY_READ, arg4,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(set, target_set);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                       set, SIGSET_T_SIZE));

            if (!is_error(ret) && arg3) {
                host_to_target_timespec(arg3, timeout_ts);
            }
            if (arg4) {
                unlock_user(target_set, arg4, 0);
            }
            break;
        }
# endif
# ifdef TARGET_NR_poll
        case TARGET_NR_poll:
        {
            struct timespec ts, *pts;

            if (arg3 >= 0) {
                /* Convert ms to secs, ns */
                ts.tv_sec = arg3 / 1000;
                ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                pts = &ts;
            } else {
                /* -ve poll() timeout means "infinite" */
                pts = NULL;
            }
            ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
            break;
        }
# endif
        default:
            g_assert_not_reached();
        }

        if (!is_error(ret)) {
            for (i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
            }
        }
        unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    }
    return ret;
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
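    /*
     * For preadv/pwritev the guest passes the 64-bit offset as a pair of
     * registers; target_to_host_low_high() reassembles the pair into the
     * low/high words the host syscall expects.
     */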
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        return -TARGET_ENOTDIR;
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
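    /*
     * prctl options whose arguments are pointers need explicit
     * translation below; anything else is forwarded to the host prctl()
     * unchanged (see the default case).
     */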
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
            {
                int deathsig;
                ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
                if (!is_error(ret) && arg2
                    && put_user_ual(deathsig, arg2)) {
                    return -TARGET_EFAULT;
                }
                return ret;
            }
#ifdef PR_GET_NAME
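        /* Task comm names are at most 16 bytes, including the NUL. */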
        case PR_GET_NAME:
            {
                void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
                if (!name) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 16);
                return ret;
            }
        case PR_SET_NAME:
            {
                void *name = lock_user(VERIFY_READ, arg2, 16, 1);
                if (!name) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 0);
                return ret;
            }
#endif
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
            {
                CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
                ret = 0;
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    ret |= TARGET_PR_FP_MODE_FR;
                }
                if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                    ret |= TARGET_PR_FP_MODE_FRE;
                }
                return ret;
            }
        case TARGET_PR_SET_FP_MODE:
            {
                CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
                bool old_fr = env->CP0_Status & (1 << CP0St_FR);
                bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
                bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
                bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

                const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                                TARGET_PR_FP_MODE_FRE;

                /* If nothing to change, return right away, successfully. */
                if (old_fr == new_fr && old_fre == new_fre) {
                    return 0;
                }
                /* Check the value is valid */
                if (arg2 & ~known_bits) {
                    return -TARGET_EOPNOTSUPP;
                }
                /* Setting FRE without FR is not supported. */
                if (new_fre && !new_fr) {
                    return -TARGET_EOPNOTSUPP;
                }
                if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                    /* FR1 is not supported */
                    return -TARGET_EOPNOTSUPP;
                }
                if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                    && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                    /* cannot set FR=0 */
                    return -TARGET_EOPNOTSUPP;
                }
                if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                    /* Cannot set FRE=1 */
                    return -TARGET_EOPNOTSUPP;
                }
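                /*
                 * Changing FR repacks the FPU registers: with FR=0 a
                 * 64-bit value is split across an even/odd register pair,
                 * with FR=1 it lives in a single 64-bit register, so the
                 * odd halves are moved across here.
                 */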
                int i;
                fpr_t *fpr = env->active_fpu.fpr;
                for (i = 0; i < 32; i += 2) {
                    if (!old_fr && new_fr) {
                        fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                    } else if (old_fr && !new_fr) {
                        fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                    }
                }

                if (new_fr) {
                    env->CP0_Status |= (1 << CP0St_FR);
                    env->hflags |= MIPS_HFLAG_F64;
                } else {
                    env->CP0_Status &= ~(1 << CP0St_FR);
                    env->hflags &= ~MIPS_HFLAG_F64;
                }
                if (new_fre) {
                    env->CP0_Config5 |= (1 << CP0C5_FRE);
                    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                        env->hflags |= MIPS_HFLAG_FRE;
                    }
                } else {
                    env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                    env->hflags &= ~MIPS_HFLAG_FRE;
                }

                return 0;
            }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT. Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto. The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
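    /*
     * ABIs that pass 64-bit values in aligned register pairs insert a
     * pad slot before the offset pair, so the arguments are shifted down
     * one register before the offset is reassembled.
     */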
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
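    /*
     * statx is tried natively first; when the host kernel lacks it the
     * request falls back to fstatat() and the result is expanded into
     * the target statx layout by hand, leaving the extra fields zero.
     */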
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
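    /*
     * These calls take 16-bit IDs on targets with legacy UID syscalls;
     * high2low/low2high map between them and the host's 32-bit IDs.
     * The *32 variants further down pass values through unchanged.
     */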
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried. Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled. */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr. */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;
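                    /* Each later check overwrites si_code, so an enabled
                     * invalid-operation trap ends up taking precedence
                     * over, e.g., inexact. */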
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;
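        /*
         * EABI pads struct flock64 so its 64-bit fields are 8-byte
         * aligned; the old ARM OABI packs them at 4-byte alignment,
         * hence the separate copy helpers selected below.
         */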
#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
        {
            struct timespec ts;

            ret = target_to_host_timespec(&ts, arg2);
            if (!is_error(ret)) {
                ret = get_errno(clock_settime(arg1, &ts));
            }
            return ret;
        }
#endif
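    /*
     * The *64 syscall variants take a 64-bit timespec even on 32-bit
     * guests (the time64 ABI), hence the separate conversion helpers.
     */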
11776 #ifdef TARGET_NR_clock_settime64
11777 case TARGET_NR_clock_settime64:
11779 struct timespec ts;
11781 ret = target_to_host_timespec64(&ts, arg2);
11782 if (!is_error(ret)) {
11783 ret = get_errno(clock_settime(arg1, &ts));
11785 return ret;
11787 #endif
11788 #ifdef TARGET_NR_clock_gettime
11789 case TARGET_NR_clock_gettime:
11791 struct timespec ts;
11792 ret = get_errno(clock_gettime(arg1, &ts));
11793 if (!is_error(ret)) {
11794 ret = host_to_target_timespec(arg2, &ts);
11796 return ret;
11798 #endif
11799 #ifdef TARGET_NR_clock_gettime64
11800 case TARGET_NR_clock_gettime64:
11802 struct timespec ts;
11803 ret = get_errno(clock_gettime(arg1, &ts));
11804 if (!is_error(ret)) {
11805 ret = host_to_target_timespec64(arg2, &ts);
11807 return ret;
11809 #endif
11810 #ifdef TARGET_NR_clock_getres
11811 case TARGET_NR_clock_getres:
11813 struct timespec ts;
11814 ret = get_errno(clock_getres(arg1, &ts));
11815 if (!is_error(ret)) {
11816 host_to_target_timespec(arg2, &ts);
11818 return ret;
11820 #endif
11821 #ifdef TARGET_NR_clock_nanosleep
11822 case TARGET_NR_clock_nanosleep:
11824 struct timespec ts;
11825 target_to_host_timespec(&ts, arg3);
11826 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11827 &ts, arg4 ? &ts : NULL));
11829 * if the call is interrupted by a signal handler, it fails
11830 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
11831 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
11833 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME) {
11834 host_to_target_timespec(arg4, &ts);
11837 return ret;
11839 #endif
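    /*
     * Illustrative sketch (added commentary, not from upstream): the
     * remainder write-back above is what makes a guest retry loop like
     * this behave as it would on a native kernel:
     *
     *     struct timespec req = { .tv_sec = 2, .tv_nsec = 0 }, rem;
     *     while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR) {
     *         req = rem;   // continue sleeping for the unslept remainder
     *     }
     *
     * (clock_nanosleep() returns the error number directly rather than
     * setting errno, hence the direct EINTR comparison.)
     */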
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
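    /*
     * Background sketch (added commentary, not from upstream): what is
     * being declined here is the registration a guest libc would do at
     * thread start, roughly:
     *
     *     static struct robust_list_head head = {
     *         .list = { &head.list },
     *         .futex_offset = ...,          // placeholder value
     *     };
     *     syscall(__NR_set_robust_list, &head, sizeof(head));
     *
     * A native kernel walks that list on thread death and marks held
     * futexes FUTEX_OWNER_DIED; QEMU cannot do that walk on the guest's
     * behalf, hence the ENOSYS above.
     */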
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts + 1,
                                        arg3 + sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
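    /*
     * Illustrative note (added commentary, not from upstream): arg3 points
     * at two guest timespecs laid out back to back, [0] = atime and
     * [1] = mtime, matching the host utimensat() contract; e.g. on the
     * guest side:
     *
     *     struct timespec times[2] = {
     *         { .tv_nsec = UTIME_OMIT },       // leave atime unchanged
     *         { .tv_sec = 0, .tv_nsec = 0 },   // set mtime to the epoch
     *     };
     *     utimensat(AT_FDCWD, "file", times, 0);
     *
     * which is why the conversion above reads two target_timespec structs.
     */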
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
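    /*
     * Illustrative note (added commentary, not from upstream):
     * fd_trans_register() attaches a read-side translator to the new
     * inotify fd because the guest reads struct inotify_event records
     * from it, whose wd/mask/cookie/len fields are endian-sensitive:
     *
     *     char buf[4096];                                // guest side
     *     ssize_t n = read(ifd, buf, sizeof(buf));
     *     struct inotify_event *ev = (struct inotify_event *)buf;
     *
     * The name bytes that follow each record are raw and left untouched.
     */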
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;

#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
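    /*
     * Illustrative note (added commentary, not from upstream): the mq_*
     * cases above wrap the host's POSIX message-queue API directly; a
     * guest pattern such as
     *
     *     mqd_t q = mq_open("/queue", O_CREAT | O_WRONLY, 0600, NULL);
     *     mq_send(q, buf, len, prio);   // libc routes this through the
     *                                   // mq_timedsend syscall, NULL timeout
     *
     * only needs the name, open flags and mq_attr converted, since message
     * payloads are plain bytes with no endianness to fix up.
     */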
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
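    /*
     * Illustrative note (added commentary, not from upstream): splice()
     * updates *off_in / *off_out in place, which is why the emulation
     * above copies the 64-bit guest offsets in before the call and writes
     * them back afterwards. The guest-side behaviour being preserved is,
     * roughly:
     *
     *     loff_t off = 0;
     *     splice(file_fd, &off, pipe_fd, NULL, 4096, 0);
     *     // off has advanced by the number of bytes spliced
     */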
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
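/*
 * Illustrative note (added commentary, not from upstream): the flag
 * rewrite in eventfd2 above is needed because TARGET_O_NONBLOCK and
 * TARGET_O_CLOEXEC are the *guest* ABI's bit values, which need not match
 * the host's (e.g. O_NONBLOCK is 0x80 on MIPS but 0x800 on x86). A guest
 * call such as
 *
 *     int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *
 * therefore arrives with guest-encoded bits in arg2 and must be re-encoded
 * before being passed to the host eventfd().
 */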
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
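/*
 * Illustrative sketch (added commentary, not from upstream): on 32-bit
 * guest ABIs a 64-bit offset is passed in two consecutive argument
 * registers, and target_offset64() (defined earlier in this file)
 * reassembles the halves, conceptually something like
 *
 *     static inline uint64_t example_offset64(abi_ulong lo, abi_ulong hi)
 *     {
 *         return ((uint64_t)hi << 32) | lo;   // pairing order is per-target
 *     }
 *
 * which is why the 32-bit branch consumes arg3..arg6 for what is
 * fallocate(fd, mode, offset, len) on the guest side.
 */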
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
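/*
 * Illustrative note (added commentary, not from upstream): epoll_wait is
 * emulated via the host epoll_pwait with a NULL sigset, mirroring the
 * kernel's own equivalence:
 *
 *     epoll_wait(epfd, events, maxevents, timeout)
 *         == epoll_pwait(epfd, events, maxevents, timeout, NULL)
 *
 * and the byte-swapping loop above is needed because struct epoll_event
 * lives in guest byte order in guest memory while the host kernel fills a
 * native-endian array.
 */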
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
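    /*
     * Illustrative note (added commentary, not from upstream): new limits
     * for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are deliberately not
     * forwarded (rnewp stays NULL for them), since shrinking those on the
     * host would constrain QEMU's own process rather than just the guest.
     * Queries still work, so a guest-side sketch like
     *
     *     struct rlimit64 old;
     *     prlimit64(0, RLIMIT_STACK, NULL, &old);   // read-only query
     *
     * returns the host values converted by the tswap64() calls above.
     */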
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
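    /*
     * Illustrative sketch (added commentary, not from upstream): the guest
     * never sees the host timer_t; it gets back an index into
     * g_posix_timers tagged with TIMER_MAGIC, which get_timer_id() (defined
     * earlier in this file) validates and strips again, conceptually:
     *
     *     guest_id = TIMER_MAGIC | timer_index;   // encode, as above
     *     index    = guest_id & 0xffff;           // decode; sketch only,
     *                                             // see get_timer_id()
     *
     * so stale or forged timer ids from the guest can be rejected with
     * EINVAL instead of being handed to the host.
     */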
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(num, ret, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;