/*
 * Extraction metadata (from web view of qemu/ar7.git):
 *   file: linux-user/syscall.c
 *   blob: d14d849a720cf90db0989efb962d1e1faf5a1df0
 *   commit subject: "configure: Fix atomic64 test for --enable-werror on macOS"
 */
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
118 #endif
119 #include "linux_loop.h"
120 #include "uname.h"
122 #include "qemu.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
128 #include "tcg/tcg.h"
130 #ifndef CLONE_IO
131 #define CLONE_IO 0x80000000 /* Clone io context */
132 #endif
134 /* We can't directly call the host clone syscall, because this will
135 * badly confuse libc (breaking mutexes, for example). So we must
136 * divide clone flags into:
137 * * flag combinations that look like pthread_create()
138 * * flag combinations that look like fork()
139 * * flags we can implement within QEMU itself
140 * * flags we can't support and will return an error for
142 /* For thread creation, all these flags must be present; for
143 * fork, none must be present.
145 #define CLONE_THREAD_FLAGS \
146 (CLONE_VM | CLONE_FS | CLONE_FILES | \
147 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
149 /* These flags are ignored:
150 * CLONE_DETACHED is now ignored by the kernel;
151 * CLONE_IO is just an optimisation hint to the I/O scheduler
153 #define CLONE_IGNORED_FLAGS \
154 (CLONE_DETACHED | CLONE_IO)
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS \
158 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
159 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS \
163 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
164 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
166 #define CLONE_INVALID_FORK_FLAGS \
167 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
169 #define CLONE_INVALID_THREAD_FLAGS \
170 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
171 CLONE_IGNORED_FLAGS))
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174 * have almost all been allocated. We cannot support any of
175 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177 * The checks against the invalid thread masks above will catch these.
178 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182 * once. This exercises the codepaths for restart.
184 //#define DEBUG_ERESTARTSYS
186 //#include <linux/msdos_fs.h>
187 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
188 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
190 #undef _syscall0
191 #undef _syscall1
192 #undef _syscall2
193 #undef _syscall3
194 #undef _syscall4
195 #undef _syscall5
196 #undef _syscall6
198 #define _syscall0(type,name) \
199 static type name (void) \
201 return syscall(__NR_##name); \
204 #define _syscall1(type,name,type1,arg1) \
205 static type name (type1 arg1) \
207 return syscall(__NR_##name, arg1); \
210 #define _syscall2(type,name,type1,arg1,type2,arg2) \
211 static type name (type1 arg1,type2 arg2) \
213 return syscall(__NR_##name, arg1, arg2); \
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
217 static type name (type1 arg1,type2 arg2,type3 arg3) \
219 return syscall(__NR_##name, arg1, arg2, arg3); \
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
225 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
229 type5,arg5) \
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
237 type5,arg5,type6,arg6) \
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
239 type6 arg6) \
241 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
255 #endif
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
258 #endif
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
266 #endif
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
271 #endif
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid)
276 /* For the 64-bit guest on 32-bit host case we must emulate
277 * getdents using getdents64, because otherwise the host
278 * might hand us back more dirent records than we can fit
279 * into the guest buffer after structure format conversion.
280 * Otherwise we emulate getdents with getdents if the host has it.
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
284 #endif
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
288 #endif
289 #if (defined(TARGET_NR_getdents) && \
290 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
293 #endif
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
296 loff_t *, res, uint, wh);
297 #endif
298 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
299 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
300 siginfo_t *, uinfo)
301 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group,int,error_code)
304 #endif
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address,int *,tidptr)
307 #endif
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
310 const struct timespec *,timeout,int *,uaddr2,int,val3)
311 #endif
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
314 const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
318 unsigned long *, user_mask_ptr);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
321 unsigned long *, user_mask_ptr);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
324 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
325 void *, arg);
326 _syscall2(int, capget, struct __user_cap_header_struct *, header,
327 struct __user_cap_data_struct *, data);
328 _syscall2(int, capset, struct __user_cap_header_struct *, header,
329 struct __user_cap_data_struct *, data);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get, int, which, int, who)
332 #endif
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
335 #endif
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
338 #endif
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
342 unsigned long, idx1, unsigned long, idx2)
343 #endif
/* It is assumed that struct statx is architecture independent. */
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
350 unsigned int, mask, struct target_statx *, statxbuf)
351 #endif
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier, int, cmd, int, flags)
354 #endif
356 static bitmask_transtbl fcntl_flags_tbl[] = {
357 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
358 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
359 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
360 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
361 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
362 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
363 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
364 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
365 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
366 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
367 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
368 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
369 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
370 #if defined(O_DIRECT)
371 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
372 #endif
373 #if defined(O_NOATIME)
374 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
375 #endif
376 #if defined(O_CLOEXEC)
377 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
378 #endif
379 #if defined(O_PATH)
380 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
381 #endif
382 #if defined(O_TMPFILE)
383 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
384 #endif
385 /* Don't terminate the list prematurely on 64-bit host+guest. */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
388 #endif
389 { 0, 0, 0, 0 }
392 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
394 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
398 const struct timespec *,tsp,int,flags)
399 #else
/* Fallback when the host has no utimensat(2): report "not implemented". */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
406 #endif
407 #endif /* TARGET_NR_utimensat */
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
413 const char *, new, unsigned int, flags)
414 #else
415 static int sys_renameat2(int oldfd, const char *old,
416 int newfd, const char *new, int flags)
418 if (flags == 0) {
419 return renameat(oldfd, old, newfd, new);
421 errno = ENOSYS;
422 return -1;
424 #endif
425 #endif /* TARGET_NR_renameat2 */
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table can call inotify_init() uniformly. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
435 #endif
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around inotify_add_watch(2). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
441 #endif
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around inotify_rm_watch(2). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
447 #endif
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around inotify_init1(2) (inotify_init plus flags). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
454 #endif
455 #endif
456 #else
457 /* Userspace can usually survive runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY */
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
467 #endif
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not be that used by the underlying syscall */
470 struct host_rlimit64 {
471 uint64_t rlim_cur;
472 uint64_t rlim_max;
474 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
475 const struct host_rlimit64 *, new_limit,
476 struct host_rlimit64 *, old_limit)
477 #endif
480 #if defined(TARGET_NR_timer_create)
481 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers[32] = { 0, } ;
484 static inline int next_free_host_timer(void)
486 int k ;
487 /* FIXME: Does finding the next free slot require a lock? */
488 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
489 if (g_posix_timers[k] == 0) {
490 g_posix_timers[k] = (timer_t) 1;
491 return k;
494 return -1;
496 #endif
498 #define ERRNO_TABLE_SIZE 1200
500 /* target_to_host_errno_table[] is initialized from
501 * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
506 * This list is the union of errno values overridden in asm-<arch>/errno.h
507 * minus the errnos that are not actually generic to all archs.
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510 [EAGAIN] = TARGET_EAGAIN,
511 [EIDRM] = TARGET_EIDRM,
512 [ECHRNG] = TARGET_ECHRNG,
513 [EL2NSYNC] = TARGET_EL2NSYNC,
514 [EL3HLT] = TARGET_EL3HLT,
515 [EL3RST] = TARGET_EL3RST,
516 [ELNRNG] = TARGET_ELNRNG,
517 [EUNATCH] = TARGET_EUNATCH,
518 [ENOCSI] = TARGET_ENOCSI,
519 [EL2HLT] = TARGET_EL2HLT,
520 [EDEADLK] = TARGET_EDEADLK,
521 [ENOLCK] = TARGET_ENOLCK,
522 [EBADE] = TARGET_EBADE,
523 [EBADR] = TARGET_EBADR,
524 [EXFULL] = TARGET_EXFULL,
525 [ENOANO] = TARGET_ENOANO,
526 [EBADRQC] = TARGET_EBADRQC,
527 [EBADSLT] = TARGET_EBADSLT,
528 [EBFONT] = TARGET_EBFONT,
529 [ENOSTR] = TARGET_ENOSTR,
530 [ENODATA] = TARGET_ENODATA,
531 [ETIME] = TARGET_ETIME,
532 [ENOSR] = TARGET_ENOSR,
533 [ENONET] = TARGET_ENONET,
534 [ENOPKG] = TARGET_ENOPKG,
535 [EREMOTE] = TARGET_EREMOTE,
536 [ENOLINK] = TARGET_ENOLINK,
537 [EADV] = TARGET_EADV,
538 [ESRMNT] = TARGET_ESRMNT,
539 [ECOMM] = TARGET_ECOMM,
540 [EPROTO] = TARGET_EPROTO,
541 [EDOTDOT] = TARGET_EDOTDOT,
542 [EMULTIHOP] = TARGET_EMULTIHOP,
543 [EBADMSG] = TARGET_EBADMSG,
544 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
545 [EOVERFLOW] = TARGET_EOVERFLOW,
546 [ENOTUNIQ] = TARGET_ENOTUNIQ,
547 [EBADFD] = TARGET_EBADFD,
548 [EREMCHG] = TARGET_EREMCHG,
549 [ELIBACC] = TARGET_ELIBACC,
550 [ELIBBAD] = TARGET_ELIBBAD,
551 [ELIBSCN] = TARGET_ELIBSCN,
552 [ELIBMAX] = TARGET_ELIBMAX,
553 [ELIBEXEC] = TARGET_ELIBEXEC,
554 [EILSEQ] = TARGET_EILSEQ,
555 [ENOSYS] = TARGET_ENOSYS,
556 [ELOOP] = TARGET_ELOOP,
557 [ERESTART] = TARGET_ERESTART,
558 [ESTRPIPE] = TARGET_ESTRPIPE,
559 [ENOTEMPTY] = TARGET_ENOTEMPTY,
560 [EUSERS] = TARGET_EUSERS,
561 [ENOTSOCK] = TARGET_ENOTSOCK,
562 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
563 [EMSGSIZE] = TARGET_EMSGSIZE,
564 [EPROTOTYPE] = TARGET_EPROTOTYPE,
565 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
566 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
567 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
568 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
569 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
570 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
571 [EADDRINUSE] = TARGET_EADDRINUSE,
572 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
573 [ENETDOWN] = TARGET_ENETDOWN,
574 [ENETUNREACH] = TARGET_ENETUNREACH,
575 [ENETRESET] = TARGET_ENETRESET,
576 [ECONNABORTED] = TARGET_ECONNABORTED,
577 [ECONNRESET] = TARGET_ECONNRESET,
578 [ENOBUFS] = TARGET_ENOBUFS,
579 [EISCONN] = TARGET_EISCONN,
580 [ENOTCONN] = TARGET_ENOTCONN,
581 [EUCLEAN] = TARGET_EUCLEAN,
582 [ENOTNAM] = TARGET_ENOTNAM,
583 [ENAVAIL] = TARGET_ENAVAIL,
584 [EISNAM] = TARGET_EISNAM,
585 [EREMOTEIO] = TARGET_EREMOTEIO,
586 [EDQUOT] = TARGET_EDQUOT,
587 [ESHUTDOWN] = TARGET_ESHUTDOWN,
588 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
589 [ETIMEDOUT] = TARGET_ETIMEDOUT,
590 [ECONNREFUSED] = TARGET_ECONNREFUSED,
591 [EHOSTDOWN] = TARGET_EHOSTDOWN,
592 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
593 [EALREADY] = TARGET_EALREADY,
594 [EINPROGRESS] = TARGET_EINPROGRESS,
595 [ESTALE] = TARGET_ESTALE,
596 [ECANCELED] = TARGET_ECANCELED,
597 [ENOMEDIUM] = TARGET_ENOMEDIUM,
598 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600 [ENOKEY] = TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606 [EKEYREVOKED] = TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609 [EKEYREJECTED] = TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612 [EOWNERDEAD] = TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618 [ENOMSG] = TARGET_ENOMSG,
619 #endif
620 #ifdef ERKFILL
621 [ERFKILL] = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624 [EHWPOISON] = TARGET_EHWPOISON,
625 #endif
628 static inline int host_to_target_errno(int err)
630 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631 host_to_target_errno_table[err]) {
632 return host_to_target_errno_table[err];
634 return err;
637 static inline int target_to_host_errno(int err)
639 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640 target_to_host_errno_table[err]) {
641 return target_to_host_errno_table[err];
643 return err;
646 static inline abi_long get_errno(abi_long ret)
648 if (ret == -1)
649 return -host_to_target_errno(errno);
650 else
651 return ret;
654 const char *target_strerror(int err)
656 if (err == TARGET_ERESTARTSYS) {
657 return "To be restarted";
659 if (err == TARGET_QEMU_ESIGRETURN) {
660 return "Successful exit from sigreturn";
663 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664 return NULL;
666 return strerror(target_to_host_errno(err));
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
672 return safe_syscall(__NR_##name); \
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
678 return safe_syscall(__NR_##name, arg1); \
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
684 return safe_syscall(__NR_##name, arg1, arg2); \
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
690 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
694 type4, arg4) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
697 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701 type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
703 type5 arg5) \
705 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709 type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711 type5 arg5, type6 arg6) \
713 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
716 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
717 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
718 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
719 int, flags, mode_t, mode)
720 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722 struct rusage *, rusage)
723 #endif
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725 int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
728 defined(TARGET_NR_pselect6)
729 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
730 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
731 #endif
732 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734 struct timespec *, tsp, const sigset_t *, sigmask,
735 size_t, sigsetsize)
736 #endif
737 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
738 int, maxevents, int, timeout, const sigset_t *, sigmask,
739 size_t, sigsetsize)
740 #if defined(__NR_futex)
741 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
742 const struct timespec *,timeout,int *,uaddr2,int,val3)
743 #endif
744 #if defined(__NR_futex_time64)
745 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
746 const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #endif
748 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
749 safe_syscall2(int, kill, pid_t, pid, int, sig)
750 safe_syscall2(int, tkill, int, tid, int, sig)
751 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
752 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
753 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
754 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
755 unsigned long, pos_l, unsigned long, pos_h)
756 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
757 unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
759 socklen_t, addrlen)
760 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
761 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
762 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
763 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
764 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
765 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
766 safe_syscall2(int, flock, int, fd, int, operation)
767 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
768 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
769 const struct timespec *, uts, size_t, sigsetsize)
770 #endif
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
772 int, flags)
773 #if defined(TARGET_NR_nanosleep)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775 struct timespec *, rem)
776 #endif
777 #if defined(TARGET_NR_clock_nanosleep) || \
778 defined(TARGET_NR_clock_nanosleep_time64)
779 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
780 const struct timespec *, req, struct timespec *, rem)
781 #endif
782 #ifdef __NR_ipc
783 #ifdef __s390x__
784 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
785 void *, ptr)
786 #else
787 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
788 void *, ptr, long, fifth)
789 #endif
790 #endif
791 #ifdef __NR_msgsnd
792 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
793 int, flags)
794 #endif
795 #ifdef __NR_msgrcv
796 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
797 long, msgtype, int, flags)
798 #endif
799 #ifdef __NR_semtimedop
800 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
801 unsigned, nsops, const struct timespec *, timeout)
802 #endif
803 #if defined(TARGET_NR_mq_timedsend) || \
804 defined(TARGET_NR_mq_timedsend_time64)
805 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
806 size_t, len, unsigned, prio, const struct timespec *, timeout)
807 #endif
808 #if defined(TARGET_NR_mq_timedreceive) || \
809 defined(TARGET_NR_mq_timedreceive_time64)
810 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
811 size_t, len, unsigned *, prio, const struct timespec *, timeout)
812 #endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
817 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
823 #ifdef __NR_fcntl64
824 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
825 #else
826 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
827 #endif
829 static inline int host_to_target_sock_type(int host_type)
831 int target_type;
833 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
834 case SOCK_DGRAM:
835 target_type = TARGET_SOCK_DGRAM;
836 break;
837 case SOCK_STREAM:
838 target_type = TARGET_SOCK_STREAM;
839 break;
840 default:
841 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
842 break;
845 #if defined(SOCK_CLOEXEC)
846 if (host_type & SOCK_CLOEXEC) {
847 target_type |= TARGET_SOCK_CLOEXEC;
849 #endif
851 #if defined(SOCK_NONBLOCK)
852 if (host_type & SOCK_NONBLOCK) {
853 target_type |= TARGET_SOCK_NONBLOCK;
855 #endif
857 return target_type;
860 static abi_ulong target_brk;
861 static abi_ulong target_original_brk;
862 static abi_ulong brk_page;
864 void target_set_brk(abi_ulong new_brk)
866 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
867 brk_page = HOST_PAGE_ALIGN(target_brk);
870 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
871 #define DEBUGF_BRK(message, args...)
873 /* do_brk() must return target values and target errnos. */
874 abi_long do_brk(abi_ulong new_brk)
876 abi_long mapped_addr;
877 abi_ulong new_alloc_size;
879 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
881 if (!new_brk) {
882 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
883 return target_brk;
885 if (new_brk < target_original_brk) {
886 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
887 target_brk);
888 return target_brk;
891 /* If the new brk is less than the highest page reserved to the
892 * target heap allocation, set it and we're almost done... */
893 if (new_brk <= brk_page) {
894 /* Heap contents are initialized to zero, as for anonymous
895 * mapped pages. */
896 if (new_brk > target_brk) {
897 memset(g2h(target_brk), 0, new_brk - target_brk);
899 target_brk = new_brk;
900 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
901 return target_brk;
904 /* We need to allocate more memory after the brk... Note that
905 * we don't use MAP_FIXED because that will map over the top of
906 * any existing mapping (like the one with the host libc or qemu
907 * itself); instead we treat "mapped but at wrong address" as
908 * a failure and unmap again.
910 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
911 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
912 PROT_READ|PROT_WRITE,
913 MAP_ANON|MAP_PRIVATE, 0, 0));
915 if (mapped_addr == brk_page) {
916 /* Heap contents are initialized to zero, as for anonymous
917 * mapped pages. Technically the new pages are already
918 * initialized to zero since they *are* anonymous mapped
919 * pages, however we have to take care with the contents that
920 * come from the remaining part of the previous page: it may
921 * contains garbage data due to a previous heap usage (grown
922 * then shrunken). */
923 memset(g2h(target_brk), 0, brk_page - target_brk);
925 target_brk = new_brk;
926 brk_page = HOST_PAGE_ALIGN(target_brk);
927 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
928 target_brk);
929 return target_brk;
930 } else if (mapped_addr != -1) {
931 /* Mapped but at wrong address, meaning there wasn't actually
932 * enough space for this brk.
934 target_munmap(mapped_addr, new_alloc_size);
935 mapped_addr = -1;
936 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
938 else {
939 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
942 #if defined(TARGET_ALPHA)
943 /* We (partially) emulate OSF/1 on Alpha, which requires we
944 return a proper errno, not an unchanged brk value. */
945 return -TARGET_ENOMEM;
946 #endif
947 /* For everything else, return the previous break. */
948 return target_brk;
951 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
952 defined(TARGET_NR_pselect6)
953 static inline abi_long copy_from_user_fdset(fd_set *fds,
954 abi_ulong target_fds_addr,
955 int n)
957 int i, nw, j, k;
958 abi_ulong b, *target_fds;
960 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
961 if (!(target_fds = lock_user(VERIFY_READ,
962 target_fds_addr,
963 sizeof(abi_ulong) * nw,
964 1)))
965 return -TARGET_EFAULT;
967 FD_ZERO(fds);
968 k = 0;
969 for (i = 0; i < nw; i++) {
970 /* grab the abi_ulong */
971 __get_user(b, &target_fds[i]);
972 for (j = 0; j < TARGET_ABI_BITS; j++) {
973 /* check the bit inside the abi_ulong */
974 if ((b >> j) & 1)
975 FD_SET(k, fds);
976 k++;
980 unlock_user(target_fds, target_fds_addr, 0);
982 return 0;
985 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
986 abi_ulong target_fds_addr,
987 int n)
989 if (target_fds_addr) {
990 if (copy_from_user_fdset(fds, target_fds_addr, n))
991 return -TARGET_EFAULT;
992 *fds_ptr = fds;
993 } else {
994 *fds_ptr = NULL;
996 return 0;
999 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1000 const fd_set *fds,
1001 int n)
1003 int i, nw, j, k;
1004 abi_long v;
1005 abi_ulong *target_fds;
1007 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1008 if (!(target_fds = lock_user(VERIFY_WRITE,
1009 target_fds_addr,
1010 sizeof(abi_ulong) * nw,
1011 0)))
1012 return -TARGET_EFAULT;
1014 k = 0;
1015 for (i = 0; i < nw; i++) {
1016 v = 0;
1017 for (j = 0; j < TARGET_ABI_BITS; j++) {
1018 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1019 k++;
1021 __put_user(v, &target_fds[i]);
1024 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1026 return 0;
1028 #endif
1030 #if defined(__alpha__)
1031 #define HOST_HZ 1024
1032 #else
1033 #define HOST_HZ 100
1034 #endif
1036 static inline abi_long host_to_target_clock_t(long ticks)
1038 #if HOST_HZ == TARGET_HZ
1039 return ticks;
1040 #else
1041 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1042 #endif
1045 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1046 const struct rusage *rusage)
1048 struct target_rusage *target_rusage;
1050 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1051 return -TARGET_EFAULT;
1052 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1053 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1054 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1055 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1056 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1057 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1058 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1059 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1060 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1061 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1062 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1063 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1064 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1065 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1066 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1067 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1068 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1069 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1070 unlock_user_struct(target_rusage, target_addr, 1);
1072 return 0;
1075 #ifdef TARGET_NR_setrlimit
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1078 abi_ulong target_rlim_swap;
1079 rlim_t result;
1081 target_rlim_swap = tswapal(target_rlim);
1082 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083 return RLIM_INFINITY;
1085 result = target_rlim_swap;
1086 if (target_rlim_swap != (rlim_t)result)
1087 return RLIM_INFINITY;
1089 return result;
1091 #endif
1093 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1094 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1096 abi_ulong target_rlim_swap;
1097 abi_ulong result;
1099 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1100 target_rlim_swap = TARGET_RLIM_INFINITY;
1101 else
1102 target_rlim_swap = rlim;
1103 result = tswapal(target_rlim_swap);
1105 return result;
1107 #endif
1109 static inline int target_to_host_resource(int code)
1111 switch (code) {
1112 case TARGET_RLIMIT_AS:
1113 return RLIMIT_AS;
1114 case TARGET_RLIMIT_CORE:
1115 return RLIMIT_CORE;
1116 case TARGET_RLIMIT_CPU:
1117 return RLIMIT_CPU;
1118 case TARGET_RLIMIT_DATA:
1119 return RLIMIT_DATA;
1120 case TARGET_RLIMIT_FSIZE:
1121 return RLIMIT_FSIZE;
1122 case TARGET_RLIMIT_LOCKS:
1123 return RLIMIT_LOCKS;
1124 case TARGET_RLIMIT_MEMLOCK:
1125 return RLIMIT_MEMLOCK;
1126 case TARGET_RLIMIT_MSGQUEUE:
1127 return RLIMIT_MSGQUEUE;
1128 case TARGET_RLIMIT_NICE:
1129 return RLIMIT_NICE;
1130 case TARGET_RLIMIT_NOFILE:
1131 return RLIMIT_NOFILE;
1132 case TARGET_RLIMIT_NPROC:
1133 return RLIMIT_NPROC;
1134 case TARGET_RLIMIT_RSS:
1135 return RLIMIT_RSS;
1136 case TARGET_RLIMIT_RTPRIO:
1137 return RLIMIT_RTPRIO;
1138 case TARGET_RLIMIT_SIGPENDING:
1139 return RLIMIT_SIGPENDING;
1140 case TARGET_RLIMIT_STACK:
1141 return RLIMIT_STACK;
1142 default:
1143 return code;
1147 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1148 abi_ulong target_tv_addr)
1150 struct target_timeval *target_tv;
1152 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1153 return -TARGET_EFAULT;
1156 __get_user(tv->tv_sec, &target_tv->tv_sec);
1157 __get_user(tv->tv_usec, &target_tv->tv_usec);
1159 unlock_user_struct(target_tv, target_tv_addr, 0);
1161 return 0;
1164 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1165 const struct timeval *tv)
1167 struct target_timeval *target_tv;
1169 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1170 return -TARGET_EFAULT;
1173 __put_user(tv->tv_sec, &target_tv->tv_sec);
1174 __put_user(tv->tv_usec, &target_tv->tv_usec);
1176 unlock_user_struct(target_tv, target_tv_addr, 1);
1178 return 0;
1181 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1182 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1183 abi_ulong target_tv_addr)
1185 struct target__kernel_sock_timeval *target_tv;
1187 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1188 return -TARGET_EFAULT;
1191 __get_user(tv->tv_sec, &target_tv->tv_sec);
1192 __get_user(tv->tv_usec, &target_tv->tv_usec);
1194 unlock_user_struct(target_tv, target_tv_addr, 0);
1196 return 0;
1198 #endif
1200 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1201 const struct timeval *tv)
1203 struct target__kernel_sock_timeval *target_tv;
1205 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1206 return -TARGET_EFAULT;
1209 __put_user(tv->tv_sec, &target_tv->tv_sec);
1210 __put_user(tv->tv_usec, &target_tv->tv_usec);
1212 unlock_user_struct(target_tv, target_tv_addr, 1);
1214 return 0;
1217 #if defined(TARGET_NR_futex) || \
1218 defined(TARGET_NR_rt_sigtimedwait) || \
1219 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1220 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1221 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1222 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1223 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1224 defined(TARGET_NR_timer_settime) || \
1225 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1226 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1227 abi_ulong target_addr)
1229 struct target_timespec *target_ts;
1231 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1232 return -TARGET_EFAULT;
1234 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1235 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236 unlock_user_struct(target_ts, target_addr, 0);
1237 return 0;
1239 #endif
1241 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1242 defined(TARGET_NR_timer_settime64) || \
1243 defined(TARGET_NR_mq_timedsend_time64) || \
1244 defined(TARGET_NR_mq_timedreceive_time64) || \
1245 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1246 defined(TARGET_NR_clock_nanosleep_time64) || \
1247 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1248 defined(TARGET_NR_utimensat) || \
1249 defined(TARGET_NR_utimensat_time64) || \
1250 defined(TARGET_NR_semtimedop_time64)
1251 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1252 abi_ulong target_addr)
1254 struct target__kernel_timespec *target_ts;
1256 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1257 return -TARGET_EFAULT;
1259 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1260 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1261 /* in 32bit mode, this drops the padding */
1262 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1263 unlock_user_struct(target_ts, target_addr, 0);
1264 return 0;
1266 #endif
1268 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1269 struct timespec *host_ts)
1271 struct target_timespec *target_ts;
1273 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1274 return -TARGET_EFAULT;
1276 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1277 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1278 unlock_user_struct(target_ts, target_addr, 1);
1279 return 0;
1282 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1283 struct timespec *host_ts)
1285 struct target__kernel_timespec *target_ts;
1287 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1288 return -TARGET_EFAULT;
1290 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1291 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1292 unlock_user_struct(target_ts, target_addr, 1);
1293 return 0;
1296 #if defined(TARGET_NR_gettimeofday)
1297 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1298 struct timezone *tz)
1300 struct target_timezone *target_tz;
1302 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1303 return -TARGET_EFAULT;
1306 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1307 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1309 unlock_user_struct(target_tz, target_tz_addr, 1);
1311 return 0;
1313 #endif
1315 #if defined(TARGET_NR_settimeofday)
1316 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1317 abi_ulong target_tz_addr)
1319 struct target_timezone *target_tz;
1321 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1322 return -TARGET_EFAULT;
1325 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1326 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1328 unlock_user_struct(target_tz, target_tz_addr, 0);
1330 return 0;
1332 #endif
1334 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1335 #include <mqueue.h>
1337 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1338 abi_ulong target_mq_attr_addr)
1340 struct target_mq_attr *target_mq_attr;
1342 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1343 target_mq_attr_addr, 1))
1344 return -TARGET_EFAULT;
1346 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1347 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1348 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1349 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1351 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1353 return 0;
1356 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1357 const struct mq_attr *attr)
1359 struct target_mq_attr *target_mq_attr;
1361 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1362 target_mq_attr_addr, 0))
1363 return -TARGET_EFAULT;
1365 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1370 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1372 return 0;
1374 #endif
1376 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1377 /* do_select() must return target values and target errnos. */
1378 static abi_long do_select(int n,
1379 abi_ulong rfd_addr, abi_ulong wfd_addr,
1380 abi_ulong efd_addr, abi_ulong target_tv_addr)
1382 fd_set rfds, wfds, efds;
1383 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1384 struct timeval tv;
1385 struct timespec ts, *ts_ptr;
1386 abi_long ret;
1388 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1389 if (ret) {
1390 return ret;
1392 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1393 if (ret) {
1394 return ret;
1396 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1397 if (ret) {
1398 return ret;
1401 if (target_tv_addr) {
1402 if (copy_from_user_timeval(&tv, target_tv_addr))
1403 return -TARGET_EFAULT;
1404 ts.tv_sec = tv.tv_sec;
1405 ts.tv_nsec = tv.tv_usec * 1000;
1406 ts_ptr = &ts;
1407 } else {
1408 ts_ptr = NULL;
1411 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1412 ts_ptr, NULL));
1414 if (!is_error(ret)) {
1415 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1416 return -TARGET_EFAULT;
1417 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1418 return -TARGET_EFAULT;
1419 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1420 return -TARGET_EFAULT;
1422 if (target_tv_addr) {
1423 tv.tv_sec = ts.tv_sec;
1424 tv.tv_usec = ts.tv_nsec / 1000;
1425 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1426 return -TARGET_EFAULT;
1431 return ret;
1434 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1435 static abi_long do_old_select(abi_ulong arg1)
1437 struct target_sel_arg_struct *sel;
1438 abi_ulong inp, outp, exp, tvp;
1439 long nsel;
1441 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1442 return -TARGET_EFAULT;
1445 nsel = tswapal(sel->n);
1446 inp = tswapal(sel->inp);
1447 outp = tswapal(sel->outp);
1448 exp = tswapal(sel->exp);
1449 tvp = tswapal(sel->tvp);
1451 unlock_user_struct(sel, arg1, 0);
1453 return do_select(nsel, inp, outp, exp, tvp);
1455 #endif
1456 #endif
1458 static abi_long do_pipe2(int host_pipe[], int flags)
1460 #ifdef CONFIG_PIPE2
1461 return pipe2(host_pipe, flags);
1462 #else
1463 return -ENOSYS;
1464 #endif
1467 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1468 int flags, int is_pipe2)
1470 int host_pipe[2];
1471 abi_long ret;
1472 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1474 if (is_error(ret))
1475 return get_errno(ret);
1477 /* Several targets have special calling conventions for the original
1478 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1479 if (!is_pipe2) {
1480 #if defined(TARGET_ALPHA)
1481 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1482 return host_pipe[0];
1483 #elif defined(TARGET_MIPS)
1484 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1485 return host_pipe[0];
1486 #elif defined(TARGET_SH4)
1487 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1488 return host_pipe[0];
1489 #elif defined(TARGET_SPARC)
1490 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1491 return host_pipe[0];
1492 #endif
1495 if (put_user_s32(host_pipe[0], pipedes)
1496 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1497 return -TARGET_EFAULT;
1498 return get_errno(ret);
1501 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1502 abi_ulong target_addr,
1503 socklen_t len)
1505 struct target_ip_mreqn *target_smreqn;
1507 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1508 if (!target_smreqn)
1509 return -TARGET_EFAULT;
1510 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1511 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1512 if (len == sizeof(struct target_ip_mreqn))
1513 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1514 unlock_user(target_smreqn, target_addr, 0);
1516 return 0;
1519 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1520 abi_ulong target_addr,
1521 socklen_t len)
1523 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1524 sa_family_t sa_family;
1525 struct target_sockaddr *target_saddr;
1527 if (fd_trans_target_to_host_addr(fd)) {
1528 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1531 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1532 if (!target_saddr)
1533 return -TARGET_EFAULT;
1535 sa_family = tswap16(target_saddr->sa_family);
1537 /* Oops. The caller might send a incomplete sun_path; sun_path
1538 * must be terminated by \0 (see the manual page), but
1539 * unfortunately it is quite common to specify sockaddr_un
1540 * length as "strlen(x->sun_path)" while it should be
1541 * "strlen(...) + 1". We'll fix that here if needed.
1542 * Linux kernel has a similar feature.
1545 if (sa_family == AF_UNIX) {
1546 if (len < unix_maxlen && len > 0) {
1547 char *cp = (char*)target_saddr;
1549 if ( cp[len-1] && !cp[len] )
1550 len++;
1552 if (len > unix_maxlen)
1553 len = unix_maxlen;
1556 memcpy(addr, target_saddr, len);
1557 addr->sa_family = sa_family;
1558 if (sa_family == AF_NETLINK) {
1559 struct sockaddr_nl *nladdr;
1561 nladdr = (struct sockaddr_nl *)addr;
1562 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1563 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1564 } else if (sa_family == AF_PACKET) {
1565 struct target_sockaddr_ll *lladdr;
1567 lladdr = (struct target_sockaddr_ll *)addr;
1568 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1569 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1571 unlock_user(target_saddr, target_addr, 0);
1573 return 0;
1576 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1577 struct sockaddr *addr,
1578 socklen_t len)
1580 struct target_sockaddr *target_saddr;
1582 if (len == 0) {
1583 return 0;
1585 assert(addr);
1587 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1588 if (!target_saddr)
1589 return -TARGET_EFAULT;
1590 memcpy(target_saddr, addr, len);
1591 if (len >= offsetof(struct target_sockaddr, sa_family) +
1592 sizeof(target_saddr->sa_family)) {
1593 target_saddr->sa_family = tswap16(addr->sa_family);
1595 if (addr->sa_family == AF_NETLINK &&
1596 len >= sizeof(struct target_sockaddr_nl)) {
1597 struct target_sockaddr_nl *target_nl =
1598 (struct target_sockaddr_nl *)target_saddr;
1599 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1600 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1601 } else if (addr->sa_family == AF_PACKET) {
1602 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1603 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1604 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1605 } else if (addr->sa_family == AF_INET6 &&
1606 len >= sizeof(struct target_sockaddr_in6)) {
1607 struct target_sockaddr_in6 *target_in6 =
1608 (struct target_sockaddr_in6 *)target_saddr;
1609 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1611 unlock_user(target_saddr, target_addr, len);
1613 return 0;
1616 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1617 struct target_msghdr *target_msgh)
1619 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1620 abi_long msg_controllen;
1621 abi_ulong target_cmsg_addr;
1622 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1623 socklen_t space = 0;
1625 msg_controllen = tswapal(target_msgh->msg_controllen);
1626 if (msg_controllen < sizeof (struct target_cmsghdr))
1627 goto the_end;
1628 target_cmsg_addr = tswapal(target_msgh->msg_control);
1629 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1630 target_cmsg_start = target_cmsg;
1631 if (!target_cmsg)
1632 return -TARGET_EFAULT;
1634 while (cmsg && target_cmsg) {
1635 void *data = CMSG_DATA(cmsg);
1636 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1638 int len = tswapal(target_cmsg->cmsg_len)
1639 - sizeof(struct target_cmsghdr);
1641 space += CMSG_SPACE(len);
1642 if (space > msgh->msg_controllen) {
1643 space -= CMSG_SPACE(len);
1644 /* This is a QEMU bug, since we allocated the payload
1645 * area ourselves (unlike overflow in host-to-target
1646 * conversion, which is just the guest giving us a buffer
1647 * that's too small). It can't happen for the payload types
1648 * we currently support; if it becomes an issue in future
1649 * we would need to improve our allocation strategy to
1650 * something more intelligent than "twice the size of the
1651 * target buffer we're reading from".
1653 qemu_log_mask(LOG_UNIMP,
1654 ("Unsupported ancillary data %d/%d: "
1655 "unhandled msg size\n"),
1656 tswap32(target_cmsg->cmsg_level),
1657 tswap32(target_cmsg->cmsg_type));
1658 break;
1661 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1662 cmsg->cmsg_level = SOL_SOCKET;
1663 } else {
1664 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1666 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1667 cmsg->cmsg_len = CMSG_LEN(len);
1669 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1670 int *fd = (int *)data;
1671 int *target_fd = (int *)target_data;
1672 int i, numfds = len / sizeof(int);
1674 for (i = 0; i < numfds; i++) {
1675 __get_user(fd[i], target_fd + i);
1677 } else if (cmsg->cmsg_level == SOL_SOCKET
1678 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1679 struct ucred *cred = (struct ucred *)data;
1680 struct target_ucred *target_cred =
1681 (struct target_ucred *)target_data;
1683 __get_user(cred->pid, &target_cred->pid);
1684 __get_user(cred->uid, &target_cred->uid);
1685 __get_user(cred->gid, &target_cred->gid);
1686 } else {
1687 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1688 cmsg->cmsg_level, cmsg->cmsg_type);
1689 memcpy(data, target_data, len);
1692 cmsg = CMSG_NXTHDR(msgh, cmsg);
1693 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1694 target_cmsg_start);
1696 unlock_user(target_cmsg, target_cmsg_addr, 0);
1697 the_end:
1698 msgh->msg_controllen = space;
1699 return 0;
1702 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1703 struct msghdr *msgh)
1705 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1706 abi_long msg_controllen;
1707 abi_ulong target_cmsg_addr;
1708 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1709 socklen_t space = 0;
1711 msg_controllen = tswapal(target_msgh->msg_controllen);
1712 if (msg_controllen < sizeof (struct target_cmsghdr))
1713 goto the_end;
1714 target_cmsg_addr = tswapal(target_msgh->msg_control);
1715 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1716 target_cmsg_start = target_cmsg;
1717 if (!target_cmsg)
1718 return -TARGET_EFAULT;
1720 while (cmsg && target_cmsg) {
1721 void *data = CMSG_DATA(cmsg);
1722 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1724 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1725 int tgt_len, tgt_space;
1727 /* We never copy a half-header but may copy half-data;
1728 * this is Linux's behaviour in put_cmsg(). Note that
1729 * truncation here is a guest problem (which we report
1730 * to the guest via the CTRUNC bit), unlike truncation
1731 * in target_to_host_cmsg, which is a QEMU bug.
1733 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1734 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1735 break;
1738 if (cmsg->cmsg_level == SOL_SOCKET) {
1739 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1740 } else {
1741 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1743 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1745 /* Payload types which need a different size of payload on
1746 * the target must adjust tgt_len here.
1748 tgt_len = len;
1749 switch (cmsg->cmsg_level) {
1750 case SOL_SOCKET:
1751 switch (cmsg->cmsg_type) {
1752 case SO_TIMESTAMP:
1753 tgt_len = sizeof(struct target_timeval);
1754 break;
1755 default:
1756 break;
1758 break;
1759 default:
1760 break;
1763 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1764 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1765 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1768 /* We must now copy-and-convert len bytes of payload
1769 * into tgt_len bytes of destination space. Bear in mind
1770 * that in both source and destination we may be dealing
1771 * with a truncated value!
1773 switch (cmsg->cmsg_level) {
1774 case SOL_SOCKET:
1775 switch (cmsg->cmsg_type) {
1776 case SCM_RIGHTS:
1778 int *fd = (int *)data;
1779 int *target_fd = (int *)target_data;
1780 int i, numfds = tgt_len / sizeof(int);
1782 for (i = 0; i < numfds; i++) {
1783 __put_user(fd[i], target_fd + i);
1785 break;
1787 case SO_TIMESTAMP:
1789 struct timeval *tv = (struct timeval *)data;
1790 struct target_timeval *target_tv =
1791 (struct target_timeval *)target_data;
1793 if (len != sizeof(struct timeval) ||
1794 tgt_len != sizeof(struct target_timeval)) {
1795 goto unimplemented;
1798 /* copy struct timeval to target */
1799 __put_user(tv->tv_sec, &target_tv->tv_sec);
1800 __put_user(tv->tv_usec, &target_tv->tv_usec);
1801 break;
1803 case SCM_CREDENTIALS:
1805 struct ucred *cred = (struct ucred *)data;
1806 struct target_ucred *target_cred =
1807 (struct target_ucred *)target_data;
1809 __put_user(cred->pid, &target_cred->pid);
1810 __put_user(cred->uid, &target_cred->uid);
1811 __put_user(cred->gid, &target_cred->gid);
1812 break;
1814 default:
1815 goto unimplemented;
1817 break;
1819 case SOL_IP:
1820 switch (cmsg->cmsg_type) {
1821 case IP_TTL:
1823 uint32_t *v = (uint32_t *)data;
1824 uint32_t *t_int = (uint32_t *)target_data;
1826 if (len != sizeof(uint32_t) ||
1827 tgt_len != sizeof(uint32_t)) {
1828 goto unimplemented;
1830 __put_user(*v, t_int);
1831 break;
1833 case IP_RECVERR:
1835 struct errhdr_t {
1836 struct sock_extended_err ee;
1837 struct sockaddr_in offender;
1839 struct errhdr_t *errh = (struct errhdr_t *)data;
1840 struct errhdr_t *target_errh =
1841 (struct errhdr_t *)target_data;
1843 if (len != sizeof(struct errhdr_t) ||
1844 tgt_len != sizeof(struct errhdr_t)) {
1845 goto unimplemented;
1847 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1848 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1849 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1850 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1851 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1852 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1853 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1854 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1855 (void *) &errh->offender, sizeof(errh->offender));
1856 break;
1858 default:
1859 goto unimplemented;
1861 break;
1863 case SOL_IPV6:
1864 switch (cmsg->cmsg_type) {
1865 case IPV6_HOPLIMIT:
1867 uint32_t *v = (uint32_t *)data;
1868 uint32_t *t_int = (uint32_t *)target_data;
1870 if (len != sizeof(uint32_t) ||
1871 tgt_len != sizeof(uint32_t)) {
1872 goto unimplemented;
1874 __put_user(*v, t_int);
1875 break;
1877 case IPV6_RECVERR:
1879 struct errhdr6_t {
1880 struct sock_extended_err ee;
1881 struct sockaddr_in6 offender;
1883 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1884 struct errhdr6_t *target_errh =
1885 (struct errhdr6_t *)target_data;
1887 if (len != sizeof(struct errhdr6_t) ||
1888 tgt_len != sizeof(struct errhdr6_t)) {
1889 goto unimplemented;
1891 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1892 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1893 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1894 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1895 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1896 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1897 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1898 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1899 (void *) &errh->offender, sizeof(errh->offender));
1900 break;
1902 default:
1903 goto unimplemented;
1905 break;
1907 default:
1908 unimplemented:
1909 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1910 cmsg->cmsg_level, cmsg->cmsg_type);
1911 memcpy(target_data, data, MIN(len, tgt_len));
1912 if (tgt_len > len) {
1913 memset(target_data + len, 0, tgt_len - len);
1917 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1918 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1919 if (msg_controllen < tgt_space) {
1920 tgt_space = msg_controllen;
1922 msg_controllen -= tgt_space;
1923 space += tgt_space;
1924 cmsg = CMSG_NXTHDR(msgh, cmsg);
1925 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1926 target_cmsg_start);
1928 unlock_user(target_cmsg, target_cmsg_addr, space);
1929 the_end:
1930 target_msgh->msg_controllen = tswapal(space);
1931 return 0;
1934 /* do_setsockopt() Must return target values and target errnos. */
1935 static abi_long do_setsockopt(int sockfd, int level, int optname,
1936 abi_ulong optval_addr, socklen_t optlen)
1938 abi_long ret;
1939 int val;
1940 struct ip_mreqn *ip_mreq;
1941 struct ip_mreq_source *ip_mreq_source;
1943 switch(level) {
1944 case SOL_TCP:
1945 /* TCP options all take an 'int' value. */
1946 if (optlen < sizeof(uint32_t))
1947 return -TARGET_EINVAL;
1949 if (get_user_u32(val, optval_addr))
1950 return -TARGET_EFAULT;
1951 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1952 break;
1953 case SOL_IP:
1954 switch(optname) {
1955 case IP_TOS:
1956 case IP_TTL:
1957 case IP_HDRINCL:
1958 case IP_ROUTER_ALERT:
1959 case IP_RECVOPTS:
1960 case IP_RETOPTS:
1961 case IP_PKTINFO:
1962 case IP_MTU_DISCOVER:
1963 case IP_RECVERR:
1964 case IP_RECVTTL:
1965 case IP_RECVTOS:
1966 #ifdef IP_FREEBIND
1967 case IP_FREEBIND:
1968 #endif
1969 case IP_MULTICAST_TTL:
1970 case IP_MULTICAST_LOOP:
1971 val = 0;
1972 if (optlen >= sizeof(uint32_t)) {
1973 if (get_user_u32(val, optval_addr))
1974 return -TARGET_EFAULT;
1975 } else if (optlen >= 1) {
1976 if (get_user_u8(val, optval_addr))
1977 return -TARGET_EFAULT;
1979 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1980 break;
1981 case IP_ADD_MEMBERSHIP:
1982 case IP_DROP_MEMBERSHIP:
1983 if (optlen < sizeof (struct target_ip_mreq) ||
1984 optlen > sizeof (struct target_ip_mreqn))
1985 return -TARGET_EINVAL;
1987 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1988 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1989 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1990 break;
1992 case IP_BLOCK_SOURCE:
1993 case IP_UNBLOCK_SOURCE:
1994 case IP_ADD_SOURCE_MEMBERSHIP:
1995 case IP_DROP_SOURCE_MEMBERSHIP:
1996 if (optlen != sizeof (struct target_ip_mreq_source))
1997 return -TARGET_EINVAL;
1999 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2000 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2001 unlock_user (ip_mreq_source, optval_addr, 0);
2002 break;
2004 default:
2005 goto unimplemented;
2007 break;
2008 case SOL_IPV6:
2009 switch (optname) {
2010 case IPV6_MTU_DISCOVER:
2011 case IPV6_MTU:
2012 case IPV6_V6ONLY:
2013 case IPV6_RECVPKTINFO:
2014 case IPV6_UNICAST_HOPS:
2015 case IPV6_MULTICAST_HOPS:
2016 case IPV6_MULTICAST_LOOP:
2017 case IPV6_RECVERR:
2018 case IPV6_RECVHOPLIMIT:
2019 case IPV6_2292HOPLIMIT:
2020 case IPV6_CHECKSUM:
2021 case IPV6_ADDRFORM:
2022 case IPV6_2292PKTINFO:
2023 case IPV6_RECVTCLASS:
2024 case IPV6_RECVRTHDR:
2025 case IPV6_2292RTHDR:
2026 case IPV6_RECVHOPOPTS:
2027 case IPV6_2292HOPOPTS:
2028 case IPV6_RECVDSTOPTS:
2029 case IPV6_2292DSTOPTS:
2030 case IPV6_TCLASS:
2031 #ifdef IPV6_RECVPATHMTU
2032 case IPV6_RECVPATHMTU:
2033 #endif
2034 #ifdef IPV6_TRANSPARENT
2035 case IPV6_TRANSPARENT:
2036 #endif
2037 #ifdef IPV6_FREEBIND
2038 case IPV6_FREEBIND:
2039 #endif
2040 #ifdef IPV6_RECVORIGDSTADDR
2041 case IPV6_RECVORIGDSTADDR:
2042 #endif
2043 val = 0;
2044 if (optlen < sizeof(uint32_t)) {
2045 return -TARGET_EINVAL;
2047 if (get_user_u32(val, optval_addr)) {
2048 return -TARGET_EFAULT;
2050 ret = get_errno(setsockopt(sockfd, level, optname,
2051 &val, sizeof(val)));
2052 break;
2053 case IPV6_PKTINFO:
2055 struct in6_pktinfo pki;
2057 if (optlen < sizeof(pki)) {
2058 return -TARGET_EINVAL;
2061 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2062 return -TARGET_EFAULT;
2065 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2067 ret = get_errno(setsockopt(sockfd, level, optname,
2068 &pki, sizeof(pki)));
2069 break;
2071 case IPV6_ADD_MEMBERSHIP:
2072 case IPV6_DROP_MEMBERSHIP:
2074 struct ipv6_mreq ipv6mreq;
2076 if (optlen < sizeof(ipv6mreq)) {
2077 return -TARGET_EINVAL;
2080 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2081 return -TARGET_EFAULT;
2084 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2086 ret = get_errno(setsockopt(sockfd, level, optname,
2087 &ipv6mreq, sizeof(ipv6mreq)));
2088 break;
2090 default:
2091 goto unimplemented;
2093 break;
2094 case SOL_ICMPV6:
2095 switch (optname) {
2096 case ICMPV6_FILTER:
2098 struct icmp6_filter icmp6f;
2100 if (optlen > sizeof(icmp6f)) {
2101 optlen = sizeof(icmp6f);
2104 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2105 return -TARGET_EFAULT;
2108 for (val = 0; val < 8; val++) {
2109 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2112 ret = get_errno(setsockopt(sockfd, level, optname,
2113 &icmp6f, optlen));
2114 break;
2116 default:
2117 goto unimplemented;
2119 break;
2120 case SOL_RAW:
2121 switch (optname) {
2122 case ICMP_FILTER:
2123 case IPV6_CHECKSUM:
2124 /* those take an u32 value */
2125 if (optlen < sizeof(uint32_t)) {
2126 return -TARGET_EINVAL;
2129 if (get_user_u32(val, optval_addr)) {
2130 return -TARGET_EFAULT;
2132 ret = get_errno(setsockopt(sockfd, level, optname,
2133 &val, sizeof(val)));
2134 break;
2136 default:
2137 goto unimplemented;
2139 break;
2140 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2141 case SOL_ALG:
2142 switch (optname) {
2143 case ALG_SET_KEY:
2145 char *alg_key = g_malloc(optlen);
2147 if (!alg_key) {
2148 return -TARGET_ENOMEM;
2150 if (copy_from_user(alg_key, optval_addr, optlen)) {
2151 g_free(alg_key);
2152 return -TARGET_EFAULT;
2154 ret = get_errno(setsockopt(sockfd, level, optname,
2155 alg_key, optlen));
2156 g_free(alg_key);
2157 break;
2159 case ALG_SET_AEAD_AUTHSIZE:
2161 ret = get_errno(setsockopt(sockfd, level, optname,
2162 NULL, optlen));
2163 break;
2165 default:
2166 goto unimplemented;
2168 break;
2169 #endif
2170 case TARGET_SOL_SOCKET:
2171 switch (optname) {
2172 case TARGET_SO_RCVTIMEO:
2174 struct timeval tv;
2176 optname = SO_RCVTIMEO;
2178 set_timeout:
2179 if (optlen != sizeof(struct target_timeval)) {
2180 return -TARGET_EINVAL;
2183 if (copy_from_user_timeval(&tv, optval_addr)) {
2184 return -TARGET_EFAULT;
2187 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2188 &tv, sizeof(tv)));
2189 return ret;
2191 case TARGET_SO_SNDTIMEO:
2192 optname = SO_SNDTIMEO;
2193 goto set_timeout;
2194 case TARGET_SO_ATTACH_FILTER:
2196 struct target_sock_fprog *tfprog;
2197 struct target_sock_filter *tfilter;
2198 struct sock_fprog fprog;
2199 struct sock_filter *filter;
2200 int i;
2202 if (optlen != sizeof(*tfprog)) {
2203 return -TARGET_EINVAL;
2205 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2206 return -TARGET_EFAULT;
2208 if (!lock_user_struct(VERIFY_READ, tfilter,
2209 tswapal(tfprog->filter), 0)) {
2210 unlock_user_struct(tfprog, optval_addr, 1);
2211 return -TARGET_EFAULT;
2214 fprog.len = tswap16(tfprog->len);
2215 filter = g_try_new(struct sock_filter, fprog.len);
2216 if (filter == NULL) {
2217 unlock_user_struct(tfilter, tfprog->filter, 1);
2218 unlock_user_struct(tfprog, optval_addr, 1);
2219 return -TARGET_ENOMEM;
2221 for (i = 0; i < fprog.len; i++) {
2222 filter[i].code = tswap16(tfilter[i].code);
2223 filter[i].jt = tfilter[i].jt;
2224 filter[i].jf = tfilter[i].jf;
2225 filter[i].k = tswap32(tfilter[i].k);
2227 fprog.filter = filter;
2229 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2230 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2231 g_free(filter);
2233 unlock_user_struct(tfilter, tfprog->filter, 1);
2234 unlock_user_struct(tfprog, optval_addr, 1);
2235 return ret;
2237 case TARGET_SO_BINDTODEVICE:
2239 char *dev_ifname, *addr_ifname;
2241 if (optlen > IFNAMSIZ - 1) {
2242 optlen = IFNAMSIZ - 1;
2244 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2245 if (!dev_ifname) {
2246 return -TARGET_EFAULT;
2248 optname = SO_BINDTODEVICE;
2249 addr_ifname = alloca(IFNAMSIZ);
2250 memcpy(addr_ifname, dev_ifname, optlen);
2251 addr_ifname[optlen] = 0;
2252 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2253 addr_ifname, optlen));
2254 unlock_user (dev_ifname, optval_addr, 0);
2255 return ret;
2257 case TARGET_SO_LINGER:
2259 struct linger lg;
2260 struct target_linger *tlg;
2262 if (optlen != sizeof(struct target_linger)) {
2263 return -TARGET_EINVAL;
2265 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2266 return -TARGET_EFAULT;
2268 __get_user(lg.l_onoff, &tlg->l_onoff);
2269 __get_user(lg.l_linger, &tlg->l_linger);
2270 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2271 &lg, sizeof(lg)));
2272 unlock_user_struct(tlg, optval_addr, 0);
2273 return ret;
2275 /* Options with 'int' argument. */
2276 case TARGET_SO_DEBUG:
2277 optname = SO_DEBUG;
2278 break;
2279 case TARGET_SO_REUSEADDR:
2280 optname = SO_REUSEADDR;
2281 break;
2282 #ifdef SO_REUSEPORT
2283 case TARGET_SO_REUSEPORT:
2284 optname = SO_REUSEPORT;
2285 break;
2286 #endif
2287 case TARGET_SO_TYPE:
2288 optname = SO_TYPE;
2289 break;
2290 case TARGET_SO_ERROR:
2291 optname = SO_ERROR;
2292 break;
2293 case TARGET_SO_DONTROUTE:
2294 optname = SO_DONTROUTE;
2295 break;
2296 case TARGET_SO_BROADCAST:
2297 optname = SO_BROADCAST;
2298 break;
2299 case TARGET_SO_SNDBUF:
2300 optname = SO_SNDBUF;
2301 break;
2302 case TARGET_SO_SNDBUFFORCE:
2303 optname = SO_SNDBUFFORCE;
2304 break;
2305 case TARGET_SO_RCVBUF:
2306 optname = SO_RCVBUF;
2307 break;
2308 case TARGET_SO_RCVBUFFORCE:
2309 optname = SO_RCVBUFFORCE;
2310 break;
2311 case TARGET_SO_KEEPALIVE:
2312 optname = SO_KEEPALIVE;
2313 break;
2314 case TARGET_SO_OOBINLINE:
2315 optname = SO_OOBINLINE;
2316 break;
2317 case TARGET_SO_NO_CHECK:
2318 optname = SO_NO_CHECK;
2319 break;
2320 case TARGET_SO_PRIORITY:
2321 optname = SO_PRIORITY;
2322 break;
2323 #ifdef SO_BSDCOMPAT
2324 case TARGET_SO_BSDCOMPAT:
2325 optname = SO_BSDCOMPAT;
2326 break;
2327 #endif
2328 case TARGET_SO_PASSCRED:
2329 optname = SO_PASSCRED;
2330 break;
2331 case TARGET_SO_PASSSEC:
2332 optname = SO_PASSSEC;
2333 break;
2334 case TARGET_SO_TIMESTAMP:
2335 optname = SO_TIMESTAMP;
2336 break;
2337 case TARGET_SO_RCVLOWAT:
2338 optname = SO_RCVLOWAT;
2339 break;
2340 default:
2341 goto unimplemented;
2343 if (optlen < sizeof(uint32_t))
2344 return -TARGET_EINVAL;
2346 if (get_user_u32(val, optval_addr))
2347 return -TARGET_EFAULT;
2348 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2349 break;
2350 #ifdef SOL_NETLINK
2351 case SOL_NETLINK:
2352 switch (optname) {
2353 case NETLINK_PKTINFO:
2354 case NETLINK_ADD_MEMBERSHIP:
2355 case NETLINK_DROP_MEMBERSHIP:
2356 case NETLINK_BROADCAST_ERROR:
2357 case NETLINK_NO_ENOBUFS:
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2359 case NETLINK_LISTEN_ALL_NSID:
2360 case NETLINK_CAP_ACK:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2363 case NETLINK_EXT_ACK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2366 case NETLINK_GET_STRICT_CHK:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2368 break;
2369 default:
2370 goto unimplemented;
2372 val = 0;
2373 if (optlen < sizeof(uint32_t)) {
2374 return -TARGET_EINVAL;
2376 if (get_user_u32(val, optval_addr)) {
2377 return -TARGET_EFAULT;
2379 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2380 sizeof(val)));
2381 break;
2382 #endif /* SOL_NETLINK */
2383 default:
2384 unimplemented:
2385 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2386 level, optname);
2387 ret = -TARGET_ENOPROTOOPT;
2389 return ret;
2392 /* do_getsockopt() Must return target values and target errnos. */
2393 static abi_long do_getsockopt(int sockfd, int level, int optname,
2394 abi_ulong optval_addr, abi_ulong optlen)
2396 abi_long ret;
2397 int len, val;
2398 socklen_t lv;
2400 switch(level) {
2401 case TARGET_SOL_SOCKET:
2402 level = SOL_SOCKET;
2403 switch (optname) {
2404 /* These don't just return a single integer */
2405 case TARGET_SO_PEERNAME:
2406 goto unimplemented;
2407 case TARGET_SO_RCVTIMEO: {
2408 struct timeval tv;
2409 socklen_t tvlen;
2411 optname = SO_RCVTIMEO;
2413 get_timeout:
2414 if (get_user_u32(len, optlen)) {
2415 return -TARGET_EFAULT;
2417 if (len < 0) {
2418 return -TARGET_EINVAL;
2421 tvlen = sizeof(tv);
2422 ret = get_errno(getsockopt(sockfd, level, optname,
2423 &tv, &tvlen));
2424 if (ret < 0) {
2425 return ret;
2427 if (len > sizeof(struct target_timeval)) {
2428 len = sizeof(struct target_timeval);
2430 if (copy_to_user_timeval(optval_addr, &tv)) {
2431 return -TARGET_EFAULT;
2433 if (put_user_u32(len, optlen)) {
2434 return -TARGET_EFAULT;
2436 break;
2438 case TARGET_SO_SNDTIMEO:
2439 optname = SO_SNDTIMEO;
2440 goto get_timeout;
2441 case TARGET_SO_PEERCRED: {
2442 struct ucred cr;
2443 socklen_t crlen;
2444 struct target_ucred *tcr;
2446 if (get_user_u32(len, optlen)) {
2447 return -TARGET_EFAULT;
2449 if (len < 0) {
2450 return -TARGET_EINVAL;
2453 crlen = sizeof(cr);
2454 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2455 &cr, &crlen));
2456 if (ret < 0) {
2457 return ret;
2459 if (len > crlen) {
2460 len = crlen;
2462 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2463 return -TARGET_EFAULT;
2465 __put_user(cr.pid, &tcr->pid);
2466 __put_user(cr.uid, &tcr->uid);
2467 __put_user(cr.gid, &tcr->gid);
2468 unlock_user_struct(tcr, optval_addr, 1);
2469 if (put_user_u32(len, optlen)) {
2470 return -TARGET_EFAULT;
2472 break;
2474 case TARGET_SO_PEERSEC: {
2475 char *name;
2477 if (get_user_u32(len, optlen)) {
2478 return -TARGET_EFAULT;
2480 if (len < 0) {
2481 return -TARGET_EINVAL;
2483 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2484 if (!name) {
2485 return -TARGET_EFAULT;
2487 lv = len;
2488 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2489 name, &lv));
2490 if (put_user_u32(lv, optlen)) {
2491 ret = -TARGET_EFAULT;
2493 unlock_user(name, optval_addr, lv);
2494 break;
2496 case TARGET_SO_LINGER:
2498 struct linger lg;
2499 socklen_t lglen;
2500 struct target_linger *tlg;
2502 if (get_user_u32(len, optlen)) {
2503 return -TARGET_EFAULT;
2505 if (len < 0) {
2506 return -TARGET_EINVAL;
2509 lglen = sizeof(lg);
2510 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2511 &lg, &lglen));
2512 if (ret < 0) {
2513 return ret;
2515 if (len > lglen) {
2516 len = lglen;
2518 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2519 return -TARGET_EFAULT;
2521 __put_user(lg.l_onoff, &tlg->l_onoff);
2522 __put_user(lg.l_linger, &tlg->l_linger);
2523 unlock_user_struct(tlg, optval_addr, 1);
2524 if (put_user_u32(len, optlen)) {
2525 return -TARGET_EFAULT;
2527 break;
2529 /* Options with 'int' argument. */
2530 case TARGET_SO_DEBUG:
2531 optname = SO_DEBUG;
2532 goto int_case;
2533 case TARGET_SO_REUSEADDR:
2534 optname = SO_REUSEADDR;
2535 goto int_case;
2536 #ifdef SO_REUSEPORT
2537 case TARGET_SO_REUSEPORT:
2538 optname = SO_REUSEPORT;
2539 goto int_case;
2540 #endif
2541 case TARGET_SO_TYPE:
2542 optname = SO_TYPE;
2543 goto int_case;
2544 case TARGET_SO_ERROR:
2545 optname = SO_ERROR;
2546 goto int_case;
2547 case TARGET_SO_DONTROUTE:
2548 optname = SO_DONTROUTE;
2549 goto int_case;
2550 case TARGET_SO_BROADCAST:
2551 optname = SO_BROADCAST;
2552 goto int_case;
2553 case TARGET_SO_SNDBUF:
2554 optname = SO_SNDBUF;
2555 goto int_case;
2556 case TARGET_SO_RCVBUF:
2557 optname = SO_RCVBUF;
2558 goto int_case;
2559 case TARGET_SO_KEEPALIVE:
2560 optname = SO_KEEPALIVE;
2561 goto int_case;
2562 case TARGET_SO_OOBINLINE:
2563 optname = SO_OOBINLINE;
2564 goto int_case;
2565 case TARGET_SO_NO_CHECK:
2566 optname = SO_NO_CHECK;
2567 goto int_case;
2568 case TARGET_SO_PRIORITY:
2569 optname = SO_PRIORITY;
2570 goto int_case;
2571 #ifdef SO_BSDCOMPAT
2572 case TARGET_SO_BSDCOMPAT:
2573 optname = SO_BSDCOMPAT;
2574 goto int_case;
2575 #endif
2576 case TARGET_SO_PASSCRED:
2577 optname = SO_PASSCRED;
2578 goto int_case;
2579 case TARGET_SO_TIMESTAMP:
2580 optname = SO_TIMESTAMP;
2581 goto int_case;
2582 case TARGET_SO_RCVLOWAT:
2583 optname = SO_RCVLOWAT;
2584 goto int_case;
2585 case TARGET_SO_ACCEPTCONN:
2586 optname = SO_ACCEPTCONN;
2587 goto int_case;
2588 default:
2589 goto int_case;
2591 break;
2592 case SOL_TCP:
2593 /* TCP options all take an 'int' value. */
2594 int_case:
2595 if (get_user_u32(len, optlen))
2596 return -TARGET_EFAULT;
2597 if (len < 0)
2598 return -TARGET_EINVAL;
2599 lv = sizeof(lv);
2600 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2601 if (ret < 0)
2602 return ret;
2603 if (optname == SO_TYPE) {
2604 val = host_to_target_sock_type(val);
2606 if (len > lv)
2607 len = lv;
2608 if (len == 4) {
2609 if (put_user_u32(val, optval_addr))
2610 return -TARGET_EFAULT;
2611 } else {
2612 if (put_user_u8(val, optval_addr))
2613 return -TARGET_EFAULT;
2615 if (put_user_u32(len, optlen))
2616 return -TARGET_EFAULT;
2617 break;
2618 case SOL_IP:
2619 switch(optname) {
2620 case IP_TOS:
2621 case IP_TTL:
2622 case IP_HDRINCL:
2623 case IP_ROUTER_ALERT:
2624 case IP_RECVOPTS:
2625 case IP_RETOPTS:
2626 case IP_PKTINFO:
2627 case IP_MTU_DISCOVER:
2628 case IP_RECVERR:
2629 case IP_RECVTOS:
2630 #ifdef IP_FREEBIND
2631 case IP_FREEBIND:
2632 #endif
2633 case IP_MULTICAST_TTL:
2634 case IP_MULTICAST_LOOP:
2635 if (get_user_u32(len, optlen))
2636 return -TARGET_EFAULT;
2637 if (len < 0)
2638 return -TARGET_EINVAL;
2639 lv = sizeof(lv);
2640 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2641 if (ret < 0)
2642 return ret;
2643 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2644 len = 1;
2645 if (put_user_u32(len, optlen)
2646 || put_user_u8(val, optval_addr))
2647 return -TARGET_EFAULT;
2648 } else {
2649 if (len > sizeof(int))
2650 len = sizeof(int);
2651 if (put_user_u32(len, optlen)
2652 || put_user_u32(val, optval_addr))
2653 return -TARGET_EFAULT;
2655 break;
2656 default:
2657 ret = -TARGET_ENOPROTOOPT;
2658 break;
2660 break;
2661 case SOL_IPV6:
2662 switch (optname) {
2663 case IPV6_MTU_DISCOVER:
2664 case IPV6_MTU:
2665 case IPV6_V6ONLY:
2666 case IPV6_RECVPKTINFO:
2667 case IPV6_UNICAST_HOPS:
2668 case IPV6_MULTICAST_HOPS:
2669 case IPV6_MULTICAST_LOOP:
2670 case IPV6_RECVERR:
2671 case IPV6_RECVHOPLIMIT:
2672 case IPV6_2292HOPLIMIT:
2673 case IPV6_CHECKSUM:
2674 case IPV6_ADDRFORM:
2675 case IPV6_2292PKTINFO:
2676 case IPV6_RECVTCLASS:
2677 case IPV6_RECVRTHDR:
2678 case IPV6_2292RTHDR:
2679 case IPV6_RECVHOPOPTS:
2680 case IPV6_2292HOPOPTS:
2681 case IPV6_RECVDSTOPTS:
2682 case IPV6_2292DSTOPTS:
2683 case IPV6_TCLASS:
2684 #ifdef IPV6_RECVPATHMTU
2685 case IPV6_RECVPATHMTU:
2686 #endif
2687 #ifdef IPV6_TRANSPARENT
2688 case IPV6_TRANSPARENT:
2689 #endif
2690 #ifdef IPV6_FREEBIND
2691 case IPV6_FREEBIND:
2692 #endif
2693 #ifdef IPV6_RECVORIGDSTADDR
2694 case IPV6_RECVORIGDSTADDR:
2695 #endif
2696 if (get_user_u32(len, optlen))
2697 return -TARGET_EFAULT;
2698 if (len < 0)
2699 return -TARGET_EINVAL;
2700 lv = sizeof(lv);
2701 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2702 if (ret < 0)
2703 return ret;
2704 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2705 len = 1;
2706 if (put_user_u32(len, optlen)
2707 || put_user_u8(val, optval_addr))
2708 return -TARGET_EFAULT;
2709 } else {
2710 if (len > sizeof(int))
2711 len = sizeof(int);
2712 if (put_user_u32(len, optlen)
2713 || put_user_u32(val, optval_addr))
2714 return -TARGET_EFAULT;
2716 break;
2717 default:
2718 ret = -TARGET_ENOPROTOOPT;
2719 break;
2721 break;
2722 #ifdef SOL_NETLINK
2723 case SOL_NETLINK:
2724 switch (optname) {
2725 case NETLINK_PKTINFO:
2726 case NETLINK_BROADCAST_ERROR:
2727 case NETLINK_NO_ENOBUFS:
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2729 case NETLINK_LISTEN_ALL_NSID:
2730 case NETLINK_CAP_ACK:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2733 case NETLINK_EXT_ACK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2736 case NETLINK_GET_STRICT_CHK:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2738 if (get_user_u32(len, optlen)) {
2739 return -TARGET_EFAULT;
2741 if (len != sizeof(val)) {
2742 return -TARGET_EINVAL;
2744 lv = len;
2745 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2746 if (ret < 0) {
2747 return ret;
2749 if (put_user_u32(lv, optlen)
2750 || put_user_u32(val, optval_addr)) {
2751 return -TARGET_EFAULT;
2753 break;
2754 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2755 case NETLINK_LIST_MEMBERSHIPS:
2757 uint32_t *results;
2758 int i;
2759 if (get_user_u32(len, optlen)) {
2760 return -TARGET_EFAULT;
2762 if (len < 0) {
2763 return -TARGET_EINVAL;
2765 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2766 if (!results) {
2767 return -TARGET_EFAULT;
2769 lv = len;
2770 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2771 if (ret < 0) {
2772 unlock_user(results, optval_addr, 0);
2773 return ret;
2775 /* swap host endianess to target endianess. */
2776 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2777 results[i] = tswap32(results[i]);
2779 if (put_user_u32(lv, optlen)) {
2780 return -TARGET_EFAULT;
2782 unlock_user(results, optval_addr, 0);
2783 break;
2785 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2786 default:
2787 goto unimplemented;
2789 break;
2790 #endif /* SOL_NETLINK */
2791 default:
2792 unimplemented:
2793 qemu_log_mask(LOG_UNIMP,
2794 "getsockopt level=%d optname=%d not yet supported\n",
2795 level, optname);
2796 ret = -TARGET_EOPNOTSUPP;
2797 break;
2799 return ret;
2802 /* Convert target low/high pair representing file offset into the host
2803 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2804 * as the kernel doesn't handle them either.
2806 static void target_to_host_low_high(abi_ulong tlow,
2807 abi_ulong thigh,
2808 unsigned long *hlow,
2809 unsigned long *hhigh)
2811 uint64_t off = tlow |
2812 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2813 TARGET_LONG_BITS / 2;
2815 *hlow = off;
2816 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2819 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2820 abi_ulong count, int copy)
2822 struct target_iovec *target_vec;
2823 struct iovec *vec;
2824 abi_ulong total_len, max_len;
2825 int i;
2826 int err = 0;
2827 bool bad_address = false;
2829 if (count == 0) {
2830 errno = 0;
2831 return NULL;
2833 if (count > IOV_MAX) {
2834 errno = EINVAL;
2835 return NULL;
2838 vec = g_try_new0(struct iovec, count);
2839 if (vec == NULL) {
2840 errno = ENOMEM;
2841 return NULL;
2844 target_vec = lock_user(VERIFY_READ, target_addr,
2845 count * sizeof(struct target_iovec), 1);
2846 if (target_vec == NULL) {
2847 err = EFAULT;
2848 goto fail2;
2851 /* ??? If host page size > target page size, this will result in a
2852 value larger than what we can actually support. */
2853 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2854 total_len = 0;
2856 for (i = 0; i < count; i++) {
2857 abi_ulong base = tswapal(target_vec[i].iov_base);
2858 abi_long len = tswapal(target_vec[i].iov_len);
2860 if (len < 0) {
2861 err = EINVAL;
2862 goto fail;
2863 } else if (len == 0) {
2864 /* Zero length pointer is ignored. */
2865 vec[i].iov_base = 0;
2866 } else {
2867 vec[i].iov_base = lock_user(type, base, len, copy);
2868 /* If the first buffer pointer is bad, this is a fault. But
2869 * subsequent bad buffers will result in a partial write; this
2870 * is realized by filling the vector with null pointers and
2871 * zero lengths. */
2872 if (!vec[i].iov_base) {
2873 if (i == 0) {
2874 err = EFAULT;
2875 goto fail;
2876 } else {
2877 bad_address = true;
2880 if (bad_address) {
2881 len = 0;
2883 if (len > max_len - total_len) {
2884 len = max_len - total_len;
2887 vec[i].iov_len = len;
2888 total_len += len;
2891 unlock_user(target_vec, target_addr, 0);
2892 return vec;
2894 fail:
2895 while (--i >= 0) {
2896 if (tswapal(target_vec[i].iov_len) > 0) {
2897 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2900 unlock_user(target_vec, target_addr, 0);
2901 fail2:
2902 g_free(vec);
2903 errno = err;
2904 return NULL;
2907 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2908 abi_ulong count, int copy)
2910 struct target_iovec *target_vec;
2911 int i;
2913 target_vec = lock_user(VERIFY_READ, target_addr,
2914 count * sizeof(struct target_iovec), 1);
2915 if (target_vec) {
2916 for (i = 0; i < count; i++) {
2917 abi_ulong base = tswapal(target_vec[i].iov_base);
2918 abi_long len = tswapal(target_vec[i].iov_len);
2919 if (len < 0) {
2920 break;
2922 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2924 unlock_user(target_vec, target_addr, 0);
2927 g_free(vec);
2930 static inline int target_to_host_sock_type(int *type)
2932 int host_type = 0;
2933 int target_type = *type;
2935 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2936 case TARGET_SOCK_DGRAM:
2937 host_type = SOCK_DGRAM;
2938 break;
2939 case TARGET_SOCK_STREAM:
2940 host_type = SOCK_STREAM;
2941 break;
2942 default:
2943 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2944 break;
2946 if (target_type & TARGET_SOCK_CLOEXEC) {
2947 #if defined(SOCK_CLOEXEC)
2948 host_type |= SOCK_CLOEXEC;
2949 #else
2950 return -TARGET_EINVAL;
2951 #endif
2953 if (target_type & TARGET_SOCK_NONBLOCK) {
2954 #if defined(SOCK_NONBLOCK)
2955 host_type |= SOCK_NONBLOCK;
2956 #elif !defined(O_NONBLOCK)
2957 return -TARGET_EINVAL;
2958 #endif
2960 *type = host_type;
2961 return 0;
2964 /* Try to emulate socket type flags after socket creation. */
2965 static int sock_flags_fixup(int fd, int target_type)
2967 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2968 if (target_type & TARGET_SOCK_NONBLOCK) {
2969 int flags = fcntl(fd, F_GETFL);
2970 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2971 close(fd);
2972 return -TARGET_EINVAL;
2975 #endif
2976 return fd;
2979 /* do_socket() Must return target values and target errnos. */
2980 static abi_long do_socket(int domain, int type, int protocol)
2982 int target_type = type;
2983 int ret;
2985 ret = target_to_host_sock_type(&type);
2986 if (ret) {
2987 return ret;
2990 if (domain == PF_NETLINK && !(
2991 #ifdef CONFIG_RTNETLINK
2992 protocol == NETLINK_ROUTE ||
2993 #endif
2994 protocol == NETLINK_KOBJECT_UEVENT ||
2995 protocol == NETLINK_AUDIT)) {
2996 return -TARGET_EPROTONOSUPPORT;
2999 if (domain == AF_PACKET ||
3000 (domain == AF_INET && type == SOCK_PACKET)) {
3001 protocol = tswap16(protocol);
3004 ret = get_errno(socket(domain, type, protocol));
3005 if (ret >= 0) {
3006 ret = sock_flags_fixup(ret, target_type);
3007 if (type == SOCK_PACKET) {
3008 /* Manage an obsolete case :
3009 * if socket type is SOCK_PACKET, bind by name
3011 fd_trans_register(ret, &target_packet_trans);
3012 } else if (domain == PF_NETLINK) {
3013 switch (protocol) {
3014 #ifdef CONFIG_RTNETLINK
3015 case NETLINK_ROUTE:
3016 fd_trans_register(ret, &target_netlink_route_trans);
3017 break;
3018 #endif
3019 case NETLINK_KOBJECT_UEVENT:
3020 /* nothing to do: messages are strings */
3021 break;
3022 case NETLINK_AUDIT:
3023 fd_trans_register(ret, &target_netlink_audit_trans);
3024 break;
3025 default:
3026 g_assert_not_reached();
3030 return ret;
3033 /* do_bind() Must return target values and target errnos. */
3034 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3035 socklen_t addrlen)
3037 void *addr;
3038 abi_long ret;
3040 if ((int)addrlen < 0) {
3041 return -TARGET_EINVAL;
3044 addr = alloca(addrlen+1);
3046 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3047 if (ret)
3048 return ret;
3050 return get_errno(bind(sockfd, addr, addrlen));
3053 /* do_connect() Must return target values and target errnos. */
3054 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3055 socklen_t addrlen)
3057 void *addr;
3058 abi_long ret;
3060 if ((int)addrlen < 0) {
3061 return -TARGET_EINVAL;
3064 addr = alloca(addrlen+1);
3066 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3067 if (ret)
3068 return ret;
3070 return get_errno(safe_connect(sockfd, addr, addrlen));
3073 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3074 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3075 int flags, int send)
3077 abi_long ret, len;
3078 struct msghdr msg;
3079 abi_ulong count;
3080 struct iovec *vec;
3081 abi_ulong target_vec;
3083 if (msgp->msg_name) {
3084 msg.msg_namelen = tswap32(msgp->msg_namelen);
3085 msg.msg_name = alloca(msg.msg_namelen+1);
3086 ret = target_to_host_sockaddr(fd, msg.msg_name,
3087 tswapal(msgp->msg_name),
3088 msg.msg_namelen);
3089 if (ret == -TARGET_EFAULT) {
3090 /* For connected sockets msg_name and msg_namelen must
3091 * be ignored, so returning EFAULT immediately is wrong.
3092 * Instead, pass a bad msg_name to the host kernel, and
3093 * let it decide whether to return EFAULT or not.
3095 msg.msg_name = (void *)-1;
3096 } else if (ret) {
3097 goto out2;
3099 } else {
3100 msg.msg_name = NULL;
3101 msg.msg_namelen = 0;
3103 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3104 msg.msg_control = alloca(msg.msg_controllen);
3105 memset(msg.msg_control, 0, msg.msg_controllen);
3107 msg.msg_flags = tswap32(msgp->msg_flags);
3109 count = tswapal(msgp->msg_iovlen);
3110 target_vec = tswapal(msgp->msg_iov);
3112 if (count > IOV_MAX) {
3113 /* sendrcvmsg returns a different errno for this condition than
3114 * readv/writev, so we must catch it here before lock_iovec() does.
3116 ret = -TARGET_EMSGSIZE;
3117 goto out2;
3120 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3121 target_vec, count, send);
3122 if (vec == NULL) {
3123 ret = -host_to_target_errno(errno);
3124 goto out2;
3126 msg.msg_iovlen = count;
3127 msg.msg_iov = vec;
3129 if (send) {
3130 if (fd_trans_target_to_host_data(fd)) {
3131 void *host_msg;
3133 host_msg = g_malloc(msg.msg_iov->iov_len);
3134 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3135 ret = fd_trans_target_to_host_data(fd)(host_msg,
3136 msg.msg_iov->iov_len);
3137 if (ret >= 0) {
3138 msg.msg_iov->iov_base = host_msg;
3139 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3141 g_free(host_msg);
3142 } else {
3143 ret = target_to_host_cmsg(&msg, msgp);
3144 if (ret == 0) {
3145 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3148 } else {
3149 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3150 if (!is_error(ret)) {
3151 len = ret;
3152 if (fd_trans_host_to_target_data(fd)) {
3153 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3154 MIN(msg.msg_iov->iov_len, len));
3155 } else {
3156 ret = host_to_target_cmsg(msgp, &msg);
3158 if (!is_error(ret)) {
3159 msgp->msg_namelen = tswap32(msg.msg_namelen);
3160 msgp->msg_flags = tswap32(msg.msg_flags);
3161 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3162 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3163 msg.msg_name, msg.msg_namelen);
3164 if (ret) {
3165 goto out;
3169 ret = len;
3174 out:
3175 unlock_iovec(vec, target_vec, count, !send);
3176 out2:
3177 return ret;
3180 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3181 int flags, int send)
3183 abi_long ret;
3184 struct target_msghdr *msgp;
3186 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3187 msgp,
3188 target_msg,
3189 send ? 1 : 0)) {
3190 return -TARGET_EFAULT;
3192 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3193 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3194 return ret;
3197 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3198 * so it might not have this *mmsg-specific flag either.
3200 #ifndef MSG_WAITFORONE
3201 #define MSG_WAITFORONE 0x10000
3202 #endif
3204 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3205 unsigned int vlen, unsigned int flags,
3206 int send)
3208 struct target_mmsghdr *mmsgp;
3209 abi_long ret = 0;
3210 int i;
3212 if (vlen > UIO_MAXIOV) {
3213 vlen = UIO_MAXIOV;
3216 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3217 if (!mmsgp) {
3218 return -TARGET_EFAULT;
3221 for (i = 0; i < vlen; i++) {
3222 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3223 if (is_error(ret)) {
3224 break;
3226 mmsgp[i].msg_len = tswap32(ret);
3227 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3228 if (flags & MSG_WAITFORONE) {
3229 flags |= MSG_DONTWAIT;
3233 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3235 /* Return number of datagrams sent if we sent any at all;
3236 * otherwise return the error.
3238 if (i) {
3239 return i;
3241 return ret;
3244 /* do_accept4() Must return target values and target errnos. */
3245 static abi_long do_accept4(int fd, abi_ulong target_addr,
3246 abi_ulong target_addrlen_addr, int flags)
3248 socklen_t addrlen, ret_addrlen;
3249 void *addr;
3250 abi_long ret;
3251 int host_flags;
3253 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3255 if (target_addr == 0) {
3256 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3259 /* linux returns EINVAL if addrlen pointer is invalid */
3260 if (get_user_u32(addrlen, target_addrlen_addr))
3261 return -TARGET_EINVAL;
3263 if ((int)addrlen < 0) {
3264 return -TARGET_EINVAL;
3267 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3268 return -TARGET_EINVAL;
3270 addr = alloca(addrlen);
3272 ret_addrlen = addrlen;
3273 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3274 if (!is_error(ret)) {
3275 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3276 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3277 ret = -TARGET_EFAULT;
3280 return ret;
3283 /* do_getpeername() Must return target values and target errnos. */
3284 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3285 abi_ulong target_addrlen_addr)
3287 socklen_t addrlen, ret_addrlen;
3288 void *addr;
3289 abi_long ret;
3291 if (get_user_u32(addrlen, target_addrlen_addr))
3292 return -TARGET_EFAULT;
3294 if ((int)addrlen < 0) {
3295 return -TARGET_EINVAL;
3298 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3299 return -TARGET_EFAULT;
3301 addr = alloca(addrlen);
3303 ret_addrlen = addrlen;
3304 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3305 if (!is_error(ret)) {
3306 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3307 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3308 ret = -TARGET_EFAULT;
3311 return ret;
3314 /* do_getsockname() Must return target values and target errnos. */
3315 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3316 abi_ulong target_addrlen_addr)
3318 socklen_t addrlen, ret_addrlen;
3319 void *addr;
3320 abi_long ret;
3322 if (get_user_u32(addrlen, target_addrlen_addr))
3323 return -TARGET_EFAULT;
3325 if ((int)addrlen < 0) {
3326 return -TARGET_EINVAL;
3329 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3330 return -TARGET_EFAULT;
3332 addr = alloca(addrlen);
3334 ret_addrlen = addrlen;
3335 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3336 if (!is_error(ret)) {
3337 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3338 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3339 ret = -TARGET_EFAULT;
3342 return ret;
3345 /* do_socketpair() Must return target values and target errnos. */
3346 static abi_long do_socketpair(int domain, int type, int protocol,
3347 abi_ulong target_tab_addr)
3349 int tab[2];
3350 abi_long ret;
3352 target_to_host_sock_type(&type);
3354 ret = get_errno(socketpair(domain, type, protocol, tab));
3355 if (!is_error(ret)) {
3356 if (put_user_s32(tab[0], target_tab_addr)
3357 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3358 ret = -TARGET_EFAULT;
3360 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * If target_addr is 0 this behaves as send(2); otherwise the target
 * sockaddr is converted and passed to sendto(2).
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* original guest mapping, kept while translating */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* This fd needs its payload rewritten (e.g. netlink): work on a
         * private copy so the guest's buffer is never modified. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 byte of slack for AF_UNIX path NUL handling in the
         * sockaddr converter — presumably; confirm against
         * target_to_host_sockaddr(). */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the translated copy and restore the guest mapping so the
         * unlock below releases the right pointer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * target_addr == 0 means the caller does not want the source address
 * (plain recv(2) semantics).
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Some fds (e.g. netlink) need the received payload rewritten
             * into target byte order / layout before the guest sees it. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back at most the guest-advertised length, then report
             * the kernel's actual address length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: commit 'len' bytes of the guest buffer. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: release the mapping without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * socketcall(2) is the old multiplexed socket entry point: 'num' selects
 * the operation and 'vptr' points at a guest array of its arguments.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        /* plain accept == accept4 with flags == 0 */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        /* send == sendto with a null destination */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() attachments; do_shmat() records each
 * mapping here and do_shmdt() looks it up to clear the page flags. */
static struct shm_region {
    abi_ulong start;   /* guest address the segment is attached at */
    abi_ulong size;    /* size of the segment in bytes */
    bool in_use;       /* slot occupied */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct; target-specific headers may
 * define TARGET_SEMID64_DS and supply their own layout instead. */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;   /* 32-bit ABIs pad each 64-bit time field */
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Convert a guest struct ipc_perm (embedded at the head of a
 * target_semid64_ds at target_addr) into host byte order.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* 'mode' and '__seq' are 16-bit on most targets but 32-bit on a few
     * architectures, hence the per-target swap width below. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Convert a host struct ipc_perm into the guest struct embedded at the
 * head of the target_semid64_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* Swap width of 'mode'/'__seq' varies per target; mirror of the
     * target_to_host direction above. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3639 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3640 abi_ulong target_addr)
3642 struct target_semid64_ds *target_sd;
3644 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3645 return -TARGET_EFAULT;
3646 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3647 return -TARGET_EFAULT;
3648 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3649 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3650 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3651 unlock_user_struct(target_sd, target_addr, 0);
3652 return 0;
3655 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3656 struct semid_ds *host_sd)
3658 struct target_semid64_ds *target_sd;
3660 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3661 return -TARGET_EFAULT;
3662 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3663 return -TARGET_EFAULT;
3664 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3665 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3666 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3667 unlock_user_struct(target_sd, target_addr, 1);
3668 return 0;
/* Guest-layout mirror of the host's struct seminfo (semctl IPC_INFO /
 * SEM_INFO result); all fields are plain ints on both sides. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3684 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3685 struct seminfo *host_seminfo)
3687 struct target_seminfo *target_seminfo;
3688 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3689 return -TARGET_EFAULT;
3690 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3691 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3692 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3693 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3694 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3695 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3696 __put_user(host_seminfo->semume, &target_seminfo->semume);
3697 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3698 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3699 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3700 unlock_user_struct(target_seminfo, target_addr, 1);
3701 return 0;
/* Host-side semctl() argument union (glibc requires the caller to
 * define this). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: every pointer member is a guest
 * address (abi_ulong). */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3718 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3719 abi_ulong target_addr)
3721 int nsems;
3722 unsigned short *array;
3723 union semun semun;
3724 struct semid_ds semid_ds;
3725 int i, ret;
3727 semun.buf = &semid_ds;
3729 ret = semctl(semid, 0, IPC_STAT, semun);
3730 if (ret == -1)
3731 return get_errno(ret);
3733 nsems = semid_ds.sem_nsems;
3735 *host_array = g_try_new(unsigned short, nsems);
3736 if (!*host_array) {
3737 return -TARGET_ENOMEM;
3739 array = lock_user(VERIFY_READ, target_addr,
3740 nsems*sizeof(unsigned short), 1);
3741 if (!array) {
3742 g_free(*host_array);
3743 return -TARGET_EFAULT;
3746 for(i=0; i<nsems; i++) {
3747 __get_user((*host_array)[i], &array[i]);
3749 unlock_user(array, target_addr, 0);
3751 return 0;
3754 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3755 unsigned short **host_array)
3757 int nsems;
3758 unsigned short *array;
3759 union semun semun;
3760 struct semid_ds semid_ds;
3761 int i, ret;
3763 semun.buf = &semid_ds;
3765 ret = semctl(semid, 0, IPC_STAT, semun);
3766 if (ret == -1)
3767 return get_errno(ret);
3769 nsems = semid_ds.sem_nsems;
3771 array = lock_user(VERIFY_WRITE, target_addr,
3772 nsems*sizeof(unsigned short), 0);
3773 if (!array)
3774 return -TARGET_EFAULT;
3776 for(i=0; i<nsems; i++) {
3777 __put_user((*host_array)[i], &array[i]);
3779 g_free(*host_array);
3780 unlock_user(array, target_addr, 1);
3782 return 0;
/* Emulate semctl(2). Must return target values and target errnos.
 * target_arg is the guest's 'union semun' passed by value.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    /* Strip IPC_64 and friends; only the low command byte matters here. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* 'array' ownership flows into host_to_target_semarray below. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip the semid_ds through a host copy. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the union argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-layout mirror of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: negative = acquire */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO */
};
3861 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3862 abi_ulong target_addr,
3863 unsigned nsops)
3865 struct target_sembuf *target_sembuf;
3866 int i;
3868 target_sembuf = lock_user(VERIFY_READ, target_addr,
3869 nsops*sizeof(struct target_sembuf), 1);
3870 if (!target_sembuf)
3871 return -TARGET_EFAULT;
3873 for(i=0; i<nsops; i++) {
3874 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3875 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3876 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3879 unlock_user(target_sembuf, target_addr, 0);
3881 return 0;
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/* Emulate semop(2)/semtimedop(2). 'time64' selects which guest timespec
 * layout 'timeout' points at; timeout == 0 means block indefinitely.
 * Must return target values and target errnos.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Mirror the kernel's per-call operation limit. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* Hosts without a direct semtimedop syscall go through sys_ipc. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
/* Guest-layout mirror of struct msqid_ds (asm-generic msqid64_ds). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;   /* 32-bit ABIs pad each 64-bit time field */
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3971 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3972 abi_ulong target_addr)
3974 struct target_msqid_ds *target_md;
3976 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3977 return -TARGET_EFAULT;
3978 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3979 return -TARGET_EFAULT;
3980 host_md->msg_stime = tswapal(target_md->msg_stime);
3981 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3982 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3983 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3984 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3985 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3986 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3987 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3988 unlock_user_struct(target_md, target_addr, 0);
3989 return 0;
3992 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3993 struct msqid_ds *host_md)
3995 struct target_msqid_ds *target_md;
3997 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3998 return -TARGET_EFAULT;
3999 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4000 return -TARGET_EFAULT;
4001 target_md->msg_stime = tswapal(host_md->msg_stime);
4002 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4003 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4004 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4005 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4006 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4007 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4008 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4009 unlock_user_struct(target_md, target_addr, 1);
4010 return 0;
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO / MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4024 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4025 struct msginfo *host_msginfo)
4027 struct target_msginfo *target_msginfo;
4028 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4029 return -TARGET_EFAULT;
4030 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4031 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4032 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4033 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4034 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4035 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4036 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4037 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4038 unlock_user_struct(target_msginfo, target_addr, 1);
4039 return 0;
/* Emulate msgctl(2). Must return target values and target errnos. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and friends; only the low command byte matters. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip the msqid_ds through a host copy. */
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel writes a struct msginfo through the msqid_ds arg
         * for these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout mirror of struct msgbuf; mtext is a flexible payload
 * declared with the traditional [1] idiom. */
struct target_msgbuf {
    abi_long mtype;    /* message type, must be > 0 */
    char mtext[1];     /* message payload (variable length) */
};
/* Emulate msgsnd(2). Must return target values and target errnos. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: leading long mtype plus msgsz bytes of payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* Hosts without a direct msgsnd syscall go through sys_ipc; the
     * s390x variant takes one fewer argument. */
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#ifdef __NR_ipc
/* Helper to build the trailing sys_ipc arguments for the msgrcv
 * fallback; the historical ABI packs msgp and msgtyp into a two-long
 * "kludge" array on most hosts. */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
/* Emulate msgrcv(2). Must return target values and target errnos.
 * On success 'ret' is the number of payload bytes received.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: leading long mtype plus msgsz bytes of payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Fallback via sys_ipc for hosts without a direct msgrcv syscall. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* Lock the payload area (right after mtype in the guest struct)
         * to validate writability, then copy the received bytes back. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);  /* g_free(NULL) is a no-op */
    return ret;
}
4186 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4187 abi_ulong target_addr)
4189 struct target_shmid_ds *target_sd;
4191 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4192 return -TARGET_EFAULT;
4193 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4194 return -TARGET_EFAULT;
4195 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4196 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4197 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4198 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4199 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4200 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4201 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4202 unlock_user_struct(target_sd, target_addr, 0);
4203 return 0;
4206 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4207 struct shmid_ds *host_sd)
4209 struct target_shmid_ds *target_sd;
4211 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4212 return -TARGET_EFAULT;
4213 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4214 return -TARGET_EFAULT;
4215 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4216 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4217 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4218 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4219 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4220 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4221 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4222 unlock_user_struct(target_sd, target_addr, 1);
4223 return 0;
/* Guest-layout mirror of struct shminfo (shmctl IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4234 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4235 struct shminfo *host_shminfo)
4237 struct target_shminfo *target_shminfo;
4238 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4239 return -TARGET_EFAULT;
4240 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4241 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4242 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4243 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4244 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4245 unlock_user_struct(target_shminfo, target_addr, 1);
4246 return 0;
/* Guest-layout mirror of struct shm_info (shmctl SHM_INFO result). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4258 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4259 struct shm_info *host_shm_info)
4261 struct target_shm_info *target_shm_info;
4262 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4263 return -TARGET_EFAULT;
4264 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4265 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4266 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4267 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4268 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4269 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4270 unlock_user_struct(target_shm_info, target_addr, 1);
4271 return 0;
/* Emulate shmctl(2). Must return target values and target errnos. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and friends; only the low command byte matters. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip the shmid_ds through a host copy. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel writes a struct shminfo through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Kernel writes a struct shm_info through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands take no data argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: shmat() attach addresses are page-aligned. */
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2): attach SysV shared memory into the guest address
 * space and record the mapping in shm_regions[] for later shmdt().
 * Returns the guest attach address or a negative target errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's SHMLBA alignment: round down with SHM_RND,
     * otherwise reject misaligned addresses like the kernel does. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace the placeholder mapping found above. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    /* Mark the attached range valid/readable (and writable unless the
     * segment was attached read-only). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));

    /* Remember the attachment so do_shmdt() can clear the page flags.
     * NOTE(review): if all N_SHM_REGIONS slots are in use the attach
     * still succeeds but is not tracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/*
 * Detach a shared memory segment previously attached with do_shmat().
 * Clears the recorded region's guest page flags, then detaches the host
 * mapping.  Returns 0 or a negative target errno.
 */
4403 static inline abi_long do_shmdt(abi_ulong shmaddr)
4405 int i;
4406 abi_long rv;
4408 mmap_lock();
/* Find the bookkeeping entry do_shmat() created for this address. */
4410 for (i = 0; i < N_SHM_REGIONS; ++i) {
4411 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4412 shm_regions[i].in_use = false;
4413 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4414 break;
4417 rv = get_errno(shmdt(g2h(shmaddr)));
4419 mmap_unlock();
4421 return rv;
4424 #ifdef TARGET_NR_ipc
4425 /* ??? This only works with linear mappings. */
4426 /* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall: dispatches to the
 * individual SysV IPC helpers (sem*/msg*/shm*) based on 'call'.
 */
4427 static abi_long do_ipc(CPUArchState *cpu_env,
4428 unsigned int call, abi_long first,
4429 abi_long second, abi_long third,
4430 abi_long ptr, abi_long fifth)
4432 int version;
4433 abi_long ret = 0;
/* The upper 16 bits of 'call' carry the IPC ABI version, the low 16
 * bits the operation number. */
4435 version = call >> 16;
4436 call &= 0xffff;
4438 switch (call) {
4439 case IPCOP_semop:
4440 ret = do_semtimedop(first, ptr, second, 0, false);
4441 break;
4442 case IPCOP_semtimedop:
4444 * The s390 sys_ipc variant has only five parameters instead of six
4445 * (as for default variant) and the only difference is the handling of
4446 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4447 * to a struct timespec where the generic variant uses fifth parameter.
4449 #if defined(TARGET_S390X)
4450 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4451 #else
4452 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4453 #endif
4454 break;
4456 case IPCOP_semget:
4457 ret = get_errno(semget(first, second, third));
4458 break;
4460 case IPCOP_semctl: {
4461 /* The semun argument to semctl is passed by value, so dereference the
4462 * ptr argument. */
4463 abi_ulong atptr;
4464 get_user_ual(atptr, ptr);
4465 ret = do_semctl(first, second, third, atptr);
4466 break;
4469 case IPCOP_msgget:
4470 ret = get_errno(msgget(first, second));
4471 break;
4473 case IPCOP_msgsnd:
4474 ret = do_msgsnd(first, ptr, second, third);
4475 break;
4477 case IPCOP_msgctl:
4478 ret = do_msgctl(first, second, ptr);
4479 break;
4481 case IPCOP_msgrcv:
4482 switch (version) {
4483 case 0:
/* Version 0 passes msgp and msgtyp indirectly through a kludge struct. */
4485 struct target_ipc_kludge {
4486 abi_long msgp;
4487 abi_long msgtyp;
4488 } *tmp;
4490 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4491 ret = -TARGET_EFAULT;
4492 break;
4495 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4497 unlock_user_struct(tmp, ptr, 0);
4498 break;
4500 default:
4501 ret = do_msgrcv(first, ptr, second, fifth, third);
4503 break;
4505 case IPCOP_shmat:
4506 switch (version) {
4507 default:
4509 abi_ulong raddr;
4510 raddr = do_shmat(cpu_env, first, ptr, second);
4511 if (is_error(raddr))
4512 return get_errno(raddr);
/* The attach address is returned indirectly through 'third'. */
4513 if (put_user_ual(raddr, third))
4514 return -TARGET_EFAULT;
4515 break;
4517 case 1:
4518 ret = -TARGET_EINVAL;
4519 break;
4521 break;
4522 case IPCOP_shmdt:
4523 ret = do_shmdt(ptr);
4524 break;
4526 case IPCOP_shmget:
4527 /* IPC_* flag values are the same on all linux platforms */
4528 ret = get_errno(shmget(first, second, third));
4529 break;
4531 /* IPC_* and SHM_* command values are the same on all linux platforms */
4532 case IPCOP_shmctl:
4533 ret = do_shmctl(first, second, ptr);
4534 break;
4535 default:
4536 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4537 call, version);
4538 ret = -TARGET_ENOSYS;
4539 break;
4541 return ret;
4543 #endif
4545 /* kernel structure types definitions */
/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * identifiers, one per kernel structure the thunk layer knows about. */
4547 #define STRUCT(name, ...) STRUCT_ ## name,
4548 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4549 enum {
4550 #include "syscall_types.h"
4551 STRUCT_MAX
4553 #undef STRUCT
4554 #undef STRUCT_SPECIAL
/* Second expansion: emit an argtype description table for each struct.
 * STRUCT_SPECIAL entries are converted by hand elsewhere, so they get
 * no generated table. */
4556 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4557 #define STRUCT_SPECIAL(name)
4558 #include "syscall_types.h"
4559 #undef STRUCT
4560 #undef STRUCT_SPECIAL
/* Size of the on-stack scratch buffer used by do_ioctl() for argument
 * conversion; larger payloads must allocate their own buffer. */
4562 #define MAX_STRUCT_SIZE 4096
4564 #ifdef CONFIG_FIEMAP
4565 /* So fiemap access checks don't overflow on 32 bit systems.
4566 * This is very slightly smaller than the limit imposed by
4567 * the underlying kernel.
4569 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4570 / sizeof(struct fiemap_extent))
/*
 * FS_IOC_FIEMAP handler.  Returns 0 on success or a negative target
 * errno.
 */
4572 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4573 int fd, int cmd, abi_long arg)
4575 /* The parameter for this ioctl is a struct fiemap followed
4576 * by an array of struct fiemap_extent whose size is set
4577 * in fiemap->fm_extent_count. The array is filled in by the
4578 * ioctl.
4580 int target_size_in, target_size_out;
4581 struct fiemap *fm;
4582 const argtype *arg_type = ie->arg_type;
4583 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4584 void *argptr, *p;
4585 abi_long ret;
4586 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4587 uint32_t outbufsz;
4588 int free_fm = 0;
4590 assert(arg_type[0] == TYPE_PTR);
4591 assert(ie->access == IOC_RW);
4592 arg_type++;
/* Convert the fixed struct fiemap header from the guest. */
4593 target_size_in = thunk_type_size(arg_type, 0);
4594 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4595 if (!argptr) {
4596 return -TARGET_EFAULT;
4598 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4599 unlock_user(argptr, arg, 0);
4600 fm = (struct fiemap *)buf_temp;
/* Bound the extent count so the size computation below cannot overflow. */
4601 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4602 return -TARGET_EINVAL;
4605 outbufsz = sizeof (*fm) +
4606 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4608 if (outbufsz > MAX_STRUCT_SIZE) {
4609 /* We can't fit all the extents into the fixed size buffer.
4610 * Allocate one that is large enough and use it instead.
4612 fm = g_try_malloc(outbufsz);
4613 if (!fm) {
4614 return -TARGET_ENOMEM;
4616 memcpy(fm, buf_temp, sizeof(struct fiemap));
4617 free_fm = 1;
4619 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4620 if (!is_error(ret)) {
4621 target_size_out = target_size_in;
4622 /* An extent_count of 0 means we were only counting the extents
4623 * so there are no structs to copy
4625 if (fm->fm_extent_count != 0) {
4626 target_size_out += fm->fm_mapped_extents * extent_size;
4628 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4629 if (!argptr) {
4630 ret = -TARGET_EFAULT;
4631 } else {
4632 /* Convert the struct fiemap */
4633 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4634 if (fm->fm_extent_count != 0) {
4635 p = argptr + target_size_in;
4636 /* ...and then all the struct fiemap_extents */
4637 for (i = 0; i < fm->fm_mapped_extents; i++) {
4638 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4639 THUNK_TARGET);
4640 p += extent_size;
4643 unlock_user(argptr, arg, target_size_out);
4646 if (free_fm) {
4647 g_free(fm);
4649 return ret;
4651 #endif
4653 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4654 int fd, int cmd, abi_long arg)
4656 const argtype *arg_type = ie->arg_type;
4657 int target_size;
4658 void *argptr;
4659 int ret;
4660 struct ifconf *host_ifconf;
4661 uint32_t outbufsz;
4662 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4663 int target_ifreq_size;
4664 int nb_ifreq;
4665 int free_buf = 0;
4666 int i;
4667 int target_ifc_len;
4668 abi_long target_ifc_buf;
4669 int host_ifc_len;
4670 char *host_ifc_buf;
4672 assert(arg_type[0] == TYPE_PTR);
4673 assert(ie->access == IOC_RW);
4675 arg_type++;
4676 target_size = thunk_type_size(arg_type, 0);
4678 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4679 if (!argptr)
4680 return -TARGET_EFAULT;
4681 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4682 unlock_user(argptr, arg, 0);
4684 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4685 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4686 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4688 if (target_ifc_buf != 0) {
4689 target_ifc_len = host_ifconf->ifc_len;
4690 nb_ifreq = target_ifc_len / target_ifreq_size;
4691 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4693 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4694 if (outbufsz > MAX_STRUCT_SIZE) {
4696 * We can't fit all the extents into the fixed size buffer.
4697 * Allocate one that is large enough and use it instead.
4699 host_ifconf = malloc(outbufsz);
4700 if (!host_ifconf) {
4701 return -TARGET_ENOMEM;
4703 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4704 free_buf = 1;
4706 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4708 host_ifconf->ifc_len = host_ifc_len;
4709 } else {
4710 host_ifc_buf = NULL;
4712 host_ifconf->ifc_buf = host_ifc_buf;
4714 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4715 if (!is_error(ret)) {
4716 /* convert host ifc_len to target ifc_len */
4718 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4719 target_ifc_len = nb_ifreq * target_ifreq_size;
4720 host_ifconf->ifc_len = target_ifc_len;
4722 /* restore target ifc_buf */
4724 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4726 /* copy struct ifconf to target user */
4728 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4729 if (!argptr)
4730 return -TARGET_EFAULT;
4731 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4732 unlock_user(argptr, arg, target_size);
4734 if (target_ifc_buf != 0) {
4735 /* copy ifreq[] to target user */
4736 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4737 for (i = 0; i < nb_ifreq ; i++) {
4738 thunk_convert(argptr + i * target_ifreq_size,
4739 host_ifc_buf + i * sizeof(struct ifreq),
4740 ifreq_arg_type, THUNK_TARGET);
4742 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4746 if (free_buf) {
4747 free(host_ifconf);
4750 return ret;
4753 #if defined(CONFIG_USBFS)
4754 #if HOST_LONG_BITS > 64
4755 #error USBDEVFS thunks do not support >64 bit hosts yet.
4756 #endif
/*
 * Host-side bookkeeping for one in-flight USB request.  The kernel is
 * handed a pointer to host_urb; the other fields let us find our way
 * back to the guest's urb and data buffer when the urb is reaped.
 * target_urb_adr must stay the first field: a live_urb pointer doubles
 * as a pointer to the 64-bit hash key (see urb_hashtable_insert).
 */
4757 struct live_urb {
4758 uint64_t target_urb_adr;
4759 uint64_t target_buf_adr;
4760 char *target_buf_ptr;
4761 struct usbdevfs_urb host_urb;
/* Lazily-created table of live urbs, keyed by guest urb address.
 * NOTE(review): the lazy init is not guarded by a lock - presumably all
 * callers run under the same ioctl path; confirm before adding threads. */
4764 static GHashTable *usbdevfs_urb_hashtable(void)
4766 static GHashTable *urb_hashtable;
4768 if (!urb_hashtable) {
4769 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4771 return urb_hashtable;
/* Key and value are the same pointer; g_int64_hash dereferences it as a
 * pointer to the leading target_urb_adr field. */
4774 static void urb_hashtable_insert(struct live_urb *urb)
4776 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4777 g_hash_table_insert(urb_hashtable, urb, urb);
/* Map a guest urb address back to its live_urb, or NULL if unknown. */
4780 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4782 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4783 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4786 static void urb_hashtable_remove(struct live_urb *urb)
4788 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4789 g_hash_table_remove(urb_hashtable, urb);
/*
 * USBDEVFS_REAPURB(NDELAY): ask the host kernel for a completed urb,
 * then write the completion back into the guest's urb struct and return
 * the guest urb address through 'arg'.  Frees the live_urb bookkeeping.
 */
4792 static abi_long
4793 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4794 int fd, int cmd, abi_long arg)
4796 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4797 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4798 struct live_urb *lurb;
4799 void *argptr;
4800 uint64_t hurb;
4801 int target_size;
4802 uintptr_t target_urb_adr;
4803 abi_long ret;
4805 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
/* The kernel returns a host urb pointer through buf_temp. */
4807 memset(buf_temp, 0, sizeof(uint64_t));
4808 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4809 if (is_error(ret)) {
4810 return ret;
/* Recover the enclosing live_urb from the embedded host_urb pointer
 * (container_of-style arithmetic via offsetof). */
4813 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4814 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4815 if (!lurb->target_urb_adr) {
4816 return -TARGET_EFAULT;
4818 urb_hashtable_remove(lurb);
/* Release the guest data buffer, copying back what the device wrote. */
4819 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4820 lurb->host_urb.buffer_length);
4821 lurb->target_buf_ptr = NULL;
4823 /* restore the guest buffer pointer */
4824 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4826 /* update the guest urb struct */
4827 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4828 if (!argptr) {
4829 g_free(lurb);
4830 return -TARGET_EFAULT;
4832 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4833 unlock_user(argptr, lurb->target_urb_adr, target_size);
4835 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4836 /* write back the urb handle */
4837 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4838 if (!argptr) {
4839 g_free(lurb);
4840 return -TARGET_EFAULT;
4843 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4844 target_urb_adr = lurb->target_urb_adr;
4845 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4846 unlock_user(argptr, arg, target_size);
4848 g_free(lurb);
4849 return ret;
4852 static abi_long
4853 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4854 uint8_t *buf_temp __attribute__((unused)),
4855 int fd, int cmd, abi_long arg)
4857 struct live_urb *lurb;
4859 /* map target address back to host URB with metadata. */
4860 lurb = urb_hashtable_lookup(arg);
4861 if (!lurb) {
4862 return -TARGET_EFAULT;
4864 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
/*
 * USBDEVFS_SUBMITURB: convert the guest urb to host layout, pin the
 * guest data buffer, and submit.  On success the live_urb is recorded
 * so REAPURB/DISCARDURB can find it again.
 */
4867 static abi_long
4868 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4869 int fd, int cmd, abi_long arg)
4871 const argtype *arg_type = ie->arg_type;
4872 int target_size;
4873 abi_long ret;
4874 void *argptr;
4875 int rw_dir;
4876 struct live_urb *lurb;
4879 * each submitted URB needs to map to a unique ID for the
4880 * kernel, and that unique ID needs to be a pointer to
4881 * host memory. hence, we need to malloc for each URB.
4882 * isochronous transfers have a variable length struct.
4884 arg_type++;
4885 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4887 /* construct host copy of urb and metadata */
4888 lurb = g_try_malloc0(sizeof(struct live_urb));
4889 if (!lurb) {
4890 return -TARGET_ENOMEM;
4893 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4894 if (!argptr) {
4895 g_free(lurb);
4896 return -TARGET_EFAULT;
4898 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4899 unlock_user(argptr, arg, 0);
/* Remember the guest addresses so the reap path can write back. */
4901 lurb->target_urb_adr = arg;
4902 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4904 /* buffer space used depends on endpoint type so lock the entire buffer */
4905 /* control type urbs should check the buffer contents for true direction */
4906 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4907 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4908 lurb->host_urb.buffer_length, 1);
4909 if (lurb->target_buf_ptr == NULL) {
4910 g_free(lurb);
4911 return -TARGET_EFAULT;
4914 /* update buffer pointer in host copy */
4915 lurb->host_urb.buffer = lurb->target_buf_ptr;
4917 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4918 if (is_error(ret)) {
4919 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4920 g_free(lurb);
4921 } else {
/* Ownership of lurb passes to the hashtable until the urb is reaped. */
4922 urb_hashtable_insert(lurb);
4925 return ret;
4927 #endif /* CONFIG_USBFS */
/*
 * Device-mapper ioctl handler.  The dm ioctls pass a struct dm_ioctl
 * header followed by a variable-length, command-specific payload, with
 * internal 'next' offsets linking records - all of which must be
 * converted between target and host layouts in both directions.
 */
4929 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4930 int cmd, abi_long arg)
4932 void *argptr;
4933 struct dm_ioctl *host_dm;
4934 abi_long guest_data;
4935 uint32_t guest_data_size;
4936 int target_size;
4937 const argtype *arg_type = ie->arg_type;
4938 abi_long ret;
4939 void *big_buf = NULL;
4940 char *host_data;
4942 arg_type++;
4943 target_size = thunk_type_size(arg_type, 0);
4944 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4945 if (!argptr) {
4946 ret = -TARGET_EFAULT;
4947 goto out;
4949 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4950 unlock_user(argptr, arg, 0);
4952 /* buf_temp is too small, so fetch things into a bigger buffer */
4953 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4954 memcpy(big_buf, buf_temp, target_size);
4955 buf_temp = big_buf;
4956 host_dm = big_buf;
/* Payload lives at data_start bytes past the header in guest memory. */
4958 guest_data = arg + host_dm->data_start;
4959 if ((guest_data - arg) < 0) {
4960 ret = -TARGET_EINVAL;
4961 goto out;
4963 guest_data_size = host_dm->data_size - host_dm->data_start;
4964 host_data = (char*)host_dm + host_dm->data_start;
4966 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4967 if (!argptr) {
4968 ret = -TARGET_EFAULT;
4969 goto out;
/* Convert the command-specific input payload into host_data. */
4972 switch (ie->host_cmd) {
4973 case DM_REMOVE_ALL:
4974 case DM_LIST_DEVICES:
4975 case DM_DEV_CREATE:
4976 case DM_DEV_REMOVE:
4977 case DM_DEV_SUSPEND:
4978 case DM_DEV_STATUS:
4979 case DM_DEV_WAIT:
4980 case DM_TABLE_STATUS:
4981 case DM_TABLE_CLEAR:
4982 case DM_TABLE_DEPS:
4983 case DM_LIST_VERSIONS:
4984 /* no input data */
4985 break;
4986 case DM_DEV_RENAME:
4987 case DM_DEV_SET_GEOMETRY:
4988 /* data contains only strings */
4989 memcpy(host_data, argptr, guest_data_size);
4990 break;
4991 case DM_TARGET_MSG:
4992 memcpy(host_data, argptr, guest_data_size);
/* First 8 bytes are a sector number needing a byte swap. */
4993 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4994 break;
4995 case DM_TABLE_LOAD:
/* Payload is a chain of dm_target_spec records, each followed by a
 * parameter string and linked by byte offsets in 'next'. */
4997 void *gspec = argptr;
4998 void *cur_data = host_data;
4999 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5000 int spec_size = thunk_type_size(arg_type, 0);
5001 int i;
5003 for (i = 0; i < host_dm->target_count; i++) {
5004 struct dm_target_spec *spec = cur_data;
5005 uint32_t next;
5006 int slen;
5008 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5009 slen = strlen((char*)gspec + spec_size) + 1;
5010 next = spec->next;
5011 spec->next = sizeof(*spec) + slen;
5012 strcpy((char*)&spec[1], gspec + spec_size);
5013 gspec += next;
5014 cur_data += spec->next;
5016 break;
5018 default:
5019 ret = -TARGET_EINVAL;
5020 unlock_user(argptr, guest_data, 0);
5021 goto out;
5023 unlock_user(argptr, guest_data, 0);
5025 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5026 if (!is_error(ret)) {
/* Convert the command-specific output payload back to the guest. */
5027 guest_data = arg + host_dm->data_start;
5028 guest_data_size = host_dm->data_size - host_dm->data_start;
/* NOTE(review): this lock_user() result is not NULL-checked before the
 * switch below dereferences it - confirm and harden upstream. */
5029 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5030 switch (ie->host_cmd) {
5031 case DM_REMOVE_ALL:
5032 case DM_DEV_CREATE:
5033 case DM_DEV_REMOVE:
5034 case DM_DEV_RENAME:
5035 case DM_DEV_SUSPEND:
5036 case DM_DEV_STATUS:
5037 case DM_TABLE_LOAD:
5038 case DM_TABLE_CLEAR:
5039 case DM_TARGET_MSG:
5040 case DM_DEV_SET_GEOMETRY:
5041 /* no return data */
5042 break;
5043 case DM_LIST_DEVICES:
5045 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5046 uint32_t remaining_data = guest_data_size;
5047 void *cur_data = argptr;
5048 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5049 int nl_size = 12; /* can't use thunk_size due to alignment */
5051 while (1) {
5052 uint32_t next = nl->next;
5053 if (next) {
5054 nl->next = nl_size + (strlen(nl->name) + 1);
5056 if (remaining_data < nl->next) {
5057 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5058 break;
5060 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5061 strcpy(cur_data + nl_size, nl->name);
5062 cur_data += nl->next;
5063 remaining_data -= nl->next;
5064 if (!next) {
5065 break;
5067 nl = (void*)nl + next;
5069 break;
5071 case DM_DEV_WAIT:
5072 case DM_TABLE_STATUS:
5074 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5075 void *cur_data = argptr;
5076 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5077 int spec_size = thunk_type_size(arg_type, 0);
5078 int i;
5080 for (i = 0; i < host_dm->target_count; i++) {
5081 uint32_t next = spec->next;
5082 int slen = strlen((char*)&spec[1]) + 1;
5083 spec->next = (cur_data - argptr) + spec_size + slen;
5084 if (guest_data_size < spec->next) {
5085 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5086 break;
5088 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5089 strcpy(cur_data + spec_size, (char*)&spec[1]);
5090 cur_data = argptr + spec->next;
5091 spec = (void*)host_dm + host_dm->data_start + next;
5093 break;
5095 case DM_TABLE_DEPS:
/* Payload: a 32-bit count at offset 0, then 64-bit dev_t values
 * starting at offset 8. */
5097 void *hdata = (void*)host_dm + host_dm->data_start;
5098 int count = *(uint32_t*)hdata;
5099 uint64_t *hdev = hdata + 8;
5100 uint64_t *gdev = argptr + 8;
5101 int i;
5103 *(uint32_t*)argptr = tswap32(count);
5104 for (i = 0; i < count; i++) {
5105 *gdev = tswap64(*hdev);
5106 gdev++;
5107 hdev++;
5109 break;
5111 case DM_LIST_VERSIONS:
5113 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5114 uint32_t remaining_data = guest_data_size;
5115 void *cur_data = argptr;
5116 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5117 int vers_size = thunk_type_size(arg_type, 0);
5119 while (1) {
5120 uint32_t next = vers->next;
5121 if (next) {
5122 vers->next = vers_size + (strlen(vers->name) + 1);
5124 if (remaining_data < vers->next) {
5125 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5126 break;
5128 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5129 strcpy(cur_data + vers_size, vers->name);
5130 cur_data += vers->next;
5131 remaining_data -= vers->next;
5132 if (!next) {
5133 break;
5135 vers = (void*)vers + next;
5137 break;
5139 default:
5140 unlock_user(argptr, guest_data, 0);
5141 ret = -TARGET_EINVAL;
5142 goto out;
5144 unlock_user(argptr, guest_data, guest_data_size);
/* Finally, write the (possibly updated) header back to the guest. */
5146 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5147 if (!argptr) {
5148 ret = -TARGET_EFAULT;
5149 goto out;
5151 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5152 unlock_user(argptr, arg, target_size);
5154 out:
5155 g_free(big_buf);
5156 return ret;
/*
 * BLKPG handler.  The argument is a struct blkpg_ioctl_arg whose 'data'
 * member points at a struct blkpg_partition, so both levels must be
 * fetched and converted before calling the host.
 */
5159 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5160 int cmd, abi_long arg)
5162 void *argptr;
5163 int target_size;
5164 const argtype *arg_type = ie->arg_type;
5165 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5166 abi_long ret;
5168 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5169 struct blkpg_partition host_part;
5171 /* Read and convert blkpg */
5172 arg_type++;
5173 target_size = thunk_type_size(arg_type, 0);
5174 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5175 if (!argptr) {
5176 ret = -TARGET_EFAULT;
5177 goto out;
5179 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5180 unlock_user(argptr, arg, 0);
5182 switch (host_blkpg->op) {
5183 case BLKPG_ADD_PARTITION:
5184 case BLKPG_DEL_PARTITION:
5185 /* payload is struct blkpg_partition */
5186 break;
5187 default:
5188 /* Unknown opcode */
5189 ret = -TARGET_EINVAL;
5190 goto out;
5193 /* Read and convert blkpg->data */
5194 arg = (abi_long)(uintptr_t)host_blkpg->data;
5195 target_size = thunk_type_size(part_arg_type, 0);
5196 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5197 if (!argptr) {
5198 ret = -TARGET_EFAULT;
5199 goto out;
5201 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5202 unlock_user(argptr, arg, 0);
5204 /* Swizzle the data pointer to our local copy and call! */
5205 host_blkpg->data = &host_part;
5206 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5208 out:
5209 return ret;
/*
 * Routing-table ioctls (SIOCADDRT/SIOCDELRT).  struct rtentry contains
 * an embedded rt_dev string pointer, which the generic thunk layer
 * cannot translate, so the struct is converted field by field and the
 * device name is locked in from guest memory separately.
 */
5212 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5213 int fd, int cmd, abi_long arg)
5215 const argtype *arg_type = ie->arg_type;
5216 const StructEntry *se;
5217 const argtype *field_types;
5218 const int *dst_offsets, *src_offsets;
5219 int target_size;
5220 void *argptr;
5221 abi_ulong *target_rt_dev_ptr = NULL;
5222 unsigned long *host_rt_dev_ptr = NULL;
5223 abi_long ret;
5224 int i;
5226 assert(ie->access == IOC_W);
5227 assert(*arg_type == TYPE_PTR);
5228 arg_type++;
5229 assert(*arg_type == TYPE_STRUCT);
5230 target_size = thunk_type_size(arg_type, 0);
5231 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5232 if (!argptr) {
5233 return -TARGET_EFAULT;
5235 arg_type++;
5236 assert(*arg_type == (int)STRUCT_rtentry);
5237 se = struct_entries + *arg_type++;
5238 assert(se->convert[0] == NULL);
5239 /* convert struct here to be able to catch rt_dev string */
5240 field_types = se->field_types;
5241 dst_offsets = se->field_offsets[THUNK_HOST];
5242 src_offsets = se->field_offsets[THUNK_TARGET];
5243 for (i = 0; i < se->nb_fields; i++) {
5244 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5245 assert(*field_types == TYPE_PTRVOID);
5246 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5247 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
/* Lock the guest's device-name string into host memory. */
5248 if (*target_rt_dev_ptr != 0) {
5249 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5250 tswapal(*target_rt_dev_ptr));
5251 if (!*host_rt_dev_ptr) {
5252 unlock_user(argptr, arg, 0);
5253 return -TARGET_EFAULT;
5255 } else {
5256 *host_rt_dev_ptr = 0;
5258 field_types++;
5259 continue;
/* All other fields go through the generic thunk conversion. */
5261 field_types = thunk_convert(buf_temp + dst_offsets[i],
5262 argptr + src_offsets[i],
5263 field_types, THUNK_HOST);
5265 unlock_user(argptr, arg, 0);
5267 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* rtentry always has an rt_dev field, so the loop must have set these. */
5269 assert(host_rt_dev_ptr != NULL);
5270 assert(target_rt_dev_ptr != NULL);
5271 if (*host_rt_dev_ptr != 0) {
5272 unlock_user((void *)*host_rt_dev_ptr,
5273 *target_rt_dev_ptr, 0);
5275 return ret;
5278 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5279 int fd, int cmd, abi_long arg)
5281 int sig = target_to_host_signal(arg);
5282 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * SIOCGSTAMP: fetch the timestamp of the last packet received on the
 * socket and copy it out in either the old (32-bit time) or new
 * (64-bit time) guest layout, selected by the target command number.
 */
5285 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5286 int fd, int cmd, abi_long arg)
5288 struct timeval tv;
5289 abi_long ret;
5291 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5292 if (is_error(ret)) {
5293 return ret;
5296 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5297 if (copy_to_user_timeval(arg, &tv)) {
5298 return -TARGET_EFAULT;
5300 } else {
5301 if (copy_to_user_timeval64(arg, &tv)) {
5302 return -TARGET_EFAULT;
5306 return ret;
/*
 * SIOCGSTAMPNS: as do_ioctl_SIOCGSTAMP but with nanosecond resolution
 * (struct timespec), again dispatching on old vs. 64-bit time layout.
 */
5309 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5310 int fd, int cmd, abi_long arg)
5312 struct timespec ts;
5313 abi_long ret;
5315 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5316 if (is_error(ret)) {
5317 return ret;
5320 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5321 if (host_to_target_timespec(arg, &ts)) {
5322 return -TARGET_EFAULT;
5324 } else{
5325 if (host_to_target_timespec64(arg, &ts)) {
5326 return -TARGET_EFAULT;
5330 return ret;
#ifdef TIOCGPTPEER
/*
 * TIOCGPTPEER: opens the pty peer device.  The argument carries guest
 * open(2) flag bits, which must be mapped to the host's O_* encoding
 * before the host ioctl sees them.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_bitmask(arg, fcntl_flags_tbl)));
}
#endif
5342 #ifdef HAVE_DRM_H
/*
 * Release the guest string buffers pinned by target_to_host_drmversion().
 * With 'copy' set, the name/date/desc data is written back to the guest
 * for the lengths the kernel filled in; otherwise nothing is copied.
 */
5344 static void unlock_drm_version(struct drm_version *host_ver,
5345 struct target_drm_version *target_ver,
5346 bool copy)
5348 unlock_user(host_ver->name, target_ver->name,
5349 copy ? host_ver->name_len : 0);
5350 unlock_user(host_ver->date, target_ver->date,
5351 copy ? host_ver->date_len : 0);
5352 unlock_user(host_ver->desc, target_ver->desc,
5353 copy ? host_ver->desc_len : 0);
/*
 * Prepare a host struct drm_version from the guest's request: copy the
 * three buffer lengths and pin the corresponding guest buffers so the
 * kernel can fill them in directly.  Returns 0 or -EFAULT; on failure
 * any buffers already locked are released without copy-back.
 */
5356 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5357 struct target_drm_version *target_ver)
5359 memset(host_ver, 0, sizeof(*host_ver));
5361 __get_user(host_ver->name_len, &target_ver->name_len);
5362 if (host_ver->name_len) {
5363 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5364 target_ver->name_len, 0);
5365 if (!host_ver->name) {
5366 return -EFAULT;
5370 __get_user(host_ver->date_len, &target_ver->date_len);
5371 if (host_ver->date_len) {
5372 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5373 target_ver->date_len, 0);
5374 if (!host_ver->date) {
5375 goto err;
5379 __get_user(host_ver->desc_len, &target_ver->desc_len);
5380 if (host_ver->desc_len) {
5381 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5382 target_ver->desc_len, 0);
5383 if (!host_ver->desc) {
5384 goto err;
5388 return 0;
5389 err:
/* Undo any locks taken so far; 'false' means nothing is copied back. */
5390 unlock_drm_version(host_ver, target_ver, false);
5391 return -EFAULT;
/*
 * Copy the kernel's drm_version results back to the guest struct and
 * release the pinned string buffers, writing their contents back.
 */
5394 static inline void host_to_target_drmversion(
5395 struct target_drm_version *target_ver,
5396 struct drm_version *host_ver)
5398 __put_user(host_ver->version_major, &target_ver->version_major);
5399 __put_user(host_ver->version_minor, &target_ver->version_minor);
5400 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5401 __put_user(host_ver->name_len, &target_ver->name_len);
5402 __put_user(host_ver->date_len, &target_ver->date_len);
5403 __put_user(host_ver->desc_len, &target_ver->desc_len);
5404 unlock_drm_version(host_ver, target_ver, true);
/*
 * Dispatcher for generic DRM ioctls that need special handling.
 * Currently only DRM_IOCTL_VERSION, whose argument embeds three
 * guest-pointer/length pairs.
 */
5407 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5408 int fd, int cmd, abi_long arg)
5410 struct drm_version *ver;
5411 struct target_drm_version *target_ver;
5412 abi_long ret;
5414 switch (ie->host_cmd) {
5415 case DRM_IOCTL_VERSION:
5416 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5417 return -TARGET_EFAULT;
5419 ver = (struct drm_version *)buf_temp;
5420 ret = target_to_host_drmversion(ver, target_ver);
5421 if (!is_error(ret)) {
5422 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5423 if (is_error(ret)) {
/* Failure: drop the pinned buffers without copying back. */
5424 unlock_drm_version(ver, target_ver, false);
5425 } else {
5426 host_to_target_drmversion(target_ver, ver);
5429 unlock_user_struct(target_ver, arg, 0);
5430 return ret;
5432 return -TARGET_ENOSYS;
/*
 * DRM_IOCTL_I915_GETPARAM: the struct embeds a pointer to an int result
 * slot, so point it at a host-local 'value' and copy the result back to
 * the guest's slot afterwards.
 */
5435 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5436 struct drm_i915_getparam *gparam,
5437 int fd, abi_long arg)
5439 abi_long ret;
5440 int value;
5441 struct target_drm_i915_getparam *target_gparam;
5443 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5444 return -TARGET_EFAULT;
5447 __get_user(gparam->param, &target_gparam->param);
5448 gparam->value = &value;
5449 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
/* NOTE(review): the put_user_s32 result is ignored and it runs even
 * when the ioctl failed - presumably harmless, but worth confirming. */
5450 put_user_s32(value, target_gparam->value);
5452 unlock_user_struct(target_gparam, arg, 0);
5453 return ret;
5456 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5457 int fd, int cmd, abi_long arg)
5459 switch (ie->host_cmd) {
5460 case DRM_IOCTL_I915_GETPARAM:
5461 return do_ioctl_drm_i915_getparam(ie,
5462 (struct drm_i915_getparam *)buf_temp,
5463 fd, arg);
5464 default:
5465 return -TARGET_ENOSYS;
5469 #endif
/*
 * Master table of emulated ioctls, generated by expanding ioctls.h.
 * IOCTL maps a target command to a host command plus a thunk type
 * description; IOCTL_SPECIAL additionally installs a custom
 * do_ioctl_*() handler; IOCTL_IGNORE records the command with a zero
 * host_cmd so do_ioctl() returns -TARGET_ENOSYS for it.
 */
5471 IOCTLEntry ioctl_entries[] = {
5472 #define IOCTL(cmd, access, ...) \
5473 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5474 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5475 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5476 #define IOCTL_IGNORE(cmd) \
5477 { TARGET_ ## cmd, 0, #cmd },
5478 #include "ioctls.h"
/* Sentinel: target_cmd == 0 terminates the lookup in do_ioctl(). */
5479 { 0, 0, },
5482 /* ??? Implement proper locking for ioctls. */
5483 /* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: look the command up in ioctl_entries[],
 * hand off to a special handler if one is registered, otherwise
 * convert the argument according to its thunk type description and
 * access mode (read/write/read-write) around the host ioctl call.
 */
5484 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5486 const IOCTLEntry *ie;
5487 const argtype *arg_type;
5488 abi_long ret;
5489 uint8_t buf_temp[MAX_STRUCT_SIZE];
5490 int target_size;
5491 void *argptr;
/* Linear scan of the table; the zero target_cmd sentinel ends it. */
5493 ie = ioctl_entries;
5494 for(;;) {
5495 if (ie->target_cmd == 0) {
5496 qemu_log_mask(
5497 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5498 return -TARGET_ENOSYS;
5500 if (ie->target_cmd == cmd)
5501 break;
5502 ie++;
5504 arg_type = ie->arg_type;
5505 if (ie->do_ioctl) {
5506 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5507 } else if (!ie->host_cmd) {
5508 /* Some architectures define BSD ioctls in their headers
5509 that are not implemented in Linux. */
5510 return -TARGET_ENOSYS;
5513 switch(arg_type[0]) {
5514 case TYPE_NULL:
5515 /* no argument */
5516 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5517 break;
5518 case TYPE_PTRVOID:
5519 case TYPE_INT:
5520 case TYPE_LONG:
5521 case TYPE_ULONG:
/* Scalar argument: pass through unchanged. */
5522 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5523 break;
5524 case TYPE_PTR:
5525 arg_type++;
5526 target_size = thunk_type_size(arg_type, 0);
5527 switch(ie->access) {
5528 case IOC_R:
/* Kernel writes: call first, then convert the result to the guest. */
5529 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5530 if (!is_error(ret)) {
5531 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5532 if (!argptr)
5533 return -TARGET_EFAULT;
5534 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5535 unlock_user(argptr, arg, target_size);
5537 break;
5538 case IOC_W:
/* Kernel reads: convert from the guest first, then call. */
5539 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5540 if (!argptr)
5541 return -TARGET_EFAULT;
5542 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5543 unlock_user(argptr, arg, 0);
5544 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5545 break;
5546 default:
5547 case IOC_RW:
/* Both directions: convert in, call, convert the result back out. */
5548 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5549 if (!argptr)
5550 return -TARGET_EFAULT;
5551 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5552 unlock_user(argptr, arg, 0);
5553 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5554 if (!is_error(ret)) {
5555 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5556 if (!argptr)
5557 return -TARGET_EFAULT;
5558 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5559 unlock_user(argptr, arg, target_size);
5561 break;
5563 break;
5564 default:
5565 qemu_log_mask(LOG_UNIMP,
5566 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5567 (long)cmd, arg_type[0]);
5568 ret = -TARGET_ENOSYS;
5569 break;
5571 return ret;
/* termios c_iflag (input mode) bit translation, target <-> host. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
    { 0, 0, 0, 0 }  /* sentinel */
};
/* termios c_oflag (output mode) bit translation, target <-> host.
   Multi-bit delay fields (NLDLY, CRDLY, ...) get one row per value. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }  /* sentinel */
};
/* termios c_cflag (control mode) bit translation, target <-> host.
   The CBAUD field is multi-bit, so each baud rate gets its own row;
   likewise CSIZE for the character-size values. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }  /* sentinel */
};
/* termios c_lflag (local mode) bit translation, target <-> host. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
    { 0, 0, 0, 0 }  /* sentinel */
};
/*
 * Convert a guest struct target_termios into the host's struct
 * host_termios: byte-swap and bit-translate the four mode fields,
 * copy the line discipline, and remap each control character to
 * its host index.  Registered as a thunk callback in
 * struct_termios_def.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Zero first: host indices not covered below stay cleared. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Inverse of target_to_host_termios(): convert the host's struct
 * host_termios into the guest layout, translating flag bits back to
 * target values and remapping control characters to target indices.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Zero first: target indices not covered below stay cleared. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/*
 * Thunk descriptor for struct termios: conversion callbacks plus the
 * sizes/alignments of the target and host layouts.  Used by the
 * generic struct-conversion machinery for the termios ioctls.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5753 static bitmask_transtbl mmap_flags_tbl[] = {
5754 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5755 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5756 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5757 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5758 MAP_ANONYMOUS, MAP_ANONYMOUS },
5759 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5760 MAP_GROWSDOWN, MAP_GROWSDOWN },
5761 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5762 MAP_DENYWRITE, MAP_DENYWRITE },
5763 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5764 MAP_EXECUTABLE, MAP_EXECUTABLE },
5765 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5766 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5767 MAP_NORESERVE, MAP_NORESERVE },
5768 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5769 /* MAP_STACK had been ignored by the kernel for quite some time.
5770 Recognize it for the target insofar as we do not want to pass
5771 it through to the host. */
5772 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5773 { 0, 0, 0, 0 }
5777 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5778 * TARGET_I386 is defined if TARGET_X86_64 is defined
5780 #if defined(TARGET_I386)
5782 /* NOTE: there is really one LDT for all the threads */
5783 static uint8_t *ldt_table;
5785 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5787 int size;
5788 void *p;
5790 if (!ldt_table)
5791 return 0;
5792 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5793 if (size > bytecount)
5794 size = bytecount;
5795 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5796 if (!p)
5797 return -TARGET_EFAULT;
5798 /* ??? Should this by byteswapped? */
5799 memcpy(p, ldt_table, size);
5800 unlock_user(p, ptr, size);
5801 return size;
/* XXX: add locking support */
/*
 * modify_ldt(2) "write" operation: validate a guest
 * target_modify_ldt_ldt_s descriptor, lazily allocate the emulated
 * LDT on first use, then pack the descriptor into the two 32-bit
 * words of an x86 segment descriptor and store it.  'oldmode'
 * selects the legacy (func == 1) semantics, which reject contents==3
 * and never set the 'useable' bit.  Returns 0, -TARGET_EINVAL,
 * -TARGET_EFAULT, or -TARGET_ENOMEM.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words (base/limit split across both words,
       plus access/flag bits; 0x7000 = DPL 3, present as computed). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5896 /* specific and weird i386 syscalls */
5897 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5898 unsigned long bytecount)
5900 abi_long ret;
5902 switch (func) {
5903 case 0:
5904 ret = read_ldt(ptr, bytecount);
5905 break;
5906 case 1:
5907 ret = write_ldt(env, ptr, bytecount, 1);
5908 break;
5909 case 0x11:
5910 ret = write_ldt(env, ptr, bytecount, 0);
5911 break;
5912 default:
5913 ret = -TARGET_ENOSYS;
5914 break;
5916 return ret;
5919 #if defined(TARGET_ABI32)
/*
 * set_thread_area(2) for 32-bit x86 guests: install a TLS descriptor
 * into the emulated GDT.  If entry_number is -1, pick the first free
 * TLS slot and write the chosen index back to the guest struct.
 * Returns 0, -TARGET_EFAULT, or -TARGET_EINVAL.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot for me". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* Always taken here: this function is only built for TARGET_ABI32. */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two words of the x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2) for 32-bit x86 guests: read back a TLS
 * descriptor from the emulated GDT and unpack it into the guest's
 * target_modify_ldt_ldt_s (inverse of do_set_thread_area's packing).
 * Returns 0, -TARGET_EFAULT, or -TARGET_EINVAL for an out-of-range
 * entry number.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual flag bits from descriptor word 2. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc-style flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
/* arch_prctl is not available to 32-bit x86 guests (TARGET_ABI32);
 * TLS is handled via set_thread_area instead. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6056 #else
6057 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6059 abi_long ret = 0;
6060 abi_ulong val;
6061 int idx;
6063 switch(code) {
6064 case TARGET_ARCH_SET_GS:
6065 case TARGET_ARCH_SET_FS:
6066 if (code == TARGET_ARCH_SET_GS)
6067 idx = R_GS;
6068 else
6069 idx = R_FS;
6070 cpu_x86_load_seg(env, idx, 0);
6071 env->segs[idx].base = addr;
6072 break;
6073 case TARGET_ARCH_GET_GS:
6074 case TARGET_ARCH_GET_FS:
6075 if (code == TARGET_ARCH_GET_GS)
6076 idx = R_GS;
6077 else
6078 idx = R_FS;
6079 val = env->segs[idx].base;
6080 if (put_user(val, addr, abi_ulong))
6081 ret = -TARGET_EFAULT;
6082 break;
6083 default:
6084 ret = -TARGET_EINVAL;
6085 break;
6087 return ret;
#endif /* defined(TARGET_ABI32) */
6091 #endif /* defined(TARGET_I386) */
6093 #define NEW_STACK_SIZE 0x40000
/* Held by do_fork() while a new thread is being set up; clone_func()
 * takes and releases it to wait until the parent has finished
 * initializing the TLS state. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parent/child handshake data for CLONE_VM thread creation: the child
 * fills in 'tid' and signals 'cond' once it is registered. */
typedef struct {
    CPUArchState *env;          /* CPU state prepared by the parent */
    pthread_mutex_t mutex;      /* protects the cond handshake */
    pthread_cond_t cond;        /* signalled when the child is ready */
    pthread_t thread;
    uint32_t tid;               /* child's host TID, set by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
/*
 * Entry point of a new guest thread created via clone(CLONE_VM).
 * Registers the thread with RCU/TCG, publishes its TID, restores the
 * signal mask, signals readiness to the parent, then waits for the
 * parent to release clone_lock before entering the CPU loop.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Publish the TID where CLONE_CHILD_SETTID / CLONE_PARENT_SETTID
       asked for it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2) for the guest.  CLONE_VM
 * requests become a host pthread sharing this process's guest memory;
 * anything else becomes a host fork().  vfork() is emulated with
 * fork().  Returns the child TID/PID on success, -1 on pthread
 * failure, or a negative target errno for invalid flag combinations.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        /* Hold info.mutex across pthread_create so the child cannot
           broadcast before we reach pthread_cond_wait below. */
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the return values of these three attr calls are
           each overwritten by the next; failures go unnoticed. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore our signal mask; the child keeps everything blocked
           until it restores info.sigmask itself. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed into cpu_loop. */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl(2) command to the host command number.  Lock
 * commands are mapped to the 64-bit variants (F_*LK64) so that large
 * offsets work for 32-bit guests.  Unknown commands yield
 * -TARGET_EINVAL, which do_fcntl() checks for before issuing the
 * syscall.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        /* Same numbering on target and host. */
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
/* Expands to a switch body over the flock l_type values; the caller
 * defines TRANSTBL_CONVERT to produce either the target->host or the
 * host->target case label/return pair. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
/* Convert a target flock l_type value to the host value; returns
 * -TARGET_EINVAL for unknown types. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
/* Convert a host flock l_type value to the target value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6412 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6413 abi_ulong target_flock_addr)
6415 struct target_flock *target_fl;
6416 int l_type;
6418 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6419 return -TARGET_EFAULT;
6422 __get_user(l_type, &target_fl->l_type);
6423 l_type = target_to_host_flock(l_type);
6424 if (l_type < 0) {
6425 return l_type;
6427 fl->l_type = l_type;
6428 __get_user(fl->l_whence, &target_fl->l_whence);
6429 __get_user(fl->l_start, &target_fl->l_start);
6430 __get_user(fl->l_len, &target_fl->l_len);
6431 __get_user(fl->l_pid, &target_fl->l_pid);
6432 unlock_user_struct(target_fl, target_flock_addr, 0);
6433 return 0;
/*
 * Copy a host struct flock64 out to a guest struct target_flock,
 * translating the lock type back to the target value.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6456 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6457 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6459 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6460 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6461 abi_ulong target_flock_addr)
6463 struct target_oabi_flock64 *target_fl;
6464 int l_type;
6466 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6467 return -TARGET_EFAULT;
6470 __get_user(l_type, &target_fl->l_type);
6471 l_type = target_to_host_flock(l_type);
6472 if (l_type < 0) {
6473 return l_type;
6475 fl->l_type = l_type;
6476 __get_user(fl->l_whence, &target_fl->l_whence);
6477 __get_user(fl->l_start, &target_fl->l_start);
6478 __get_user(fl->l_len, &target_fl->l_len);
6479 __get_user(fl->l_pid, &target_fl->l_pid);
6480 unlock_user_struct(target_fl, target_flock_addr, 0);
6481 return 0;
/*
 * ARM OABI variant: copy a host struct flock64 out to a guest
 * struct target_oabi_flock64.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6503 #endif
6505 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6506 abi_ulong target_flock_addr)
6508 struct target_flock64 *target_fl;
6509 int l_type;
6511 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6512 return -TARGET_EFAULT;
6515 __get_user(l_type, &target_fl->l_type);
6516 l_type = target_to_host_flock(l_type);
6517 if (l_type < 0) {
6518 return l_type;
6520 fl->l_type = l_type;
6521 __get_user(fl->l_whence, &target_fl->l_whence);
6522 __get_user(fl->l_start, &target_fl->l_start);
6523 __get_user(fl->l_len, &target_fl->l_len);
6524 __get_user(fl->l_pid, &target_fl->l_pid);
6525 unlock_user_struct(target_fl, target_flock_addr, 0);
6526 return 0;
/*
 * Copy a host struct flock64 out to a guest struct target_flock64,
 * translating the lock type back to the target value.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6549 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6551 struct flock64 fl64;
6552 #ifdef F_GETOWN_EX
6553 struct f_owner_ex fox;
6554 struct target_f_owner_ex *target_fox;
6555 #endif
6556 abi_long ret;
6557 int host_cmd = target_to_host_fcntl_cmd(cmd);
6559 if (host_cmd == -TARGET_EINVAL)
6560 return host_cmd;
6562 switch(cmd) {
6563 case TARGET_F_GETLK:
6564 ret = copy_from_user_flock(&fl64, arg);
6565 if (ret) {
6566 return ret;
6568 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6569 if (ret == 0) {
6570 ret = copy_to_user_flock(arg, &fl64);
6572 break;
6574 case TARGET_F_SETLK:
6575 case TARGET_F_SETLKW:
6576 ret = copy_from_user_flock(&fl64, arg);
6577 if (ret) {
6578 return ret;
6580 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6581 break;
6583 case TARGET_F_GETLK64:
6584 case TARGET_F_OFD_GETLK:
6585 ret = copy_from_user_flock64(&fl64, arg);
6586 if (ret) {
6587 return ret;
6589 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6590 if (ret == 0) {
6591 ret = copy_to_user_flock64(arg, &fl64);
6593 break;
6594 case TARGET_F_SETLK64:
6595 case TARGET_F_SETLKW64:
6596 case TARGET_F_OFD_SETLK:
6597 case TARGET_F_OFD_SETLKW:
6598 ret = copy_from_user_flock64(&fl64, arg);
6599 if (ret) {
6600 return ret;
6602 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6603 break;
6605 case TARGET_F_GETFL:
6606 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6607 if (ret >= 0) {
6608 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6610 break;
6612 case TARGET_F_SETFL:
6613 ret = get_errno(safe_fcntl(fd, host_cmd,
6614 target_to_host_bitmask(arg,
6615 fcntl_flags_tbl)));
6616 break;
6618 #ifdef F_GETOWN_EX
6619 case TARGET_F_GETOWN_EX:
6620 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6621 if (ret >= 0) {
6622 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6623 return -TARGET_EFAULT;
6624 target_fox->type = tswap32(fox.type);
6625 target_fox->pid = tswap32(fox.pid);
6626 unlock_user_struct(target_fox, arg, 1);
6628 break;
6629 #endif
6631 #ifdef F_SETOWN_EX
6632 case TARGET_F_SETOWN_EX:
6633 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6634 return -TARGET_EFAULT;
6635 fox.type = tswap32(target_fox->type);
6636 fox.pid = tswap32(target_fox->pid);
6637 unlock_user_struct(target_fox, arg, 0);
6638 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6639 break;
6640 #endif
6642 case TARGET_F_SETOWN:
6643 case TARGET_F_GETOWN:
6644 case TARGET_F_SETSIG:
6645 case TARGET_F_GETSIG:
6646 case TARGET_F_SETLEASE:
6647 case TARGET_F_GETLEASE:
6648 case TARGET_F_SETPIPE_SZ:
6649 case TARGET_F_GETPIPE_SZ:
6650 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6651 break;
6653 default:
6654 ret = get_errno(safe_fcntl(fd, cmd, arg));
6655 break;
6657 return ret;
6660 #ifdef USE_UID16
/* Clamp a 32-bit uid into the legacy 16-bit range; ids that do not
 * fit become the overflow id 65534 ("nobody"). */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

/* Clamp a 32-bit gid into the legacy 16-bit range (overflow -> 65534). */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit uid; the 16-bit sentinel -1 (0xffff) stays -1. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

/* Widen a 16-bit gid; the 16-bit sentinel -1 (0xffff) stays -1. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Swap a 16-bit uid/gid between host and target byte order
 * (no-op when host and target endianness match). */
static inline int tswapid(int id)
{
    return tswap16(id);
}
6698 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6700 #else /* !USE_UID16 */
/* 32-bit uid/gid build: no narrowing needed, these are identities. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}
/* Swap a 32-bit uid/gid between host and target byte order
 * (no-op when host and target endianness match). */
static inline int tswapid(int id)
{
    return tswap32(id);
}
6722 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6724 #endif /* USE_UID16 */
6726 /* We must do direct syscalls for setting UID/GID, because we want to
6727 * implement the Linux system call semantics of "change only for this thread",
6728 * not the libc/POSIX semantics of "change for all threads in process".
6729 * (See http://ewontfix.com/17/ for more details.)
6730 * We use the 32-bit version of the syscalls if present; if it is not
6731 * then either the host architecture supports 32-bit UIDs natively with
6732 * the standard syscall, or the 16-bit UID is the best we can do.
6734 #ifdef __NR_setuid32
6735 #define __NR_sys_setuid __NR_setuid32
6736 #else
6737 #define __NR_sys_setuid __NR_setuid
6738 #endif
6739 #ifdef __NR_setgid32
6740 #define __NR_sys_setgid __NR_setgid32
6741 #else
6742 #define __NR_sys_setgid __NR_setgid
6743 #endif
6744 #ifdef __NR_setresuid32
6745 #define __NR_sys_setresuid __NR_setresuid32
6746 #else
6747 #define __NR_sys_setresuid __NR_setresuid
6748 #endif
6749 #ifdef __NR_setresgid32
6750 #define __NR_sys_setresgid __NR_setresgid32
6751 #else
6752 #define __NR_sys_setresgid __NR_setresgid
6753 #endif
6755 _syscall1(int, sys_setuid, uid_t, uid)
6756 _syscall1(int, sys_setgid, gid_t, gid)
6757 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6758 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6760 void syscall_init(void)
6762 IOCTLEntry *ie;
6763 const argtype *arg_type;
6764 int size;
6765 int i;
6767 thunk_init(STRUCT_MAX);
6769 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6770 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6771 #include "syscall_types.h"
6772 #undef STRUCT
6773 #undef STRUCT_SPECIAL
6775 /* Build target_to_host_errno_table[] table from
6776 * host_to_target_errno_table[]. */
6777 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6778 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6781 /* we patch the ioctl size if necessary. We rely on the fact that
6782 no ioctl has all the bits at '1' in the size field */
6783 ie = ioctl_entries;
6784 while (ie->target_cmd != 0) {
6785 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6786 TARGET_IOC_SIZEMASK) {
6787 arg_type = ie->arg_type;
6788 if (arg_type[0] != TYPE_PTR) {
6789 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6790 ie->target_cmd);
6791 exit(1);
6793 arg_type++;
6794 size = thunk_type_size(arg_type, 0);
6795 ie->target_cmd = (ie->target_cmd &
6796 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6797 (size << TARGET_IOC_SIZESHIFT);
6800 /* automatic consistency check if same arch */
6801 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6802 (defined(__x86_64__) && defined(TARGET_X86_64))
6803 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6804 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6805 ie->name, ie->target_cmd, ie->host_cmd);
6807 #endif
6808 ie++;
6812 #ifdef TARGET_NR_truncate64
6813 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6814 abi_long arg2,
6815 abi_long arg3,
6816 abi_long arg4)
6818 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6819 arg2 = arg3;
6820 arg3 = arg4;
6822 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6824 #endif
6826 #ifdef TARGET_NR_ftruncate64
6827 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6828 abi_long arg2,
6829 abi_long arg3,
6830 abi_long arg4)
6832 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6833 arg2 = arg3;
6834 arg3 = arg4;
6836 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6838 #endif
6840 #if defined(TARGET_NR_timer_settime) || \
6841 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6842 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6843 abi_ulong target_addr)
6845 if (target_to_host_timespec(&host_its->it_interval, target_addr +
6846 offsetof(struct target_itimerspec,
6847 it_interval)) ||
6848 target_to_host_timespec(&host_its->it_value, target_addr +
6849 offsetof(struct target_itimerspec,
6850 it_value))) {
6851 return -TARGET_EFAULT;
6854 return 0;
6856 #endif
6858 #if defined(TARGET_NR_timer_settime64) || \
6859 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6860 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6861 abi_ulong target_addr)
6863 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6864 offsetof(struct target__kernel_itimerspec,
6865 it_interval)) ||
6866 target_to_host_timespec64(&host_its->it_value, target_addr +
6867 offsetof(struct target__kernel_itimerspec,
6868 it_value))) {
6869 return -TARGET_EFAULT;
6872 return 0;
6874 #endif
6876 #if ((defined(TARGET_NR_timerfd_gettime) || \
6877 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6878 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6879 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6880 struct itimerspec *host_its)
6882 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6883 it_interval),
6884 &host_its->it_interval) ||
6885 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6886 it_value),
6887 &host_its->it_value)) {
6888 return -TARGET_EFAULT;
6890 return 0;
6892 #endif
6894 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6895 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6896 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6897 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6898 struct itimerspec *host_its)
6900 if (host_to_target_timespec64(target_addr +
6901 offsetof(struct target__kernel_itimerspec,
6902 it_interval),
6903 &host_its->it_interval) ||
6904 host_to_target_timespec64(target_addr +
6905 offsetof(struct target__kernel_itimerspec,
6906 it_value),
6907 &host_its->it_value)) {
6908 return -TARGET_EFAULT;
6910 return 0;
6912 #endif
6914 #if defined(TARGET_NR_adjtimex) || \
6915 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6916 static inline abi_long target_to_host_timex(struct timex *host_tx,
6917 abi_long target_addr)
6919 struct target_timex *target_tx;
6921 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6922 return -TARGET_EFAULT;
6925 __get_user(host_tx->modes, &target_tx->modes);
6926 __get_user(host_tx->offset, &target_tx->offset);
6927 __get_user(host_tx->freq, &target_tx->freq);
6928 __get_user(host_tx->maxerror, &target_tx->maxerror);
6929 __get_user(host_tx->esterror, &target_tx->esterror);
6930 __get_user(host_tx->status, &target_tx->status);
6931 __get_user(host_tx->constant, &target_tx->constant);
6932 __get_user(host_tx->precision, &target_tx->precision);
6933 __get_user(host_tx->tolerance, &target_tx->tolerance);
6934 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6935 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6936 __get_user(host_tx->tick, &target_tx->tick);
6937 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6938 __get_user(host_tx->jitter, &target_tx->jitter);
6939 __get_user(host_tx->shift, &target_tx->shift);
6940 __get_user(host_tx->stabil, &target_tx->stabil);
6941 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6942 __get_user(host_tx->calcnt, &target_tx->calcnt);
6943 __get_user(host_tx->errcnt, &target_tx->errcnt);
6944 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6945 __get_user(host_tx->tai, &target_tx->tai);
6947 unlock_user_struct(target_tx, target_addr, 0);
6948 return 0;
6951 static inline abi_long host_to_target_timex(abi_long target_addr,
6952 struct timex *host_tx)
6954 struct target_timex *target_tx;
6956 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6957 return -TARGET_EFAULT;
6960 __put_user(host_tx->modes, &target_tx->modes);
6961 __put_user(host_tx->offset, &target_tx->offset);
6962 __put_user(host_tx->freq, &target_tx->freq);
6963 __put_user(host_tx->maxerror, &target_tx->maxerror);
6964 __put_user(host_tx->esterror, &target_tx->esterror);
6965 __put_user(host_tx->status, &target_tx->status);
6966 __put_user(host_tx->constant, &target_tx->constant);
6967 __put_user(host_tx->precision, &target_tx->precision);
6968 __put_user(host_tx->tolerance, &target_tx->tolerance);
6969 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6970 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6971 __put_user(host_tx->tick, &target_tx->tick);
6972 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6973 __put_user(host_tx->jitter, &target_tx->jitter);
6974 __put_user(host_tx->shift, &target_tx->shift);
6975 __put_user(host_tx->stabil, &target_tx->stabil);
6976 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6977 __put_user(host_tx->calcnt, &target_tx->calcnt);
6978 __put_user(host_tx->errcnt, &target_tx->errcnt);
6979 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6980 __put_user(host_tx->tai, &target_tx->tai);
6982 unlock_user_struct(target_tx, target_addr, 1);
6983 return 0;
6985 #endif
6988 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
6989 static inline abi_long target_to_host_timex64(struct timex *host_tx,
6990 abi_long target_addr)
6992 struct target__kernel_timex *target_tx;
6994 if (copy_from_user_timeval64(&host_tx->time, target_addr +
6995 offsetof(struct target__kernel_timex,
6996 time))) {
6997 return -TARGET_EFAULT;
7000 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7001 return -TARGET_EFAULT;
7004 __get_user(host_tx->modes, &target_tx->modes);
7005 __get_user(host_tx->offset, &target_tx->offset);
7006 __get_user(host_tx->freq, &target_tx->freq);
7007 __get_user(host_tx->maxerror, &target_tx->maxerror);
7008 __get_user(host_tx->esterror, &target_tx->esterror);
7009 __get_user(host_tx->status, &target_tx->status);
7010 __get_user(host_tx->constant, &target_tx->constant);
7011 __get_user(host_tx->precision, &target_tx->precision);
7012 __get_user(host_tx->tolerance, &target_tx->tolerance);
7013 __get_user(host_tx->tick, &target_tx->tick);
7014 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7015 __get_user(host_tx->jitter, &target_tx->jitter);
7016 __get_user(host_tx->shift, &target_tx->shift);
7017 __get_user(host_tx->stabil, &target_tx->stabil);
7018 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7019 __get_user(host_tx->calcnt, &target_tx->calcnt);
7020 __get_user(host_tx->errcnt, &target_tx->errcnt);
7021 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7022 __get_user(host_tx->tai, &target_tx->tai);
7024 unlock_user_struct(target_tx, target_addr, 0);
7025 return 0;
7028 static inline abi_long host_to_target_timex64(abi_long target_addr,
7029 struct timex *host_tx)
7031 struct target__kernel_timex *target_tx;
7033 if (copy_to_user_timeval64(target_addr +
7034 offsetof(struct target__kernel_timex, time),
7035 &host_tx->time)) {
7036 return -TARGET_EFAULT;
7039 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7040 return -TARGET_EFAULT;
7043 __put_user(host_tx->modes, &target_tx->modes);
7044 __put_user(host_tx->offset, &target_tx->offset);
7045 __put_user(host_tx->freq, &target_tx->freq);
7046 __put_user(host_tx->maxerror, &target_tx->maxerror);
7047 __put_user(host_tx->esterror, &target_tx->esterror);
7048 __put_user(host_tx->status, &target_tx->status);
7049 __put_user(host_tx->constant, &target_tx->constant);
7050 __put_user(host_tx->precision, &target_tx->precision);
7051 __put_user(host_tx->tolerance, &target_tx->tolerance);
7052 __put_user(host_tx->tick, &target_tx->tick);
7053 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7054 __put_user(host_tx->jitter, &target_tx->jitter);
7055 __put_user(host_tx->shift, &target_tx->shift);
7056 __put_user(host_tx->stabil, &target_tx->stabil);
7057 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7058 __put_user(host_tx->calcnt, &target_tx->calcnt);
7059 __put_user(host_tx->errcnt, &target_tx->errcnt);
7060 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7061 __put_user(host_tx->tai, &target_tx->tai);
7063 unlock_user_struct(target_tx, target_addr, 1);
7064 return 0;
7066 #endif
7068 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7069 abi_ulong target_addr)
7071 struct target_sigevent *target_sevp;
7073 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7074 return -TARGET_EFAULT;
7077 /* This union is awkward on 64 bit systems because it has a 32 bit
7078 * integer and a pointer in it; we follow the conversion approach
7079 * used for handling sigval types in signal.c so the guest should get
7080 * the correct value back even if we did a 64 bit byteswap and it's
7081 * using the 32 bit integer.
7083 host_sevp->sigev_value.sival_ptr =
7084 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7085 host_sevp->sigev_signo =
7086 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7087 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7088 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7090 unlock_user_struct(target_sevp, target_addr, 1);
7091 return 0;
7094 #if defined(TARGET_NR_mlockall)
7095 static inline int target_to_host_mlockall_arg(int arg)
7097 int result = 0;
7099 if (arg & TARGET_MCL_CURRENT) {
7100 result |= MCL_CURRENT;
7102 if (arg & TARGET_MCL_FUTURE) {
7103 result |= MCL_FUTURE;
7105 #ifdef MCL_ONFAULT
7106 if (arg & TARGET_MCL_ONFAULT) {
7107 result |= MCL_ONFAULT;
7109 #endif
7111 return result;
7113 #endif
7115 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7116 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7117 defined(TARGET_NR_newfstatat))
7118 static inline abi_long host_to_target_stat64(void *cpu_env,
7119 abi_ulong target_addr,
7120 struct stat *host_st)
7122 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7123 if (((CPUARMState *)cpu_env)->eabi) {
7124 struct target_eabi_stat64 *target_st;
7126 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7127 return -TARGET_EFAULT;
7128 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7129 __put_user(host_st->st_dev, &target_st->st_dev);
7130 __put_user(host_st->st_ino, &target_st->st_ino);
7131 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7132 __put_user(host_st->st_ino, &target_st->__st_ino);
7133 #endif
7134 __put_user(host_st->st_mode, &target_st->st_mode);
7135 __put_user(host_st->st_nlink, &target_st->st_nlink);
7136 __put_user(host_st->st_uid, &target_st->st_uid);
7137 __put_user(host_st->st_gid, &target_st->st_gid);
7138 __put_user(host_st->st_rdev, &target_st->st_rdev);
7139 __put_user(host_st->st_size, &target_st->st_size);
7140 __put_user(host_st->st_blksize, &target_st->st_blksize);
7141 __put_user(host_st->st_blocks, &target_st->st_blocks);
7142 __put_user(host_st->st_atime, &target_st->target_st_atime);
7143 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7144 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7145 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7146 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7147 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7148 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7149 #endif
7150 unlock_user_struct(target_st, target_addr, 1);
7151 } else
7152 #endif
7154 #if defined(TARGET_HAS_STRUCT_STAT64)
7155 struct target_stat64 *target_st;
7156 #else
7157 struct target_stat *target_st;
7158 #endif
7160 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7161 return -TARGET_EFAULT;
7162 memset(target_st, 0, sizeof(*target_st));
7163 __put_user(host_st->st_dev, &target_st->st_dev);
7164 __put_user(host_st->st_ino, &target_st->st_ino);
7165 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7166 __put_user(host_st->st_ino, &target_st->__st_ino);
7167 #endif
7168 __put_user(host_st->st_mode, &target_st->st_mode);
7169 __put_user(host_st->st_nlink, &target_st->st_nlink);
7170 __put_user(host_st->st_uid, &target_st->st_uid);
7171 __put_user(host_st->st_gid, &target_st->st_gid);
7172 __put_user(host_st->st_rdev, &target_st->st_rdev);
7173 /* XXX: better use of kernel struct */
7174 __put_user(host_st->st_size, &target_st->st_size);
7175 __put_user(host_st->st_blksize, &target_st->st_blksize);
7176 __put_user(host_st->st_blocks, &target_st->st_blocks);
7177 __put_user(host_st->st_atime, &target_st->target_st_atime);
7178 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7179 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7180 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7181 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7182 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7183 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7184 #endif
7185 unlock_user_struct(target_st, target_addr, 1);
7188 return 0;
7190 #endif
7192 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7193 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7194 abi_ulong target_addr)
7196 struct target_statx *target_stx;
7198 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7199 return -TARGET_EFAULT;
7201 memset(target_stx, 0, sizeof(*target_stx));
7203 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7204 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7205 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7206 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7207 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7208 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7209 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7210 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7211 __put_user(host_stx->stx_size, &target_stx->stx_size);
7212 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7213 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7214 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7215 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7216 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7217 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7218 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7219 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7220 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7221 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7222 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7223 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7224 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7225 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7227 unlock_user_struct(target_stx, target_addr, 1);
7229 return 0;
7231 #endif
/*
 * Invoke the raw futex syscall, picking between __NR_futex and
 * __NR_futex_time64 depending on the host's word size and which
 * syscall numbers its kernel headers provide.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* 64-bit host: time_t is already 64-bit, no _time64 variant exists */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* 32-bit host built with 64-bit time_t: use the _time64 call */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* 32-bit host with classic 32-bit time_t */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7258 static int do_safe_futex(int *uaddr, int op, int val,
7259 const struct timespec *timeout, int *uaddr2,
7260 int val3)
7262 #if HOST_LONG_BITS == 64
7263 #if defined(__NR_futex)
7264 /* always a 64-bit time_t, it doesn't define _time64 version */
7265 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7266 #endif
7267 #else /* HOST_LONG_BITS == 64 */
7268 #if defined(__NR_futex_time64)
7269 if (sizeof(timeout->tv_sec) == 8) {
7270 /* _time64 function on 32bit arch */
7271 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7272 val3));
7274 #endif
7275 #if defined(__NR_futex)
7276 /* old function on 32bit arch */
7277 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7278 #endif
7279 #endif /* HOST_LONG_BITS == 64 */
7280 return -TARGET_ENOSYS;
7283 /* ??? Using host futex calls even when target atomic operations
7284 are not really atomic probably breaks things. However implementing
7285 futexes locally would make futexes shared between multiple processes
7286 tricky. However they're probably useless because guest atomic
7287 operations won't work either. */
7288 #if defined(TARGET_NR_futex)
7289 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7290 target_ulong uaddr2, int val3)
7292 struct timespec ts, *pts;
7293 int base_op;
7295 /* ??? We assume FUTEX_* constants are the same on both host
7296 and target. */
7297 #ifdef FUTEX_CMD_MASK
7298 base_op = op & FUTEX_CMD_MASK;
7299 #else
7300 base_op = op;
7301 #endif
7302 switch (base_op) {
7303 case FUTEX_WAIT:
7304 case FUTEX_WAIT_BITSET:
7305 if (timeout) {
7306 pts = &ts;
7307 target_to_host_timespec(pts, timeout);
7308 } else {
7309 pts = NULL;
7311 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7312 case FUTEX_WAKE:
7313 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7314 case FUTEX_FD:
7315 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7316 case FUTEX_REQUEUE:
7317 case FUTEX_CMP_REQUEUE:
7318 case FUTEX_WAKE_OP:
7319 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7320 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7321 But the prototype takes a `struct timespec *'; insert casts
7322 to satisfy the compiler. We do not need to tswap TIMEOUT
7323 since it's not compared to guest memory. */
7324 pts = (struct timespec *)(uintptr_t) timeout;
7325 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7326 (base_op == FUTEX_CMP_REQUEUE
7327 ? tswap32(val3)
7328 : val3));
7329 default:
7330 return -TARGET_ENOSYS;
7333 #endif
7335 #if defined(TARGET_NR_futex_time64)
7336 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7337 target_ulong uaddr2, int val3)
7339 struct timespec ts, *pts;
7340 int base_op;
7342 /* ??? We assume FUTEX_* constants are the same on both host
7343 and target. */
7344 #ifdef FUTEX_CMD_MASK
7345 base_op = op & FUTEX_CMD_MASK;
7346 #else
7347 base_op = op;
7348 #endif
7349 switch (base_op) {
7350 case FUTEX_WAIT:
7351 case FUTEX_WAIT_BITSET:
7352 if (timeout) {
7353 pts = &ts;
7354 target_to_host_timespec64(pts, timeout);
7355 } else {
7356 pts = NULL;
7358 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7359 case FUTEX_WAKE:
7360 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7361 case FUTEX_FD:
7362 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7363 case FUTEX_REQUEUE:
7364 case FUTEX_CMP_REQUEUE:
7365 case FUTEX_WAKE_OP:
7366 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7367 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7368 But the prototype takes a `struct timespec *'; insert casts
7369 to satisfy the compiler. We do not need to tswap TIMEOUT
7370 since it's not compared to guest memory. */
7371 pts = (struct timespec *)(uintptr_t) timeout;
7372 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7373 (base_op == FUTEX_CMP_REQUEUE
7374 ? tswap32(val3)
7375 : val3));
7376 default:
7377 return -TARGET_ENOSYS;
7380 #endif
7382 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7383 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7384 abi_long handle, abi_long mount_id,
7385 abi_long flags)
7387 struct file_handle *target_fh;
7388 struct file_handle *fh;
7389 int mid = 0;
7390 abi_long ret;
7391 char *name;
7392 unsigned int size, total_size;
7394 if (get_user_s32(size, handle)) {
7395 return -TARGET_EFAULT;
7398 name = lock_user_string(pathname);
7399 if (!name) {
7400 return -TARGET_EFAULT;
7403 total_size = sizeof(struct file_handle) + size;
7404 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7405 if (!target_fh) {
7406 unlock_user(name, pathname, 0);
7407 return -TARGET_EFAULT;
7410 fh = g_malloc0(total_size);
7411 fh->handle_bytes = size;
7413 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7414 unlock_user(name, pathname, 0);
7416 /* man name_to_handle_at(2):
7417 * Other than the use of the handle_bytes field, the caller should treat
7418 * the file_handle structure as an opaque data type
7421 memcpy(target_fh, fh, total_size);
7422 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7423 target_fh->handle_type = tswap32(fh->handle_type);
7424 g_free(fh);
7425 unlock_user(target_fh, handle, total_size);
7427 if (put_user_s32(mid, mount_id)) {
7428 return -TARGET_EFAULT;
7431 return ret;
7434 #endif
7436 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7437 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7438 abi_long flags)
7440 struct file_handle *target_fh;
7441 struct file_handle *fh;
7442 unsigned int size, total_size;
7443 abi_long ret;
7445 if (get_user_s32(size, handle)) {
7446 return -TARGET_EFAULT;
7449 total_size = sizeof(struct file_handle) + size;
7450 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7451 if (!target_fh) {
7452 return -TARGET_EFAULT;
7455 fh = g_memdup(target_fh, total_size);
7456 fh->handle_bytes = size;
7457 fh->handle_type = tswap32(target_fh->handle_type);
7459 ret = get_errno(open_by_handle_at(mount_fd, fh,
7460 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7462 g_free(fh);
7464 unlock_user(target_fh, handle, total_size);
7466 return ret;
7468 #endif
7470 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7472 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7474 int host_flags;
7475 target_sigset_t *target_mask;
7476 sigset_t host_mask;
7477 abi_long ret;
7479 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7480 return -TARGET_EINVAL;
7482 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7483 return -TARGET_EFAULT;
7486 target_to_host_sigset(&host_mask, target_mask);
7488 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7490 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7491 if (ret >= 0) {
7492 fd_trans_register(ret, &target_signalfd_trans);
7495 unlock_user_struct(target_mask, mask, 0);
7497 return ret;
7499 #endif
7501 /* Map host to target signal numbers for the wait family of syscalls.
7502 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits hold the terminating signal */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8-15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7515 static int open_self_cmdline(void *cpu_env, int fd)
7517 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7518 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7519 int i;
7521 for (i = 0; i < bprm->argc; i++) {
7522 size_t len = strlen(bprm->argv[i]) + 1;
7524 if (write(fd, bprm->argv[i], len) != len) {
7525 return -1;
7529 return 0;
/*
 * Emulate /proc/self/maps for the guest: walk the host process's own
 * mappings (read_self_maps()) and print, in the usual maps format, only
 * the ranges that are visible in the guest address space, expressed as
 * guest virtual addresses.  Always returns 0.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end of the range to the last host address that
             * still maps to a valid guest address. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;

            /* Skip ranges the guest page tables don't actually cover
             * with these permissions. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* Label the guest stack; everything else keeps the host path. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the path always starts at a fixed column (73). */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
/*
 * Emulate /proc/self/stat for the guest.  Only three fields are filled
 * with real data: field 0 (pid), field 1 (comm, the basename of argv[0]
 * truncated to 15 chars as the kernel does) and field 27 (startstack).
 * The remaining fields of the 44 emitted are written as literal "0".
 * Returns 0 on success, -1 on a short or failed write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        /* Each field is written separately; fail on any short write. */
        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Emulate /proc/self/auxv for the guest: copy the auxiliary vector that
 * was saved on the guest stack at exec time out to fd.  Always returns
 * 0, even if the copy could not be completed.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr and len have been advanced/decremented by the
         * loop above, so unlock_user() sees the post-copy values rather
         * than the ones lock_user() returned — confirm this is intended
         * (it is harmless when unlock_user ignores its arguments).
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return 1 iff filename names /proc/self/<entry> or /proc/<pid>/<entry>
 * where <pid> is this process's own pid; otherwise return 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    char pid_prefix[80];
    const char *rest = filename;

    if (strncmp(rest, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    rest += strlen(proc_prefix);

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric directory only matches if it is our own pid. */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7680 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7681 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake /proc open table. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7686 #endif
7688 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7689 static int open_net_route(void *cpu_env, int fd)
7691 FILE *fp;
7692 char *line = NULL;
7693 size_t len = 0;
7694 ssize_t read;
7696 fp = fopen("/proc/net/route", "r");
7697 if (fp == NULL) {
7698 return -1;
7701 /* read header */
7703 read = getline(&line, &len, fp);
7704 dprintf(fd, "%s", line);
7706 /* read routes */
7708 while ((read = getline(&line, &len, fp)) != -1) {
7709 char iface[16];
7710 uint32_t dest, gw, mask;
7711 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7712 int fields;
7714 fields = sscanf(line,
7715 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7716 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7717 &mask, &mtu, &window, &irtt);
7718 if (fields != 11) {
7719 continue;
7721 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7722 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7723 metric, tswap32(mask), mtu, window, irtt);
7726 free(line);
7727 fclose(fp);
7729 return 0;
7731 #endif
7733 #if defined(TARGET_SPARC)
/* Guest view of /proc/cpuinfo on SPARC: advertise a generic sun4u box. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
7739 #endif
7741 #if defined(TARGET_HPPA)
/* Guest view of /proc/cpuinfo on HPPA: describe the B160L machine QEMU models. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char *const lines[] = {
        "cpu family\t: PA-RISC 1.1e\n",
        "cpu\t\t: PA7300LC (PCX-L2)\n",
        "capabilities\t: os32\n",
        "model\t\t: 9000/778/B160L\n",
        "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n",
    };
    size_t i;

    for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
        dprintf(fd, "%s", lines[i]);
    }
    return 0;
}
7751 #endif
7753 #if defined(TARGET_M68K)
/* Guest view of /proc/hardware on m68k: identify the machine as qemu-m68k. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char model_line[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", model_line);
    return 0;
}
7759 #endif
/*
 * Common implementation for open(2)/openat(2).  Certain /proc paths are
 * intercepted and replaced with QEMU-generated contents describing the
 * guest rather than the host; everything else falls through to the host
 * openat().  Returns an fd on success or -1 with errno set.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* path (or entry) to match */
        int (*fill)(void *cpu_env, int fd);         /* writes fake contents to fd */
        int (*cmp)(const char *s1, const char *s2); /* matcher for pathname */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: reuse the exec fd if the loader saved one. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the anonymous file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill failure's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the caller reads the fake contents from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7827 #define TIMER_MAGIC 0x0caf0000
7828 #define TIMER_MAGIC_MASK 0xffff0000
7830 /* Convert QEMU provided timer ID back to internal 16bit index format */
7831 static target_timer_t get_timer_id(abi_long arg)
7833 target_timer_t timerid = arg;
7835 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7836 return -TARGET_EINVAL;
7839 timerid &= 0xffff;
7841 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7842 return -TARGET_EINVAL;
7845 return timerid;
/*
 * Copy a guest CPU-affinity mask (target_size bytes of abi_ulong words
 * at target_addr) into host_mask, translating word size and byte order
 * bit by bit.  host_mask is zeroed first; bits beyond target_size stay
 * clear.  Returns 0 or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Callers must size the host buffer to hold every guest bit. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        /* __get_user byte-swaps each guest word as needed. */
        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_cpu_mask(): copy a host CPU-affinity mask
 * into guest memory at target_addr, translating word size and byte
 * order bit by bit.  Only target_size bytes are written.  Returns 0 or
 * -TARGET_EFAULT.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* The host mask must cover at least as many bits as we will write. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        /* __put_user byte-swaps each word into guest order as needed. */
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7915 /* This is an internal helper for do_syscall so that it is easier
7916 * to have a single return point, so that actions, such as logging
7917 * of syscall results, can be performed.
7918 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7920 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7921 abi_long arg2, abi_long arg3, abi_long arg4,
7922 abi_long arg5, abi_long arg6, abi_long arg7,
7923 abi_long arg8)
7925 CPUState *cpu = env_cpu(cpu_env);
7926 abi_long ret;
7927 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7928 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7929 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7930 || defined(TARGET_NR_statx)
7931 struct stat st;
7932 #endif
7933 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7934 || defined(TARGET_NR_fstatfs)
7935 struct statfs stfs;
7936 #endif
7937 void *p;
7939 switch(num) {
7940 case TARGET_NR_exit:
7941 /* In old applications this may be used to implement _exit(2).
7942 However in threaded applictions it is used for thread termination,
7943 and _exit_group is used for application termination.
7944 Do thread termination if we have more then one thread. */
7946 if (block_signals()) {
7947 return -TARGET_ERESTARTSYS;
7950 pthread_mutex_lock(&clone_lock);
7952 if (CPU_NEXT(first_cpu)) {
7953 TaskState *ts = cpu->opaque;
7955 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7956 object_unref(OBJECT(cpu));
7958 * At this point the CPU should be unrealized and removed
7959 * from cpu lists. We can clean-up the rest of the thread
7960 * data without the lock held.
7963 pthread_mutex_unlock(&clone_lock);
7965 if (ts->child_tidptr) {
7966 put_user_u32(0, ts->child_tidptr);
7967 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7968 NULL, NULL, 0);
7970 thread_cpu = NULL;
7971 g_free(ts);
7972 rcu_unregister_thread();
7973 pthread_exit(NULL);
7976 pthread_mutex_unlock(&clone_lock);
7977 preexit_cleanup(cpu_env, arg1);
7978 _exit(arg1);
7979 return 0; /* avoid warning */
7980 case TARGET_NR_read:
7981 if (arg2 == 0 && arg3 == 0) {
7982 return get_errno(safe_read(arg1, 0, 0));
7983 } else {
7984 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7985 return -TARGET_EFAULT;
7986 ret = get_errno(safe_read(arg1, p, arg3));
7987 if (ret >= 0 &&
7988 fd_trans_host_to_target_data(arg1)) {
7989 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7991 unlock_user(p, arg2, ret);
7993 return ret;
7994 case TARGET_NR_write:
7995 if (arg2 == 0 && arg3 == 0) {
7996 return get_errno(safe_write(arg1, 0, 0));
7998 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7999 return -TARGET_EFAULT;
8000 if (fd_trans_target_to_host_data(arg1)) {
8001 void *copy = g_malloc(arg3);
8002 memcpy(copy, p, arg3);
8003 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8004 if (ret >= 0) {
8005 ret = get_errno(safe_write(arg1, copy, ret));
8007 g_free(copy);
8008 } else {
8009 ret = get_errno(safe_write(arg1, p, arg3));
8011 unlock_user(p, arg2, 0);
8012 return ret;
8014 #ifdef TARGET_NR_open
8015 case TARGET_NR_open:
8016 if (!(p = lock_user_string(arg1)))
8017 return -TARGET_EFAULT;
8018 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8019 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8020 arg3));
8021 fd_trans_unregister(ret);
8022 unlock_user(p, arg1, 0);
8023 return ret;
8024 #endif
8025 case TARGET_NR_openat:
8026 if (!(p = lock_user_string(arg2)))
8027 return -TARGET_EFAULT;
8028 ret = get_errno(do_openat(cpu_env, arg1, p,
8029 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8030 arg4));
8031 fd_trans_unregister(ret);
8032 unlock_user(p, arg2, 0);
8033 return ret;
8034 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8035 case TARGET_NR_name_to_handle_at:
8036 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8037 return ret;
8038 #endif
8039 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8040 case TARGET_NR_open_by_handle_at:
8041 ret = do_open_by_handle_at(arg1, arg2, arg3);
8042 fd_trans_unregister(ret);
8043 return ret;
8044 #endif
8045 case TARGET_NR_close:
8046 fd_trans_unregister(arg1);
8047 return get_errno(close(arg1));
8049 case TARGET_NR_brk:
8050 return do_brk(arg1);
8051 #ifdef TARGET_NR_fork
8052 case TARGET_NR_fork:
8053 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8054 #endif
8055 #ifdef TARGET_NR_waitpid
8056 case TARGET_NR_waitpid:
8058 int status;
8059 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8060 if (!is_error(ret) && arg2 && ret
8061 && put_user_s32(host_to_target_waitstatus(status), arg2))
8062 return -TARGET_EFAULT;
8064 return ret;
8065 #endif
8066 #ifdef TARGET_NR_waitid
8067 case TARGET_NR_waitid:
8069 siginfo_t info;
8070 info.si_pid = 0;
8071 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8072 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8073 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8074 return -TARGET_EFAULT;
8075 host_to_target_siginfo(p, &info);
8076 unlock_user(p, arg3, sizeof(target_siginfo_t));
8079 return ret;
8080 #endif
8081 #ifdef TARGET_NR_creat /* not on alpha */
8082 case TARGET_NR_creat:
8083 if (!(p = lock_user_string(arg1)))
8084 return -TARGET_EFAULT;
8085 ret = get_errno(creat(p, arg2));
8086 fd_trans_unregister(ret);
8087 unlock_user(p, arg1, 0);
8088 return ret;
8089 #endif
8090 #ifdef TARGET_NR_link
8091 case TARGET_NR_link:
8093 void * p2;
8094 p = lock_user_string(arg1);
8095 p2 = lock_user_string(arg2);
8096 if (!p || !p2)
8097 ret = -TARGET_EFAULT;
8098 else
8099 ret = get_errno(link(p, p2));
8100 unlock_user(p2, arg2, 0);
8101 unlock_user(p, arg1, 0);
8103 return ret;
8104 #endif
8105 #if defined(TARGET_NR_linkat)
8106 case TARGET_NR_linkat:
8108 void * p2 = NULL;
8109 if (!arg2 || !arg4)
8110 return -TARGET_EFAULT;
8111 p = lock_user_string(arg2);
8112 p2 = lock_user_string(arg4);
8113 if (!p || !p2)
8114 ret = -TARGET_EFAULT;
8115 else
8116 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8117 unlock_user(p, arg2, 0);
8118 unlock_user(p2, arg4, 0);
8120 return ret;
8121 #endif
8122 #ifdef TARGET_NR_unlink
8123 case TARGET_NR_unlink:
8124 if (!(p = lock_user_string(arg1)))
8125 return -TARGET_EFAULT;
8126 ret = get_errno(unlink(p));
8127 unlock_user(p, arg1, 0);
8128 return ret;
8129 #endif
8130 #if defined(TARGET_NR_unlinkat)
8131 case TARGET_NR_unlinkat:
8132 if (!(p = lock_user_string(arg2)))
8133 return -TARGET_EFAULT;
8134 ret = get_errno(unlinkat(arg1, p, arg3));
8135 unlock_user(p, arg2, 0);
8136 return ret;
8137 #endif
8138 case TARGET_NR_execve:
8140 char **argp, **envp;
8141 int argc, envc;
8142 abi_ulong gp;
8143 abi_ulong guest_argp;
8144 abi_ulong guest_envp;
8145 abi_ulong addr;
8146 char **q;
8147 int total_size = 0;
8149 argc = 0;
8150 guest_argp = arg2;
8151 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8152 if (get_user_ual(addr, gp))
8153 return -TARGET_EFAULT;
8154 if (!addr)
8155 break;
8156 argc++;
8158 envc = 0;
8159 guest_envp = arg3;
8160 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8161 if (get_user_ual(addr, gp))
8162 return -TARGET_EFAULT;
8163 if (!addr)
8164 break;
8165 envc++;
8168 argp = g_new0(char *, argc + 1);
8169 envp = g_new0(char *, envc + 1);
8171 for (gp = guest_argp, q = argp; gp;
8172 gp += sizeof(abi_ulong), q++) {
8173 if (get_user_ual(addr, gp))
8174 goto execve_efault;
8175 if (!addr)
8176 break;
8177 if (!(*q = lock_user_string(addr)))
8178 goto execve_efault;
8179 total_size += strlen(*q) + 1;
8181 *q = NULL;
8183 for (gp = guest_envp, q = envp; gp;
8184 gp += sizeof(abi_ulong), q++) {
8185 if (get_user_ual(addr, gp))
8186 goto execve_efault;
8187 if (!addr)
8188 break;
8189 if (!(*q = lock_user_string(addr)))
8190 goto execve_efault;
8191 total_size += strlen(*q) + 1;
8193 *q = NULL;
8195 if (!(p = lock_user_string(arg1)))
8196 goto execve_efault;
8197 /* Although execve() is not an interruptible syscall it is
8198 * a special case where we must use the safe_syscall wrapper:
8199 * if we allow a signal to happen before we make the host
8200 * syscall then we will 'lose' it, because at the point of
8201 * execve the process leaves QEMU's control. So we use the
8202 * safe syscall wrapper to ensure that we either take the
8203 * signal as a guest signal, or else it does not happen
8204 * before the execve completes and makes it the other
8205 * program's problem.
8207 ret = get_errno(safe_execve(p, argp, envp));
8208 unlock_user(p, arg1, 0);
8210 goto execve_end;
8212 execve_efault:
8213 ret = -TARGET_EFAULT;
8215 execve_end:
8216 for (gp = guest_argp, q = argp; *q;
8217 gp += sizeof(abi_ulong), q++) {
8218 if (get_user_ual(addr, gp)
8219 || !addr)
8220 break;
8221 unlock_user(*q, addr, 0);
8223 for (gp = guest_envp, q = envp; *q;
8224 gp += sizeof(abi_ulong), q++) {
8225 if (get_user_ual(addr, gp)
8226 || !addr)
8227 break;
8228 unlock_user(*q, addr, 0);
8231 g_free(argp);
8232 g_free(envp);
8234 return ret;
8235 case TARGET_NR_chdir:
8236 if (!(p = lock_user_string(arg1)))
8237 return -TARGET_EFAULT;
8238 ret = get_errno(chdir(p));
8239 unlock_user(p, arg1, 0);
8240 return ret;
8241 #ifdef TARGET_NR_time
8242 case TARGET_NR_time:
8244 time_t host_time;
8245 ret = get_errno(time(&host_time));
8246 if (!is_error(ret)
8247 && arg1
8248 && put_user_sal(host_time, arg1))
8249 return -TARGET_EFAULT;
8251 return ret;
8252 #endif
8253 #ifdef TARGET_NR_mknod
8254 case TARGET_NR_mknod:
8255 if (!(p = lock_user_string(arg1)))
8256 return -TARGET_EFAULT;
8257 ret = get_errno(mknod(p, arg2, arg3));
8258 unlock_user(p, arg1, 0);
8259 return ret;
8260 #endif
8261 #if defined(TARGET_NR_mknodat)
8262 case TARGET_NR_mknodat:
8263 if (!(p = lock_user_string(arg2)))
8264 return -TARGET_EFAULT;
8265 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8266 unlock_user(p, arg2, 0);
8267 return ret;
8268 #endif
8269 #ifdef TARGET_NR_chmod
8270 case TARGET_NR_chmod:
8271 if (!(p = lock_user_string(arg1)))
8272 return -TARGET_EFAULT;
8273 ret = get_errno(chmod(p, arg2));
8274 unlock_user(p, arg1, 0);
8275 return ret;
8276 #endif
8277 #ifdef TARGET_NR_lseek
8278 case TARGET_NR_lseek:
8279 return get_errno(lseek(arg1, arg2, arg3));
8280 #endif
8281 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8282 /* Alpha specific */
8283 case TARGET_NR_getxpid:
8284 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8285 return get_errno(getpid());
8286 #endif
8287 #ifdef TARGET_NR_getpid
8288 case TARGET_NR_getpid:
8289 return get_errno(getpid());
8290 #endif
8291 case TARGET_NR_mount:
8293 /* need to look at the data field */
8294 void *p2, *p3;
8296 if (arg1) {
8297 p = lock_user_string(arg1);
8298 if (!p) {
8299 return -TARGET_EFAULT;
8301 } else {
8302 p = NULL;
8305 p2 = lock_user_string(arg2);
8306 if (!p2) {
8307 if (arg1) {
8308 unlock_user(p, arg1, 0);
8310 return -TARGET_EFAULT;
8313 if (arg3) {
8314 p3 = lock_user_string(arg3);
8315 if (!p3) {
8316 if (arg1) {
8317 unlock_user(p, arg1, 0);
8319 unlock_user(p2, arg2, 0);
8320 return -TARGET_EFAULT;
8322 } else {
8323 p3 = NULL;
8326 /* FIXME - arg5 should be locked, but it isn't clear how to
8327 * do that since it's not guaranteed to be a NULL-terminated
8328 * string.
8330 if (!arg5) {
8331 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8332 } else {
8333 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8335 ret = get_errno(ret);
8337 if (arg1) {
8338 unlock_user(p, arg1, 0);
8340 unlock_user(p2, arg2, 0);
8341 if (arg3) {
8342 unlock_user(p3, arg3, 0);
8345 return ret;
8346 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8347 #if defined(TARGET_NR_umount)
8348 case TARGET_NR_umount:
8349 #endif
8350 #if defined(TARGET_NR_oldumount)
8351 case TARGET_NR_oldumount:
8352 #endif
8353 if (!(p = lock_user_string(arg1)))
8354 return -TARGET_EFAULT;
8355 ret = get_errno(umount(p));
8356 unlock_user(p, arg1, 0);
8357 return ret;
8358 #endif
8359 #ifdef TARGET_NR_stime /* not on alpha */
8360 case TARGET_NR_stime:
8362 struct timespec ts;
8363 ts.tv_nsec = 0;
8364 if (get_user_sal(ts.tv_sec, arg1)) {
8365 return -TARGET_EFAULT;
8367 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8369 #endif
8370 #ifdef TARGET_NR_alarm /* not on alpha */
8371 case TARGET_NR_alarm:
8372 return alarm(arg1);
8373 #endif
8374 #ifdef TARGET_NR_pause /* not on alpha */
8375 case TARGET_NR_pause:
8376 if (!block_signals()) {
8377 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8379 return -TARGET_EINTR;
8380 #endif
8381 #ifdef TARGET_NR_utime
8382 case TARGET_NR_utime:
8384 struct utimbuf tbuf, *host_tbuf;
8385 struct target_utimbuf *target_tbuf;
8386 if (arg2) {
8387 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8388 return -TARGET_EFAULT;
8389 tbuf.actime = tswapal(target_tbuf->actime);
8390 tbuf.modtime = tswapal(target_tbuf->modtime);
8391 unlock_user_struct(target_tbuf, arg2, 0);
8392 host_tbuf = &tbuf;
8393 } else {
8394 host_tbuf = NULL;
8396 if (!(p = lock_user_string(arg1)))
8397 return -TARGET_EFAULT;
8398 ret = get_errno(utime(p, host_tbuf));
8399 unlock_user(p, arg1, 0);
8401 return ret;
8402 #endif
8403 #ifdef TARGET_NR_utimes
8404 case TARGET_NR_utimes:
8406 struct timeval *tvp, tv[2];
8407 if (arg2) {
8408 if (copy_from_user_timeval(&tv[0], arg2)
8409 || copy_from_user_timeval(&tv[1],
8410 arg2 + sizeof(struct target_timeval)))
8411 return -TARGET_EFAULT;
8412 tvp = tv;
8413 } else {
8414 tvp = NULL;
8416 if (!(p = lock_user_string(arg1)))
8417 return -TARGET_EFAULT;
8418 ret = get_errno(utimes(p, tvp));
8419 unlock_user(p, arg1, 0);
8421 return ret;
8422 #endif
8423 #if defined(TARGET_NR_futimesat)
8424 case TARGET_NR_futimesat:
8426 struct timeval *tvp, tv[2];
8427 if (arg3) {
8428 if (copy_from_user_timeval(&tv[0], arg3)
8429 || copy_from_user_timeval(&tv[1],
8430 arg3 + sizeof(struct target_timeval)))
8431 return -TARGET_EFAULT;
8432 tvp = tv;
8433 } else {
8434 tvp = NULL;
8436 if (!(p = lock_user_string(arg2))) {
8437 return -TARGET_EFAULT;
8439 ret = get_errno(futimesat(arg1, path(p), tvp));
8440 unlock_user(p, arg2, 0);
8442 return ret;
8443 #endif
8444 #ifdef TARGET_NR_access
8445 case TARGET_NR_access:
8446 if (!(p = lock_user_string(arg1))) {
8447 return -TARGET_EFAULT;
8449 ret = get_errno(access(path(p), arg2));
8450 unlock_user(p, arg1, 0);
8451 return ret;
8452 #endif
8453 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8454 case TARGET_NR_faccessat:
8455 if (!(p = lock_user_string(arg2))) {
8456 return -TARGET_EFAULT;
8458 ret = get_errno(faccessat(arg1, p, arg3, 0));
8459 unlock_user(p, arg2, 0);
8460 return ret;
8461 #endif
8462 #ifdef TARGET_NR_nice /* not on alpha */
8463 case TARGET_NR_nice:
8464 return get_errno(nice(arg1));
8465 #endif
8466 case TARGET_NR_sync:
8467 sync();
8468 return 0;
8469 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8470 case TARGET_NR_syncfs:
8471 return get_errno(syncfs(arg1));
8472 #endif
8473 case TARGET_NR_kill:
8474 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8475 #ifdef TARGET_NR_rename
8476 case TARGET_NR_rename:
8478 void *p2;
8479 p = lock_user_string(arg1);
8480 p2 = lock_user_string(arg2);
8481 if (!p || !p2)
8482 ret = -TARGET_EFAULT;
8483 else
8484 ret = get_errno(rename(p, p2));
8485 unlock_user(p2, arg2, 0);
8486 unlock_user(p, arg1, 0);
8488 return ret;
8489 #endif
8490 #if defined(TARGET_NR_renameat)
8491 case TARGET_NR_renameat:
8493 void *p2;
8494 p = lock_user_string(arg2);
8495 p2 = lock_user_string(arg4);
8496 if (!p || !p2)
8497 ret = -TARGET_EFAULT;
8498 else
8499 ret = get_errno(renameat(arg1, p, arg3, p2));
8500 unlock_user(p2, arg4, 0);
8501 unlock_user(p, arg2, 0);
8503 return ret;
8504 #endif
8505 #if defined(TARGET_NR_renameat2)
8506 case TARGET_NR_renameat2:
8508 void *p2;
8509 p = lock_user_string(arg2);
8510 p2 = lock_user_string(arg4);
8511 if (!p || !p2) {
8512 ret = -TARGET_EFAULT;
8513 } else {
8514 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8516 unlock_user(p2, arg4, 0);
8517 unlock_user(p, arg2, 0);
8519 return ret;
8520 #endif
8521 #ifdef TARGET_NR_mkdir
8522 case TARGET_NR_mkdir:
8523 if (!(p = lock_user_string(arg1)))
8524 return -TARGET_EFAULT;
8525 ret = get_errno(mkdir(p, arg2));
8526 unlock_user(p, arg1, 0);
8527 return ret;
8528 #endif
8529 #if defined(TARGET_NR_mkdirat)
8530 case TARGET_NR_mkdirat:
8531 if (!(p = lock_user_string(arg2)))
8532 return -TARGET_EFAULT;
8533 ret = get_errno(mkdirat(arg1, p, arg3));
8534 unlock_user(p, arg2, 0);
8535 return ret;
8536 #endif
8537 #ifdef TARGET_NR_rmdir
8538 case TARGET_NR_rmdir:
8539 if (!(p = lock_user_string(arg1)))
8540 return -TARGET_EFAULT;
8541 ret = get_errno(rmdir(p));
8542 unlock_user(p, arg1, 0);
8543 return ret;
8544 #endif
8545 case TARGET_NR_dup:
8546 ret = get_errno(dup(arg1));
8547 if (ret >= 0) {
8548 fd_trans_dup(arg1, ret);
8550 return ret;
8551 #ifdef TARGET_NR_pipe
8552 case TARGET_NR_pipe:
8553 return do_pipe(cpu_env, arg1, 0, 0);
8554 #endif
8555 #ifdef TARGET_NR_pipe2
8556 case TARGET_NR_pipe2:
8557 return do_pipe(cpu_env, arg1,
8558 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8559 #endif
8560 case TARGET_NR_times:
8562 struct target_tms *tmsp;
8563 struct tms tms;
8564 ret = get_errno(times(&tms));
8565 if (arg1) {
8566 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8567 if (!tmsp)
8568 return -TARGET_EFAULT;
8569 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8570 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8571 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8572 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8574 if (!is_error(ret))
8575 ret = host_to_target_clock_t(ret);
8577 return ret;
8578 case TARGET_NR_acct:
8579 if (arg1 == 0) {
8580 ret = get_errno(acct(NULL));
8581 } else {
8582 if (!(p = lock_user_string(arg1))) {
8583 return -TARGET_EFAULT;
8585 ret = get_errno(acct(path(p)));
8586 unlock_user(p, arg1, 0);
8588 return ret;
8589 #ifdef TARGET_NR_umount2
8590 case TARGET_NR_umount2:
8591 if (!(p = lock_user_string(arg1)))
8592 return -TARGET_EFAULT;
8593 ret = get_errno(umount2(p, arg2));
8594 unlock_user(p, arg1, 0);
8595 return ret;
8596 #endif
8597 case TARGET_NR_ioctl:
8598 return do_ioctl(arg1, arg2, arg3);
8599 #ifdef TARGET_NR_fcntl
8600 case TARGET_NR_fcntl:
8601 return do_fcntl(arg1, arg2, arg3);
8602 #endif
8603 case TARGET_NR_setpgid:
8604 return get_errno(setpgid(arg1, arg2));
8605 case TARGET_NR_umask:
8606 return get_errno(umask(arg1));
8607 case TARGET_NR_chroot:
8608 if (!(p = lock_user_string(arg1)))
8609 return -TARGET_EFAULT;
8610 ret = get_errno(chroot(p));
8611 unlock_user(p, arg1, 0);
8612 return ret;
8613 #ifdef TARGET_NR_dup2
8614 case TARGET_NR_dup2:
8615 ret = get_errno(dup2(arg1, arg2));
8616 if (ret >= 0) {
8617 fd_trans_dup(arg1, arg2);
8619 return ret;
8620 #endif
8621 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8622 case TARGET_NR_dup3:
8624 int host_flags;
8626 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8627 return -EINVAL;
8629 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8630 ret = get_errno(dup3(arg1, arg2, host_flags));
8631 if (ret >= 0) {
8632 fd_trans_dup(arg1, arg2);
8634 return ret;
8636 #endif
8637 #ifdef TARGET_NR_getppid /* not on alpha */
8638 case TARGET_NR_getppid:
8639 return get_errno(getppid());
8640 #endif
8641 #ifdef TARGET_NR_getpgrp
8642 case TARGET_NR_getpgrp:
8643 return get_errno(getpgrp());
8644 #endif
8645 case TARGET_NR_setsid:
8646 return get_errno(setsid());
8647 #ifdef TARGET_NR_sigaction
8648 case TARGET_NR_sigaction:
8650 #if defined(TARGET_ALPHA)
8651 struct target_sigaction act, oact, *pact = 0;
8652 struct target_old_sigaction *old_act;
8653 if (arg2) {
8654 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8655 return -TARGET_EFAULT;
8656 act._sa_handler = old_act->_sa_handler;
8657 target_siginitset(&act.sa_mask, old_act->sa_mask);
8658 act.sa_flags = old_act->sa_flags;
8659 act.sa_restorer = 0;
8660 unlock_user_struct(old_act, arg2, 0);
8661 pact = &act;
8663 ret = get_errno(do_sigaction(arg1, pact, &oact));
8664 if (!is_error(ret) && arg3) {
8665 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8666 return -TARGET_EFAULT;
8667 old_act->_sa_handler = oact._sa_handler;
8668 old_act->sa_mask = oact.sa_mask.sig[0];
8669 old_act->sa_flags = oact.sa_flags;
8670 unlock_user_struct(old_act, arg3, 1);
8672 #elif defined(TARGET_MIPS)
8673 struct target_sigaction act, oact, *pact, *old_act;
8675 if (arg2) {
8676 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8677 return -TARGET_EFAULT;
8678 act._sa_handler = old_act->_sa_handler;
8679 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8680 act.sa_flags = old_act->sa_flags;
8681 unlock_user_struct(old_act, arg2, 0);
8682 pact = &act;
8683 } else {
8684 pact = NULL;
8687 ret = get_errno(do_sigaction(arg1, pact, &oact));
8689 if (!is_error(ret) && arg3) {
8690 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8691 return -TARGET_EFAULT;
8692 old_act->_sa_handler = oact._sa_handler;
8693 old_act->sa_flags = oact.sa_flags;
8694 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8695 old_act->sa_mask.sig[1] = 0;
8696 old_act->sa_mask.sig[2] = 0;
8697 old_act->sa_mask.sig[3] = 0;
8698 unlock_user_struct(old_act, arg3, 1);
8700 #else
8701 struct target_old_sigaction *old_act;
8702 struct target_sigaction act, oact, *pact;
8703 if (arg2) {
8704 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8705 return -TARGET_EFAULT;
8706 act._sa_handler = old_act->_sa_handler;
8707 target_siginitset(&act.sa_mask, old_act->sa_mask);
8708 act.sa_flags = old_act->sa_flags;
8709 act.sa_restorer = old_act->sa_restorer;
8710 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8711 act.ka_restorer = 0;
8712 #endif
8713 unlock_user_struct(old_act, arg2, 0);
8714 pact = &act;
8715 } else {
8716 pact = NULL;
8718 ret = get_errno(do_sigaction(arg1, pact, &oact));
8719 if (!is_error(ret) && arg3) {
8720 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8721 return -TARGET_EFAULT;
8722 old_act->_sa_handler = oact._sa_handler;
8723 old_act->sa_mask = oact.sa_mask.sig[0];
8724 old_act->sa_flags = oact.sa_flags;
8725 old_act->sa_restorer = oact.sa_restorer;
8726 unlock_user_struct(old_act, arg3, 1);
8728 #endif
8730 return ret;
8731 #endif
8732 case TARGET_NR_rt_sigaction:
8734 #if defined(TARGET_ALPHA)
8735 /* For Alpha and SPARC this is a 5 argument syscall, with
8736 * a 'restorer' parameter which must be copied into the
8737 * sa_restorer field of the sigaction struct.
8738 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8739 * and arg5 is the sigsetsize.
8740 * Alpha also has a separate rt_sigaction struct that it uses
8741 * here; SPARC uses the usual sigaction struct.
8743 struct target_rt_sigaction *rt_act;
8744 struct target_sigaction act, oact, *pact = 0;
8746 if (arg4 != sizeof(target_sigset_t)) {
8747 return -TARGET_EINVAL;
8749 if (arg2) {
8750 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8751 return -TARGET_EFAULT;
8752 act._sa_handler = rt_act->_sa_handler;
8753 act.sa_mask = rt_act->sa_mask;
8754 act.sa_flags = rt_act->sa_flags;
8755 act.sa_restorer = arg5;
8756 unlock_user_struct(rt_act, arg2, 0);
8757 pact = &act;
8759 ret = get_errno(do_sigaction(arg1, pact, &oact));
8760 if (!is_error(ret) && arg3) {
8761 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8762 return -TARGET_EFAULT;
8763 rt_act->_sa_handler = oact._sa_handler;
8764 rt_act->sa_mask = oact.sa_mask;
8765 rt_act->sa_flags = oact.sa_flags;
8766 unlock_user_struct(rt_act, arg3, 1);
8768 #else
8769 #ifdef TARGET_SPARC
8770 target_ulong restorer = arg4;
8771 target_ulong sigsetsize = arg5;
8772 #else
8773 target_ulong sigsetsize = arg4;
8774 #endif
8775 struct target_sigaction *act;
8776 struct target_sigaction *oact;
8778 if (sigsetsize != sizeof(target_sigset_t)) {
8779 return -TARGET_EINVAL;
8781 if (arg2) {
8782 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8783 return -TARGET_EFAULT;
8785 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8786 act->ka_restorer = restorer;
8787 #endif
8788 } else {
8789 act = NULL;
8791 if (arg3) {
8792 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8793 ret = -TARGET_EFAULT;
8794 goto rt_sigaction_fail;
8796 } else
8797 oact = NULL;
8798 ret = get_errno(do_sigaction(arg1, act, oact));
8799 rt_sigaction_fail:
8800 if (act)
8801 unlock_user_struct(act, arg2, 0);
8802 if (oact)
8803 unlock_user_struct(oact, arg3, 1);
8804 #endif
8806 return ret;
8807 #ifdef TARGET_NR_sgetmask /* not on alpha */
8808 case TARGET_NR_sgetmask:
8810 sigset_t cur_set;
8811 abi_ulong target_set;
8812 ret = do_sigprocmask(0, NULL, &cur_set);
8813 if (!ret) {
8814 host_to_target_old_sigset(&target_set, &cur_set);
8815 ret = target_set;
8818 return ret;
8819 #endif
8820 #ifdef TARGET_NR_ssetmask /* not on alpha */
8821 case TARGET_NR_ssetmask:
8823 sigset_t set, oset;
8824 abi_ulong target_set = arg1;
8825 target_to_host_old_sigset(&set, &target_set);
8826 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8827 if (!ret) {
8828 host_to_target_old_sigset(&target_set, &oset);
8829 ret = target_set;
8832 return ret;
8833 #endif
8834 #ifdef TARGET_NR_sigprocmask
8835 case TARGET_NR_sigprocmask:
8837 #if defined(TARGET_ALPHA)
8838 sigset_t set, oldset;
8839 abi_ulong mask;
8840 int how;
8842 switch (arg1) {
8843 case TARGET_SIG_BLOCK:
8844 how = SIG_BLOCK;
8845 break;
8846 case TARGET_SIG_UNBLOCK:
8847 how = SIG_UNBLOCK;
8848 break;
8849 case TARGET_SIG_SETMASK:
8850 how = SIG_SETMASK;
8851 break;
8852 default:
8853 return -TARGET_EINVAL;
8855 mask = arg2;
8856 target_to_host_old_sigset(&set, &mask);
8858 ret = do_sigprocmask(how, &set, &oldset);
8859 if (!is_error(ret)) {
8860 host_to_target_old_sigset(&mask, &oldset);
8861 ret = mask;
8862 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8864 #else
8865 sigset_t set, oldset, *set_ptr;
8866 int how;
8868 if (arg2) {
8869 switch (arg1) {
8870 case TARGET_SIG_BLOCK:
8871 how = SIG_BLOCK;
8872 break;
8873 case TARGET_SIG_UNBLOCK:
8874 how = SIG_UNBLOCK;
8875 break;
8876 case TARGET_SIG_SETMASK:
8877 how = SIG_SETMASK;
8878 break;
8879 default:
8880 return -TARGET_EINVAL;
8882 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8883 return -TARGET_EFAULT;
8884 target_to_host_old_sigset(&set, p);
8885 unlock_user(p, arg2, 0);
8886 set_ptr = &set;
8887 } else {
8888 how = 0;
8889 set_ptr = NULL;
8891 ret = do_sigprocmask(how, set_ptr, &oldset);
8892 if (!is_error(ret) && arg3) {
8893 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8894 return -TARGET_EFAULT;
8895 host_to_target_old_sigset(p, &oldset);
8896 unlock_user(p, arg3, sizeof(target_sigset_t));
8898 #endif
8900 return ret;
8901 #endif
8902 case TARGET_NR_rt_sigprocmask:
8904 int how = arg1;
8905 sigset_t set, oldset, *set_ptr;
8907 if (arg4 != sizeof(target_sigset_t)) {
8908 return -TARGET_EINVAL;
8911 if (arg2) {
8912 switch(how) {
8913 case TARGET_SIG_BLOCK:
8914 how = SIG_BLOCK;
8915 break;
8916 case TARGET_SIG_UNBLOCK:
8917 how = SIG_UNBLOCK;
8918 break;
8919 case TARGET_SIG_SETMASK:
8920 how = SIG_SETMASK;
8921 break;
8922 default:
8923 return -TARGET_EINVAL;
8925 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8926 return -TARGET_EFAULT;
8927 target_to_host_sigset(&set, p);
8928 unlock_user(p, arg2, 0);
8929 set_ptr = &set;
8930 } else {
8931 how = 0;
8932 set_ptr = NULL;
8934 ret = do_sigprocmask(how, set_ptr, &oldset);
8935 if (!is_error(ret) && arg3) {
8936 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8937 return -TARGET_EFAULT;
8938 host_to_target_sigset(p, &oldset);
8939 unlock_user(p, arg3, sizeof(target_sigset_t));
8942 return ret;
8943 #ifdef TARGET_NR_sigpending
8944 case TARGET_NR_sigpending:
8946 sigset_t set;
8947 ret = get_errno(sigpending(&set));
8948 if (!is_error(ret)) {
8949 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8950 return -TARGET_EFAULT;
8951 host_to_target_old_sigset(p, &set);
8952 unlock_user(p, arg1, sizeof(target_sigset_t));
8955 return ret;
8956 #endif
8957 case TARGET_NR_rt_sigpending:
8959 sigset_t set;
8961 /* Yes, this check is >, not != like most. We follow the kernel's
8962 * logic and it does it like this because it implements
8963 * NR_sigpending through the same code path, and in that case
8964 * the old_sigset_t is smaller in size.
8966 if (arg2 > sizeof(target_sigset_t)) {
8967 return -TARGET_EINVAL;
8970 ret = get_errno(sigpending(&set));
8971 if (!is_error(ret)) {
8972 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8973 return -TARGET_EFAULT;
8974 host_to_target_sigset(p, &set);
8975 unlock_user(p, arg1, sizeof(target_sigset_t));
8978 return ret;
8979 #ifdef TARGET_NR_sigsuspend
8980 case TARGET_NR_sigsuspend:
8982 TaskState *ts = cpu->opaque;
8983 #if defined(TARGET_ALPHA)
8984 abi_ulong mask = arg1;
8985 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8986 #else
8987 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8988 return -TARGET_EFAULT;
8989 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8990 unlock_user(p, arg1, 0);
8991 #endif
8992 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8993 SIGSET_T_SIZE));
8994 if (ret != -TARGET_ERESTARTSYS) {
8995 ts->in_sigsuspend = 1;
8998 return ret;
8999 #endif
9000 case TARGET_NR_rt_sigsuspend:
9002 TaskState *ts = cpu->opaque;
9004 if (arg2 != sizeof(target_sigset_t)) {
9005 return -TARGET_EINVAL;
9007 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9008 return -TARGET_EFAULT;
9009 target_to_host_sigset(&ts->sigsuspend_mask, p);
9010 unlock_user(p, arg1, 0);
9011 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9012 SIGSET_T_SIZE));
9013 if (ret != -TARGET_ERESTARTSYS) {
9014 ts->in_sigsuspend = 1;
9017 return ret;
9018 #ifdef TARGET_NR_rt_sigtimedwait
9019 case TARGET_NR_rt_sigtimedwait:
9021 sigset_t set;
9022 struct timespec uts, *puts;
9023 siginfo_t uinfo;
9025 if (arg4 != sizeof(target_sigset_t)) {
9026 return -TARGET_EINVAL;
9029 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9030 return -TARGET_EFAULT;
9031 target_to_host_sigset(&set, p);
9032 unlock_user(p, arg1, 0);
9033 if (arg3) {
9034 puts = &uts;
9035 if (target_to_host_timespec(puts, arg3)) {
9036 return -TARGET_EFAULT;
9038 } else {
9039 puts = NULL;
9041 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9042 SIGSET_T_SIZE));
9043 if (!is_error(ret)) {
9044 if (arg2) {
9045 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9047 if (!p) {
9048 return -TARGET_EFAULT;
9050 host_to_target_siginfo(p, &uinfo);
9051 unlock_user(p, arg2, sizeof(target_siginfo_t));
9053 ret = host_to_target_signal(ret);
9056 return ret;
9057 #endif
9058 #ifdef TARGET_NR_rt_sigtimedwait_time64
9059 case TARGET_NR_rt_sigtimedwait_time64:
9061 sigset_t set;
9062 struct timespec uts, *puts;
9063 siginfo_t uinfo;
9065 if (arg4 != sizeof(target_sigset_t)) {
9066 return -TARGET_EINVAL;
9069 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9070 if (!p) {
9071 return -TARGET_EFAULT;
9073 target_to_host_sigset(&set, p);
9074 unlock_user(p, arg1, 0);
9075 if (arg3) {
9076 puts = &uts;
9077 if (target_to_host_timespec64(puts, arg3)) {
9078 return -TARGET_EFAULT;
9080 } else {
9081 puts = NULL;
9083 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9084 SIGSET_T_SIZE));
9085 if (!is_error(ret)) {
9086 if (arg2) {
9087 p = lock_user(VERIFY_WRITE, arg2,
9088 sizeof(target_siginfo_t), 0);
9089 if (!p) {
9090 return -TARGET_EFAULT;
9092 host_to_target_siginfo(p, &uinfo);
9093 unlock_user(p, arg2, sizeof(target_siginfo_t));
9095 ret = host_to_target_signal(ret);
9098 return ret;
9099 #endif
9100 case TARGET_NR_rt_sigqueueinfo:
9102 siginfo_t uinfo;
9104 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9105 if (!p) {
9106 return -TARGET_EFAULT;
9108 target_to_host_siginfo(&uinfo, p);
9109 unlock_user(p, arg3, 0);
9110 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9112 return ret;
9113 case TARGET_NR_rt_tgsigqueueinfo:
9115 siginfo_t uinfo;
9117 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9118 if (!p) {
9119 return -TARGET_EFAULT;
9121 target_to_host_siginfo(&uinfo, p);
9122 unlock_user(p, arg4, 0);
9123 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9125 return ret;
9126 #ifdef TARGET_NR_sigreturn
9127 case TARGET_NR_sigreturn:
9128 if (block_signals()) {
9129 return -TARGET_ERESTARTSYS;
9131 return do_sigreturn(cpu_env);
9132 #endif
9133 case TARGET_NR_rt_sigreturn:
9134 if (block_signals()) {
9135 return -TARGET_ERESTARTSYS;
9137 return do_rt_sigreturn(cpu_env);
9138 case TARGET_NR_sethostname:
9139 if (!(p = lock_user_string(arg1)))
9140 return -TARGET_EFAULT;
9141 ret = get_errno(sethostname(p, arg2));
9142 unlock_user(p, arg1, 0);
9143 return ret;
9144 #ifdef TARGET_NR_setrlimit
9145 case TARGET_NR_setrlimit:
9147 int resource = target_to_host_resource(arg1);
9148 struct target_rlimit *target_rlim;
9149 struct rlimit rlim;
9150 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9151 return -TARGET_EFAULT;
9152 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9153 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9154 unlock_user_struct(target_rlim, arg2, 0);
9156 * If we just passed through resource limit settings for memory then
9157 * they would also apply to QEMU's own allocations, and QEMU will
9158 * crash or hang or die if its allocations fail. Ideally we would
9159 * track the guest allocations in QEMU and apply the limits ourselves.
9160 * For now, just tell the guest the call succeeded but don't actually
9161 * limit anything.
9163 if (resource != RLIMIT_AS &&
9164 resource != RLIMIT_DATA &&
9165 resource != RLIMIT_STACK) {
9166 return get_errno(setrlimit(resource, &rlim));
9167 } else {
9168 return 0;
9171 #endif
9172 #ifdef TARGET_NR_getrlimit
9173 case TARGET_NR_getrlimit:
9175 int resource = target_to_host_resource(arg1);
9176 struct target_rlimit *target_rlim;
9177 struct rlimit rlim;
9179 ret = get_errno(getrlimit(resource, &rlim));
9180 if (!is_error(ret)) {
9181 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9182 return -TARGET_EFAULT;
9183 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9184 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9185 unlock_user_struct(target_rlim, arg2, 1);
9188 return ret;
9189 #endif
9190 case TARGET_NR_getrusage:
9192 struct rusage rusage;
9193 ret = get_errno(getrusage(arg1, &rusage));
9194 if (!is_error(ret)) {
9195 ret = host_to_target_rusage(arg2, &rusage);
9198 return ret;
9199 #if defined(TARGET_NR_gettimeofday)
9200 case TARGET_NR_gettimeofday:
9202 struct timeval tv;
9203 struct timezone tz;
9205 ret = get_errno(gettimeofday(&tv, &tz));
9206 if (!is_error(ret)) {
9207 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9208 return -TARGET_EFAULT;
9210 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9211 return -TARGET_EFAULT;
9215 return ret;
9216 #endif
9217 #if defined(TARGET_NR_settimeofday)
9218 case TARGET_NR_settimeofday:
9220 struct timeval tv, *ptv = NULL;
9221 struct timezone tz, *ptz = NULL;
9223 if (arg1) {
9224 if (copy_from_user_timeval(&tv, arg1)) {
9225 return -TARGET_EFAULT;
9227 ptv = &tv;
9230 if (arg2) {
9231 if (copy_from_user_timezone(&tz, arg2)) {
9232 return -TARGET_EFAULT;
9234 ptz = &tz;
9237 return get_errno(settimeofday(ptv, ptz));
9239 #endif
9240 #if defined(TARGET_NR_select)
9241 case TARGET_NR_select:
9242 #if defined(TARGET_WANT_NI_OLD_SELECT)
9243 /* some architectures used to have old_select here
9244 * but now ENOSYS it.
9246 ret = -TARGET_ENOSYS;
9247 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9248 ret = do_old_select(arg1);
9249 #else
9250 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9251 #endif
9252 return ret;
9253 #endif
9254 #ifdef TARGET_NR_pselect6
9255 case TARGET_NR_pselect6:
9257 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9258 fd_set rfds, wfds, efds;
9259 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9260 struct timespec ts, *ts_ptr;
9263 * The 6th arg is actually two args smashed together,
9264 * so we cannot use the C library.
9266 sigset_t set;
9267 struct {
9268 sigset_t *set;
9269 size_t size;
9270 } sig, *sig_ptr;
9272 abi_ulong arg_sigset, arg_sigsize, *arg7;
9273 target_sigset_t *target_sigset;
9275 n = arg1;
9276 rfd_addr = arg2;
9277 wfd_addr = arg3;
9278 efd_addr = arg4;
9279 ts_addr = arg5;
9281 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9282 if (ret) {
9283 return ret;
9285 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9286 if (ret) {
9287 return ret;
9289 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9290 if (ret) {
9291 return ret;
9295 * This takes a timespec, and not a timeval, so we cannot
9296 * use the do_select() helper ...
9298 if (ts_addr) {
9299 if (target_to_host_timespec(&ts, ts_addr)) {
9300 return -TARGET_EFAULT;
9302 ts_ptr = &ts;
9303 } else {
9304 ts_ptr = NULL;
9307 /* Extract the two packed args for the sigset */
9308 if (arg6) {
9309 sig_ptr = &sig;
9310 sig.size = SIGSET_T_SIZE;
9312 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9313 if (!arg7) {
9314 return -TARGET_EFAULT;
9316 arg_sigset = tswapal(arg7[0]);
9317 arg_sigsize = tswapal(arg7[1]);
9318 unlock_user(arg7, arg6, 0);
9320 if (arg_sigset) {
9321 sig.set = &set;
9322 if (arg_sigsize != sizeof(*target_sigset)) {
9323 /* Like the kernel, we enforce correct size sigsets */
9324 return -TARGET_EINVAL;
9326 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9327 sizeof(*target_sigset), 1);
9328 if (!target_sigset) {
9329 return -TARGET_EFAULT;
9331 target_to_host_sigset(&set, target_sigset);
9332 unlock_user(target_sigset, arg_sigset, 0);
9333 } else {
9334 sig.set = NULL;
9336 } else {
9337 sig_ptr = NULL;
9340 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9341 ts_ptr, sig_ptr));
9343 if (!is_error(ret)) {
9344 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9345 return -TARGET_EFAULT;
9346 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9347 return -TARGET_EFAULT;
9348 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9349 return -TARGET_EFAULT;
9351 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9352 return -TARGET_EFAULT;
9355 return ret;
9356 #endif
9357 #ifdef TARGET_NR_symlink
9358 case TARGET_NR_symlink:
9360 void *p2;
9361 p = lock_user_string(arg1);
9362 p2 = lock_user_string(arg2);
9363 if (!p || !p2)
9364 ret = -TARGET_EFAULT;
9365 else
9366 ret = get_errno(symlink(p, p2));
9367 unlock_user(p2, arg2, 0);
9368 unlock_user(p, arg1, 0);
9370 return ret;
9371 #endif
9372 #if defined(TARGET_NR_symlinkat)
9373 case TARGET_NR_symlinkat:
9375 void *p2;
9376 p = lock_user_string(arg1);
9377 p2 = lock_user_string(arg3);
9378 if (!p || !p2)
9379 ret = -TARGET_EFAULT;
9380 else
9381 ret = get_errno(symlinkat(p, arg2, p2));
9382 unlock_user(p2, arg3, 0);
9383 unlock_user(p, arg1, 0);
9385 return ret;
9386 #endif
9387 #ifdef TARGET_NR_readlink
9388 case TARGET_NR_readlink:
9390 void *p2;
9391 p = lock_user_string(arg1);
9392 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9393 if (!p || !p2) {
9394 ret = -TARGET_EFAULT;
9395 } else if (!arg3) {
9396 /* Short circuit this for the magic exe check. */
9397 ret = -TARGET_EINVAL;
9398 } else if (is_proc_myself((const char *)p, "exe")) {
9399 char real[PATH_MAX], *temp;
9400 temp = realpath(exec_path, real);
9401 /* Return value is # of bytes that we wrote to the buffer. */
9402 if (temp == NULL) {
9403 ret = get_errno(-1);
9404 } else {
9405 /* Don't worry about sign mismatch as earlier mapping
9406 * logic would have thrown a bad address error. */
9407 ret = MIN(strlen(real), arg3);
9408 /* We cannot NUL terminate the string. */
9409 memcpy(p2, real, ret);
9411 } else {
9412 ret = get_errno(readlink(path(p), p2, arg3));
9414 unlock_user(p2, arg2, ret);
9415 unlock_user(p, arg1, 0);
9417 return ret;
9418 #endif
9419 #if defined(TARGET_NR_readlinkat)
9420 case TARGET_NR_readlinkat:
9422 void *p2;
9423 p = lock_user_string(arg2);
9424 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9425 if (!p || !p2) {
9426 ret = -TARGET_EFAULT;
9427 } else if (is_proc_myself((const char *)p, "exe")) {
9428 char real[PATH_MAX], *temp;
9429 temp = realpath(exec_path, real);
9430 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9431 snprintf((char *)p2, arg4, "%s", real);
9432 } else {
9433 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9435 unlock_user(p2, arg3, ret);
9436 unlock_user(p, arg2, 0);
9438 return ret;
9439 #endif
9440 #ifdef TARGET_NR_swapon
9441 case TARGET_NR_swapon:
9442 if (!(p = lock_user_string(arg1)))
9443 return -TARGET_EFAULT;
9444 ret = get_errno(swapon(p, arg2));
9445 unlock_user(p, arg1, 0);
9446 return ret;
9447 #endif
9448 case TARGET_NR_reboot:
9449 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9450 /* arg4 must be ignored in all other cases */
9451 p = lock_user_string(arg4);
9452 if (!p) {
9453 return -TARGET_EFAULT;
9455 ret = get_errno(reboot(arg1, arg2, arg3, p));
9456 unlock_user(p, arg4, 0);
9457 } else {
9458 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9460 return ret;
9461 #ifdef TARGET_NR_mmap
9462 case TARGET_NR_mmap:
9463 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9464 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9465 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9466 || defined(TARGET_S390X)
9468 abi_ulong *v;
9469 abi_ulong v1, v2, v3, v4, v5, v6;
9470 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9471 return -TARGET_EFAULT;
9472 v1 = tswapal(v[0]);
9473 v2 = tswapal(v[1]);
9474 v3 = tswapal(v[2]);
9475 v4 = tswapal(v[3]);
9476 v5 = tswapal(v[4]);
9477 v6 = tswapal(v[5]);
9478 unlock_user(v, arg1, 0);
9479 ret = get_errno(target_mmap(v1, v2, v3,
9480 target_to_host_bitmask(v4, mmap_flags_tbl),
9481 v5, v6));
9483 #else
9484 ret = get_errno(target_mmap(arg1, arg2, arg3,
9485 target_to_host_bitmask(arg4, mmap_flags_tbl),
9486 arg5,
9487 arg6));
9488 #endif
9489 return ret;
9490 #endif
9491 #ifdef TARGET_NR_mmap2
9492 case TARGET_NR_mmap2:
9493 #ifndef MMAP_SHIFT
9494 #define MMAP_SHIFT 12
9495 #endif
9496 ret = target_mmap(arg1, arg2, arg3,
9497 target_to_host_bitmask(arg4, mmap_flags_tbl),
9498 arg5, arg6 << MMAP_SHIFT);
9499 return get_errno(ret);
9500 #endif
9501 case TARGET_NR_munmap:
9502 return get_errno(target_munmap(arg1, arg2));
9503 case TARGET_NR_mprotect:
9505 TaskState *ts = cpu->opaque;
9506 /* Special hack to detect libc making the stack executable. */
9507 if ((arg3 & PROT_GROWSDOWN)
9508 && arg1 >= ts->info->stack_limit
9509 && arg1 <= ts->info->start_stack) {
9510 arg3 &= ~PROT_GROWSDOWN;
9511 arg2 = arg2 + arg1 - ts->info->stack_limit;
9512 arg1 = ts->info->stack_limit;
9515 return get_errno(target_mprotect(arg1, arg2, arg3));
9516 #ifdef TARGET_NR_mremap
9517 case TARGET_NR_mremap:
9518 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9519 #endif
9520 /* ??? msync/mlock/munlock are broken for softmmu. */
9521 #ifdef TARGET_NR_msync
9522 case TARGET_NR_msync:
9523 return get_errno(msync(g2h(arg1), arg2, arg3));
9524 #endif
9525 #ifdef TARGET_NR_mlock
9526 case TARGET_NR_mlock:
9527 return get_errno(mlock(g2h(arg1), arg2));
9528 #endif
9529 #ifdef TARGET_NR_munlock
9530 case TARGET_NR_munlock:
9531 return get_errno(munlock(g2h(arg1), arg2));
9532 #endif
9533 #ifdef TARGET_NR_mlockall
9534 case TARGET_NR_mlockall:
9535 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9536 #endif
9537 #ifdef TARGET_NR_munlockall
9538 case TARGET_NR_munlockall:
9539 return get_errno(munlockall());
9540 #endif
9541 #ifdef TARGET_NR_truncate
9542 case TARGET_NR_truncate:
9543 if (!(p = lock_user_string(arg1)))
9544 return -TARGET_EFAULT;
9545 ret = get_errno(truncate(p, arg2));
9546 unlock_user(p, arg1, 0);
9547 return ret;
9548 #endif
9549 #ifdef TARGET_NR_ftruncate
9550 case TARGET_NR_ftruncate:
9551 return get_errno(ftruncate(arg1, arg2));
9552 #endif
9553 case TARGET_NR_fchmod:
9554 return get_errno(fchmod(arg1, arg2));
9555 #if defined(TARGET_NR_fchmodat)
9556 case TARGET_NR_fchmodat:
9557 if (!(p = lock_user_string(arg2)))
9558 return -TARGET_EFAULT;
9559 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9560 unlock_user(p, arg2, 0);
9561 return ret;
9562 #endif
9563 case TARGET_NR_getpriority:
9564 /* Note that negative values are valid for getpriority, so we must
9565 differentiate based on errno settings. */
9566 errno = 0;
9567 ret = getpriority(arg1, arg2);
9568 if (ret == -1 && errno != 0) {
9569 return -host_to_target_errno(errno);
9571 #ifdef TARGET_ALPHA
9572 /* Return value is the unbiased priority. Signal no error. */
9573 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9574 #else
9575 /* Return value is a biased priority to avoid negative numbers. */
9576 ret = 20 - ret;
9577 #endif
9578 return ret;
9579 case TARGET_NR_setpriority:
9580 return get_errno(setpriority(arg1, arg2, arg3));
9581 #ifdef TARGET_NR_statfs
9582 case TARGET_NR_statfs:
9583 if (!(p = lock_user_string(arg1))) {
9584 return -TARGET_EFAULT;
9586 ret = get_errno(statfs(path(p), &stfs));
9587 unlock_user(p, arg1, 0);
9588 convert_statfs:
9589 if (!is_error(ret)) {
9590 struct target_statfs *target_stfs;
9592 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9593 return -TARGET_EFAULT;
9594 __put_user(stfs.f_type, &target_stfs->f_type);
9595 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9596 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9597 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9598 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9599 __put_user(stfs.f_files, &target_stfs->f_files);
9600 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9601 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9602 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9603 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9604 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9605 #ifdef _STATFS_F_FLAGS
9606 __put_user(stfs.f_flags, &target_stfs->f_flags);
9607 #else
9608 __put_user(0, &target_stfs->f_flags);
9609 #endif
9610 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9611 unlock_user_struct(target_stfs, arg2, 1);
9613 return ret;
9614 #endif
9615 #ifdef TARGET_NR_fstatfs
9616 case TARGET_NR_fstatfs:
9617 ret = get_errno(fstatfs(arg1, &stfs));
9618 goto convert_statfs;
9619 #endif
9620 #ifdef TARGET_NR_statfs64
9621 case TARGET_NR_statfs64:
9622 if (!(p = lock_user_string(arg1))) {
9623 return -TARGET_EFAULT;
9625 ret = get_errno(statfs(path(p), &stfs));
9626 unlock_user(p, arg1, 0);
9627 convert_statfs64:
9628 if (!is_error(ret)) {
9629 struct target_statfs64 *target_stfs;
9631 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9632 return -TARGET_EFAULT;
9633 __put_user(stfs.f_type, &target_stfs->f_type);
9634 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9635 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9636 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9637 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9638 __put_user(stfs.f_files, &target_stfs->f_files);
9639 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9640 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9641 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9642 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9643 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9644 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9645 unlock_user_struct(target_stfs, arg3, 1);
9647 return ret;
9648 case TARGET_NR_fstatfs64:
9649 ret = get_errno(fstatfs(arg1, &stfs));
9650 goto convert_statfs64;
9651 #endif
9652 #ifdef TARGET_NR_socketcall
9653 case TARGET_NR_socketcall:
9654 return do_socketcall(arg1, arg2);
9655 #endif
9656 #ifdef TARGET_NR_accept
9657 case TARGET_NR_accept:
9658 return do_accept4(arg1, arg2, arg3, 0);
9659 #endif
9660 #ifdef TARGET_NR_accept4
9661 case TARGET_NR_accept4:
9662 return do_accept4(arg1, arg2, arg3, arg4);
9663 #endif
9664 #ifdef TARGET_NR_bind
9665 case TARGET_NR_bind:
9666 return do_bind(arg1, arg2, arg3);
9667 #endif
9668 #ifdef TARGET_NR_connect
9669 case TARGET_NR_connect:
9670 return do_connect(arg1, arg2, arg3);
9671 #endif
9672 #ifdef TARGET_NR_getpeername
9673 case TARGET_NR_getpeername:
9674 return do_getpeername(arg1, arg2, arg3);
9675 #endif
9676 #ifdef TARGET_NR_getsockname
9677 case TARGET_NR_getsockname:
9678 return do_getsockname(arg1, arg2, arg3);
9679 #endif
9680 #ifdef TARGET_NR_getsockopt
9681 case TARGET_NR_getsockopt:
9682 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9683 #endif
9684 #ifdef TARGET_NR_listen
9685 case TARGET_NR_listen:
9686 return get_errno(listen(arg1, arg2));
9687 #endif
9688 #ifdef TARGET_NR_recv
9689 case TARGET_NR_recv:
9690 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9691 #endif
9692 #ifdef TARGET_NR_recvfrom
9693 case TARGET_NR_recvfrom:
9694 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9695 #endif
9696 #ifdef TARGET_NR_recvmsg
9697 case TARGET_NR_recvmsg:
9698 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9699 #endif
9700 #ifdef TARGET_NR_send
9701 case TARGET_NR_send:
9702 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9703 #endif
9704 #ifdef TARGET_NR_sendmsg
9705 case TARGET_NR_sendmsg:
9706 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9707 #endif
9708 #ifdef TARGET_NR_sendmmsg
9709 case TARGET_NR_sendmmsg:
9710 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9711 #endif
9712 #ifdef TARGET_NR_recvmmsg
9713 case TARGET_NR_recvmmsg:
9714 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9715 #endif
9716 #ifdef TARGET_NR_sendto
9717 case TARGET_NR_sendto:
9718 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9719 #endif
9720 #ifdef TARGET_NR_shutdown
9721 case TARGET_NR_shutdown:
9722 return get_errno(shutdown(arg1, arg2));
9723 #endif
9724 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9725 case TARGET_NR_getrandom:
9726 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9727 if (!p) {
9728 return -TARGET_EFAULT;
9730 ret = get_errno(getrandom(p, arg2, arg3));
9731 unlock_user(p, arg1, ret);
9732 return ret;
9733 #endif
9734 #ifdef TARGET_NR_socket
9735 case TARGET_NR_socket:
9736 return do_socket(arg1, arg2, arg3);
9737 #endif
9738 #ifdef TARGET_NR_socketpair
9739 case TARGET_NR_socketpair:
9740 return do_socketpair(arg1, arg2, arg3, arg4);
9741 #endif
9742 #ifdef TARGET_NR_setsockopt
9743 case TARGET_NR_setsockopt:
9744 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9745 #endif
9746 #if defined(TARGET_NR_syslog)
9747 case TARGET_NR_syslog:
9749 int len = arg2;
9751 switch (arg1) {
9752 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9753 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9754 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9755 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9756 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9757 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9758 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9759 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9760 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9761 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9762 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9763 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9765 if (len < 0) {
9766 return -TARGET_EINVAL;
9768 if (len == 0) {
9769 return 0;
9771 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9772 if (!p) {
9773 return -TARGET_EFAULT;
9775 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9776 unlock_user(p, arg2, arg3);
9778 return ret;
9779 default:
9780 return -TARGET_EINVAL;
9783 break;
9784 #endif
9785 case TARGET_NR_setitimer:
9787 struct itimerval value, ovalue, *pvalue;
9789 if (arg2) {
9790 pvalue = &value;
9791 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9792 || copy_from_user_timeval(&pvalue->it_value,
9793 arg2 + sizeof(struct target_timeval)))
9794 return -TARGET_EFAULT;
9795 } else {
9796 pvalue = NULL;
9798 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9799 if (!is_error(ret) && arg3) {
9800 if (copy_to_user_timeval(arg3,
9801 &ovalue.it_interval)
9802 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9803 &ovalue.it_value))
9804 return -TARGET_EFAULT;
9807 return ret;
9808 case TARGET_NR_getitimer:
9810 struct itimerval value;
9812 ret = get_errno(getitimer(arg1, &value));
9813 if (!is_error(ret) && arg2) {
9814 if (copy_to_user_timeval(arg2,
9815 &value.it_interval)
9816 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9817 &value.it_value))
9818 return -TARGET_EFAULT;
9821 return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        /* stat(2): resolve the guest pathname, then share the struct
         * conversion with lstat/fstat below via the do_stat label. */
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        /* lstat(2): as stat, but does not follow a trailing symlink. */
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Zero the whole struct first so padding and any fields
                 * not copied below are deterministic. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps, when host and target both
                 * expose them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
9880 case TARGET_NR_vhangup:
9881 return get_errno(vhangup());
9882 #ifdef TARGET_NR_syscall
9883 case TARGET_NR_syscall:
9884 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9885 arg6, arg7, arg8, 0);
9886 #endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int wstatus;
            abi_long wstatus_addr = arg2;
            abi_ulong target_ru_addr = arg4;
            struct rusage host_ru;
            /* Only ask the host for rusage if the guest wants it. */
            struct rusage *host_ru_ptr = target_ru_addr ? &host_ru : NULL;
            abi_long conv_err;

            ret = get_errno(safe_wait4(arg1, &wstatus, arg3, host_ru_ptr));
            if (!is_error(ret)) {
                /* A status is only meaningful when a child was reaped
                 * (non-zero pid returned). */
                if (wstatus_addr && ret) {
                    wstatus = host_to_target_waitstatus(wstatus);
                    if (put_user_s32(wstatus, wstatus_addr)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (target_ru_addr) {
                    conv_err = host_to_target_rusage(target_ru_addr, &host_ru);
                    if (conv_err) {
                        ret = conv_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        /* Stop swapping on the device/file named by the guest string. */
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
9924 case TARGET_NR_sysinfo:
9926 struct target_sysinfo *target_value;
9927 struct sysinfo value;
9928 ret = get_errno(sysinfo(&value));
9929 if (!is_error(ret) && arg1)
9931 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9932 return -TARGET_EFAULT;
9933 __put_user(value.uptime, &target_value->uptime);
9934 __put_user(value.loads[0], &target_value->loads[0]);
9935 __put_user(value.loads[1], &target_value->loads[1]);
9936 __put_user(value.loads[2], &target_value->loads[2]);
9937 __put_user(value.totalram, &target_value->totalram);
9938 __put_user(value.freeram, &target_value->freeram);
9939 __put_user(value.sharedram, &target_value->sharedram);
9940 __put_user(value.bufferram, &target_value->bufferram);
9941 __put_user(value.totalswap, &target_value->totalswap);
9942 __put_user(value.freeswap, &target_value->freeswap);
9943 __put_user(value.procs, &target_value->procs);
9944 __put_user(value.totalhigh, &target_value->totalhigh);
9945 __put_user(value.freehigh, &target_value->freehigh);
9946 __put_user(value.mem_unit, &target_value->mem_unit);
9947 unlock_user_struct(target_value, arg1, 1);
9950 return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed System V IPC entry point used by older ABIs. */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop is semtimedop with no timeout pointer. */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* Variant whose timeout is a 64-bit timespec. */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        /* Needs cpu_env so the helper can place the guest mapping. */
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
10007 case TARGET_NR_fsync:
10008 return get_errno(fsync(arg1));
10009 case TARGET_NR_clone:
10010 /* Linux manages to have three different orderings for its
10011 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10012 * match the kernel's CONFIG_CLONE_* settings.
10013 * Microblaze is further special in that it uses a sixth
10014 * implicit argument to clone for the TLS pointer.
10016 #if defined(TARGET_MICROBLAZE)
10017 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10018 #elif defined(TARGET_CLONE_BACKWARDS)
10019 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10020 #elif defined(TARGET_CLONE_BACKWARDS2)
10021 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10022 #else
10023 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10024 #endif
10025 return ret;
10026 #ifdef __NR_exit_group
10027 /* new thread calls */
10028 case TARGET_NR_exit_group:
10029 preexit_cleanup(cpu_env, arg1);
10030 return get_errno(exit_group(arg1));
10031 #endif
10032 case TARGET_NR_setdomainname:
10033 if (!(p = lock_user_string(arg1)))
10034 return -TARGET_EFAULT;
10035 ret = get_errno(setdomainname(p, arg2));
10036 unlock_user(p, arg1, 0);
10037 return ret;
10038 case TARGET_NR_uname:
10039 /* no need to transcode because we use the linux syscall */
10041 struct new_utsname * buf;
10043 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10044 return -TARGET_EFAULT;
10045 ret = get_errno(sys_uname(buf));
10046 if (!is_error(ret)) {
10047 /* Overwrite the native machine name with whatever is being
10048 emulated. */
10049 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10050 sizeof(buf->machine));
10051 /* Allow the user to override the reported release. */
10052 if (qemu_uname_release && *qemu_uname_release) {
10053 g_strlcpy(buf->release, qemu_uname_release,
10054 sizeof(buf->release));
10057 unlock_user_struct(buf, arg1, 1);
10059 return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        /* vm86 mode exists only for 32-bit x86 guests. */
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* Convert the guest timex in, call the host, convert back. */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            /* On success, write the (possibly updated) timex back out. */
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            /* Same as clock_adjtime but with 64-bit time conversions. */
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10117 case TARGET_NR_getpgid:
10118 return get_errno(getpgid(arg1));
10119 case TARGET_NR_fchdir:
10120 return get_errno(fchdir(arg1));
10121 case TARGET_NR_personality:
10122 return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Hosts without llseek: emulate with a plain 64-bit lseek,
             * joining the two 32-bit offset halves. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            ret = (res == -1) ? get_errno(res) : 0;
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success, store the 64-bit result where arg4 points. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10143 #ifdef TARGET_NR_getdents
10144 case TARGET_NR_getdents:
10145 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10146 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10148 struct target_dirent *target_dirp;
10149 struct linux_dirent *dirp;
10150 abi_long count = arg3;
10152 dirp = g_try_malloc(count);
10153 if (!dirp) {
10154 return -TARGET_ENOMEM;
10157 ret = get_errno(sys_getdents(arg1, dirp, count));
10158 if (!is_error(ret)) {
10159 struct linux_dirent *de;
10160 struct target_dirent *tde;
10161 int len = ret;
10162 int reclen, treclen;
10163 int count1, tnamelen;
10165 count1 = 0;
10166 de = dirp;
10167 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10168 return -TARGET_EFAULT;
10169 tde = target_dirp;
10170 while (len > 0) {
10171 reclen = de->d_reclen;
10172 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10173 assert(tnamelen >= 0);
10174 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10175 assert(count1 + treclen <= count);
10176 tde->d_reclen = tswap16(treclen);
10177 tde->d_ino = tswapal(de->d_ino);
10178 tde->d_off = tswapal(de->d_off);
10179 memcpy(tde->d_name, de->d_name, tnamelen);
10180 de = (struct linux_dirent *)((char *)de + reclen);
10181 len -= reclen;
10182 tde = (struct target_dirent *)((char *)tde + treclen);
10183 count1 += treclen;
10185 ret = count1;
10186 unlock_user(target_dirp, arg2, ret);
10188 g_free(dirp);
10190 #else
10192 struct linux_dirent *dirp;
10193 abi_long count = arg3;
10195 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10196 return -TARGET_EFAULT;
10197 ret = get_errno(sys_getdents(arg1, dirp, count));
10198 if (!is_error(ret)) {
10199 struct linux_dirent *de;
10200 int len = ret;
10201 int reclen;
10202 de = dirp;
10203 while (len > 0) {
10204 reclen = de->d_reclen;
10205 if (reclen > len)
10206 break;
10207 de->d_reclen = tswap16(reclen);
10208 tswapls(&de->d_ino);
10209 tswapls(&de->d_off);
10210 de = (struct linux_dirent *)((char *)de + reclen);
10211 len -= reclen;
10214 unlock_user(dirp, arg2, ret);
10216 #endif
10217 #else
10218 /* Implement getdents in terms of getdents64 */
10220 struct linux_dirent64 *dirp;
10221 abi_long count = arg3;
10223 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10224 if (!dirp) {
10225 return -TARGET_EFAULT;
10227 ret = get_errno(sys_getdents64(arg1, dirp, count));
10228 if (!is_error(ret)) {
10229 /* Convert the dirent64 structs to target dirent. We do this
10230 * in-place, since we can guarantee that a target_dirent is no
10231 * larger than a dirent64; however this means we have to be
10232 * careful to read everything before writing in the new format.
10234 struct linux_dirent64 *de;
10235 struct target_dirent *tde;
10236 int len = ret;
10237 int tlen = 0;
10239 de = dirp;
10240 tde = (struct target_dirent *)dirp;
10241 while (len > 0) {
10242 int namelen, treclen;
10243 int reclen = de->d_reclen;
10244 uint64_t ino = de->d_ino;
10245 int64_t off = de->d_off;
10246 uint8_t type = de->d_type;
10248 namelen = strlen(de->d_name);
10249 treclen = offsetof(struct target_dirent, d_name)
10250 + namelen + 2;
10251 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10253 memmove(tde->d_name, de->d_name, namelen + 1);
10254 tde->d_ino = tswapal(ino);
10255 tde->d_off = tswapal(off);
10256 tde->d_reclen = tswap16(treclen);
10257 /* The target_dirent type is in what was formerly a padding
10258 * byte at the end of the structure:
10260 *(((char *)tde) + treclen - 1) = type;
10262 de = (struct linux_dirent64 *)((char *)de + reclen);
10263 tde = (struct target_dirent *)((char *)tde + treclen);
10264 len -= reclen;
10265 tlen += treclen;
10267 ret = tlen;
10269 unlock_user(dirp, arg2, ret);
10271 #endif
10272 return ret;
10273 #endif /* TARGET_NR_getdents */
10274 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10275 case TARGET_NR_getdents64:
10277 struct linux_dirent64 *dirp;
10278 abi_long count = arg3;
10279 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10280 return -TARGET_EFAULT;
10281 ret = get_errno(sys_getdents64(arg1, dirp, count));
10282 if (!is_error(ret)) {
10283 struct linux_dirent64 *de;
10284 int len = ret;
10285 int reclen;
10286 de = dirp;
10287 while (len > 0) {
10288 reclen = de->d_reclen;
10289 if (reclen > len)
10290 break;
10291 de->d_reclen = tswap16(reclen);
10292 tswap64s((uint64_t *)&de->d_ino);
10293 tswap64s((uint64_t *)&de->d_off);
10294 de = (struct linux_dirent64 *)((char *)de + reclen);
10295 len -= reclen;
10298 unlock_user(dirp, arg2, ret);
10300 return ret;
10301 #endif /* TARGET_NR_getdents64 */
10302 #if defined(TARGET_NR__newselect)
10303 case TARGET_NR__newselect:
10304 return do_select(arg1, arg2, arg3, arg4, arg5);
10305 #endif
10306 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10307 # ifdef TARGET_NR_poll
10308 case TARGET_NR_poll:
10309 # endif
10310 # ifdef TARGET_NR_ppoll
10311 case TARGET_NR_ppoll:
10312 # endif
10314 struct target_pollfd *target_pfd;
10315 unsigned int nfds = arg2;
10316 struct pollfd *pfd;
10317 unsigned int i;
10319 pfd = NULL;
10320 target_pfd = NULL;
10321 if (nfds) {
10322 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10323 return -TARGET_EINVAL;
10326 target_pfd = lock_user(VERIFY_WRITE, arg1,
10327 sizeof(struct target_pollfd) * nfds, 1);
10328 if (!target_pfd) {
10329 return -TARGET_EFAULT;
10332 pfd = alloca(sizeof(struct pollfd) * nfds);
10333 for (i = 0; i < nfds; i++) {
10334 pfd[i].fd = tswap32(target_pfd[i].fd);
10335 pfd[i].events = tswap16(target_pfd[i].events);
10339 switch (num) {
10340 # ifdef TARGET_NR_ppoll
10341 case TARGET_NR_ppoll:
10343 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10344 target_sigset_t *target_set;
10345 sigset_t _set, *set = &_set;
10347 if (arg3) {
10348 if (target_to_host_timespec(timeout_ts, arg3)) {
10349 unlock_user(target_pfd, arg1, 0);
10350 return -TARGET_EFAULT;
10352 } else {
10353 timeout_ts = NULL;
10356 if (arg4) {
10357 if (arg5 != sizeof(target_sigset_t)) {
10358 unlock_user(target_pfd, arg1, 0);
10359 return -TARGET_EINVAL;
10362 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10363 if (!target_set) {
10364 unlock_user(target_pfd, arg1, 0);
10365 return -TARGET_EFAULT;
10367 target_to_host_sigset(set, target_set);
10368 } else {
10369 set = NULL;
10372 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10373 set, SIGSET_T_SIZE));
10375 if (!is_error(ret) && arg3) {
10376 host_to_target_timespec(arg3, timeout_ts);
10378 if (arg4) {
10379 unlock_user(target_set, arg4, 0);
10381 break;
10383 # endif
10384 # ifdef TARGET_NR_poll
10385 case TARGET_NR_poll:
10387 struct timespec ts, *pts;
10389 if (arg3 >= 0) {
10390 /* Convert ms to secs, ns */
10391 ts.tv_sec = arg3 / 1000;
10392 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10393 pts = &ts;
10394 } else {
10395 /* -ve poll() timeout means "infinite" */
10396 pts = NULL;
10398 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10399 break;
10401 # endif
10402 default:
10403 g_assert_not_reached();
10406 if (!is_error(ret)) {
10407 for(i = 0; i < nfds; i++) {
10408 target_pfd[i].revents = tswap16(pfd[i].revents);
10411 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10413 return ret;
10414 #endif
10415 case TARGET_NR_flock:
10416 /* NOTE: the flock constant seems to be the same for every
10417 Linux platform */
10418 return get_errno(safe_flock(arg1, arg2));
10419 case TARGET_NR_readv:
10421 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10422 if (vec != NULL) {
10423 ret = get_errno(safe_readv(arg1, vec, arg3));
10424 unlock_iovec(vec, arg2, arg3, 1);
10425 } else {
10426 ret = -host_to_target_errno(errno);
10429 return ret;
10430 case TARGET_NR_writev:
10432 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10433 if (vec != NULL) {
10434 ret = get_errno(safe_writev(arg1, vec, arg3));
10435 unlock_iovec(vec, arg2, arg3, 0);
10436 } else {
10437 ret = -host_to_target_errno(errno);
10440 return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);

            if (vec == NULL) {
                ret = -host_to_target_errno(errno);
            } else {
                unsigned long low, high;

                /* The 64-bit file offset arrives split across two ABI
                 * registers; recombine per the target's convention. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);

            if (vec == NULL) {
                ret = -host_to_target_errno(errno);
            } else {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            }
        }
        return ret;
#endif
10473 case TARGET_NR_getsid:
10474 return get_errno(getsid(arg1));
10475 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10476 case TARGET_NR_fdatasync:
10477 return get_errno(fdatasync(arg1));
10478 #endif
10479 #ifdef TARGET_NR__sysctl
10480 case TARGET_NR__sysctl:
10481 /* We don't implement this, but ENOTDIR is always a safe
10482 return value. */
10483 return -TARGET_ENOTDIR;
10484 #endif
10485 case TARGET_NR_sched_getaffinity:
10487 unsigned int mask_size;
10488 unsigned long *mask;
10491 * sched_getaffinity needs multiples of ulong, so need to take
10492 * care of mismatches between target ulong and host ulong sizes.
10494 if (arg2 & (sizeof(abi_ulong) - 1)) {
10495 return -TARGET_EINVAL;
10497 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10499 mask = alloca(mask_size);
10500 memset(mask, 0, mask_size);
10501 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10503 if (!is_error(ret)) {
10504 if (ret > arg2) {
10505 /* More data returned than the caller's buffer will fit.
10506 * This only happens if sizeof(abi_long) < sizeof(long)
10507 * and the caller passed us a buffer holding an odd number
10508 * of abi_longs. If the host kernel is actually using the
10509 * extra 4 bytes then fail EINVAL; otherwise we can just
10510 * ignore them and only copy the interesting part.
10512 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10513 if (numcpus > arg2 * 8) {
10514 return -TARGET_EINVAL;
10516 ret = arg2;
10519 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10520 return -TARGET_EFAULT;
10524 return ret;
10525 case TARGET_NR_sched_setaffinity:
10527 unsigned int mask_size;
10528 unsigned long *mask;
10531 * sched_setaffinity needs multiples of ulong, so need to take
10532 * care of mismatches between target ulong and host ulong sizes.
10534 if (arg2 & (sizeof(abi_ulong) - 1)) {
10535 return -TARGET_EINVAL;
10537 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10538 mask = alloca(mask_size);
10540 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10541 if (ret) {
10542 return ret;
10545 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10547 case TARGET_NR_getcpu:
10549 unsigned cpu, node;
10550 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10551 arg2 ? &node : NULL,
10552 NULL));
10553 if (is_error(ret)) {
10554 return ret;
10556 if (arg1 && put_user_u32(cpu, arg1)) {
10557 return -TARGET_EFAULT;
10559 if (arg2 && put_user_u32(node, arg2)) {
10560 return -TARGET_EFAULT;
10563 return ret;
10564 case TARGET_NR_sched_setparam:
10566 struct sched_param *target_schp;
10567 struct sched_param schp;
10569 if (arg2 == 0) {
10570 return -TARGET_EINVAL;
10572 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10573 return -TARGET_EFAULT;
10574 schp.sched_priority = tswap32(target_schp->sched_priority);
10575 unlock_user_struct(target_schp, arg2, 0);
10576 return get_errno(sched_setparam(arg1, &schp));
10578 case TARGET_NR_sched_getparam:
10580 struct sched_param *target_schp;
10581 struct sched_param schp;
10583 if (arg2 == 0) {
10584 return -TARGET_EINVAL;
10586 ret = get_errno(sched_getparam(arg1, &schp));
10587 if (!is_error(ret)) {
10588 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10589 return -TARGET_EFAULT;
10590 target_schp->sched_priority = tswap32(schp.sched_priority);
10591 unlock_user_struct(target_schp, arg2, 1);
10594 return ret;
10595 case TARGET_NR_sched_setscheduler:
10597 struct sched_param *target_schp;
10598 struct sched_param schp;
10599 if (arg3 == 0) {
10600 return -TARGET_EINVAL;
10602 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10603 return -TARGET_EFAULT;
10604 schp.sched_priority = tswap32(target_schp->sched_priority);
10605 unlock_user_struct(target_schp, arg3, 0);
10606 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10608 case TARGET_NR_sched_getscheduler:
10609 return get_errno(sched_getscheduler(arg1));
10610 case TARGET_NR_sched_yield:
10611 return get_errno(sched_yield());
10612 case TARGET_NR_sched_get_priority_max:
10613 return get_errno(sched_get_priority_max(arg1));
10614 case TARGET_NR_sched_get_priority_min:
10615 return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;

            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Copy the RR quantum out in the target's layout. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;

            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Same, but via the 64-bit timespec conversion. */
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;

            /* Fail with EFAULT on a bad request pointer instead of
             * silently sleeping on uninitialized data: the original
             * code ignored the conversion helper's return value. */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            /* When interrupted, report the remaining time if the guest
             * gave us somewhere to put it; a bad pointer is EFAULT. */
            if (is_error(ret) && arg2) {
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
10650 case TARGET_NR_prctl:
10651 switch (arg1) {
10652 case PR_GET_PDEATHSIG:
10654 int deathsig;
10655 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10656 if (!is_error(ret) && arg2
10657 && put_user_ual(deathsig, arg2)) {
10658 return -TARGET_EFAULT;
10660 return ret;
10662 #ifdef PR_GET_NAME
10663 case PR_GET_NAME:
10665 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10666 if (!name) {
10667 return -TARGET_EFAULT;
10669 ret = get_errno(prctl(arg1, (unsigned long)name,
10670 arg3, arg4, arg5));
10671 unlock_user(name, arg2, 16);
10672 return ret;
10674 case PR_SET_NAME:
10676 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10677 if (!name) {
10678 return -TARGET_EFAULT;
10680 ret = get_errno(prctl(arg1, (unsigned long)name,
10681 arg3, arg4, arg5));
10682 unlock_user(name, arg2, 0);
10683 return ret;
10685 #endif
10686 #ifdef TARGET_MIPS
10687 case TARGET_PR_GET_FP_MODE:
10689 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10690 ret = 0;
10691 if (env->CP0_Status & (1 << CP0St_FR)) {
10692 ret |= TARGET_PR_FP_MODE_FR;
10694 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10695 ret |= TARGET_PR_FP_MODE_FRE;
10697 return ret;
10699 case TARGET_PR_SET_FP_MODE:
10701 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10702 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10703 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10704 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10705 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10707 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10708 TARGET_PR_FP_MODE_FRE;
10710 /* If nothing to change, return right away, successfully. */
10711 if (old_fr == new_fr && old_fre == new_fre) {
10712 return 0;
10714 /* Check the value is valid */
10715 if (arg2 & ~known_bits) {
10716 return -TARGET_EOPNOTSUPP;
10718 /* Setting FRE without FR is not supported. */
10719 if (new_fre && !new_fr) {
10720 return -TARGET_EOPNOTSUPP;
10722 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10723 /* FR1 is not supported */
10724 return -TARGET_EOPNOTSUPP;
10726 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10727 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10728 /* cannot set FR=0 */
10729 return -TARGET_EOPNOTSUPP;
10731 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10732 /* Cannot set FRE=1 */
10733 return -TARGET_EOPNOTSUPP;
10736 int i;
10737 fpr_t *fpr = env->active_fpu.fpr;
10738 for (i = 0; i < 32 ; i += 2) {
10739 if (!old_fr && new_fr) {
10740 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10741 } else if (old_fr && !new_fr) {
10742 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10746 if (new_fr) {
10747 env->CP0_Status |= (1 << CP0St_FR);
10748 env->hflags |= MIPS_HFLAG_F64;
10749 } else {
10750 env->CP0_Status &= ~(1 << CP0St_FR);
10751 env->hflags &= ~MIPS_HFLAG_F64;
10753 if (new_fre) {
10754 env->CP0_Config5 |= (1 << CP0C5_FRE);
10755 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10756 env->hflags |= MIPS_HFLAG_FRE;
10758 } else {
10759 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10760 env->hflags &= ~MIPS_HFLAG_FRE;
10763 return 0;
10765 #endif /* MIPS */
10766 #ifdef TARGET_AARCH64
10767 case TARGET_PR_SVE_SET_VL:
10769 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10770 * PR_SVE_VL_INHERIT. Note the kernel definition
10771 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10772 * even though the current architectural maximum is VQ=16.
10774 ret = -TARGET_EINVAL;
10775 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10776 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10777 CPUARMState *env = cpu_env;
10778 ARMCPU *cpu = env_archcpu(env);
10779 uint32_t vq, old_vq;
10781 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10782 vq = MAX(arg2 / 16, 1);
10783 vq = MIN(vq, cpu->sve_max_vq);
10785 if (vq < old_vq) {
10786 aarch64_sve_narrow_vq(env, vq);
10788 env->vfp.zcr_el[1] = vq - 1;
10789 arm_rebuild_hflags(env);
10790 ret = vq * 16;
10792 return ret;
10793 case TARGET_PR_SVE_GET_VL:
10794 ret = -TARGET_EINVAL;
10796 ARMCPU *cpu = env_archcpu(cpu_env);
10797 if (cpu_isar_feature(aa64_sve, cpu)) {
10798 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10801 return ret;
10802 case TARGET_PR_PAC_RESET_KEYS:
10804 CPUARMState *env = cpu_env;
10805 ARMCPU *cpu = env_archcpu(env);
10807 if (arg3 || arg4 || arg5) {
10808 return -TARGET_EINVAL;
10810 if (cpu_isar_feature(aa64_pauth, cpu)) {
10811 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10812 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10813 TARGET_PR_PAC_APGAKEY);
10814 int ret = 0;
10815 Error *err = NULL;
10817 if (arg2 == 0) {
10818 arg2 = all;
10819 } else if (arg2 & ~all) {
10820 return -TARGET_EINVAL;
10822 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10823 ret |= qemu_guest_getrandom(&env->keys.apia,
10824 sizeof(ARMPACKey), &err);
10826 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10827 ret |= qemu_guest_getrandom(&env->keys.apib,
10828 sizeof(ARMPACKey), &err);
10830 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10831 ret |= qemu_guest_getrandom(&env->keys.apda,
10832 sizeof(ARMPACKey), &err);
10834 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10835 ret |= qemu_guest_getrandom(&env->keys.apdb,
10836 sizeof(ARMPACKey), &err);
10838 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10839 ret |= qemu_guest_getrandom(&env->keys.apga,
10840 sizeof(ARMPACKey), &err);
10842 if (ret != 0) {
10844 * Some unknown failure in the crypto. The best
10845 * we can do is log it and fail the syscall.
10846 * The real syscall cannot fail this way.
10848 qemu_log_mask(LOG_UNIMP,
10849 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10850 error_get_pretty(err));
10851 error_free(err);
10852 return -TARGET_EIO;
10854 return 0;
10857 return -TARGET_EINVAL;
10858 #endif /* AARCH64 */
10859 case PR_GET_SECCOMP:
10860 case PR_SET_SECCOMP:
10861 /* Disable seccomp to prevent the target disabling syscalls we
10862 * need. */
10863 return -TARGET_EINVAL;
10864 default:
10865 /* Most prctl options have no pointer arguments */
10866 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10868 break;
10869 #ifdef TARGET_NR_arch_prctl
10870 case TARGET_NR_arch_prctl:
10871 return do_arch_prctl(cpu_env, arg1, arg2);
10872 #endif
10873 #ifdef TARGET_NR_pread64
10874 case TARGET_NR_pread64:
10875 if (regpairs_aligned(cpu_env, num)) {
10876 arg4 = arg5;
10877 arg5 = arg6;
10879 if (arg2 == 0 && arg3 == 0) {
10880 /* Special-case NULL buffer and zero length, which should succeed */
10881 p = 0;
10882 } else {
10883 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10884 if (!p) {
10885 return -TARGET_EFAULT;
10888 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10889 unlock_user(p, arg2, ret);
10890 return ret;
10891 case TARGET_NR_pwrite64:
10892 if (regpairs_aligned(cpu_env, num)) {
10893 arg4 = arg5;
10894 arg5 = arg6;
10896 if (arg2 == 0 && arg3 == 0) {
10897 /* Special-case NULL buffer and zero length, which should succeed */
10898 p = 0;
10899 } else {
10900 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10901 if (!p) {
10902 return -TARGET_EFAULT;
10905 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10906 unlock_user(p, arg2, 0);
10907 return ret;
10908 #endif
10909 case TARGET_NR_getcwd:
10910 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10911 return -TARGET_EFAULT;
10912 ret = get_errno(sys_getcwd1(p, arg2));
10913 unlock_user(p, arg1, ret);
10914 return ret;
10915 case TARGET_NR_capget:
10916 case TARGET_NR_capset:
10918 struct target_user_cap_header *target_header;
10919 struct target_user_cap_data *target_data = NULL;
10920 struct __user_cap_header_struct header;
10921 struct __user_cap_data_struct data[2];
10922 struct __user_cap_data_struct *dataptr = NULL;
10923 int i, target_datalen;
10924 int data_items = 1;
10926 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10927 return -TARGET_EFAULT;
10929 header.version = tswap32(target_header->version);
10930 header.pid = tswap32(target_header->pid);
10932 if (header.version != _LINUX_CAPABILITY_VERSION) {
10933 /* Version 2 and up takes pointer to two user_data structs */
10934 data_items = 2;
10937 target_datalen = sizeof(*target_data) * data_items;
10939 if (arg2) {
10940 if (num == TARGET_NR_capget) {
10941 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10942 } else {
10943 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10945 if (!target_data) {
10946 unlock_user_struct(target_header, arg1, 0);
10947 return -TARGET_EFAULT;
10950 if (num == TARGET_NR_capset) {
10951 for (i = 0; i < data_items; i++) {
10952 data[i].effective = tswap32(target_data[i].effective);
10953 data[i].permitted = tswap32(target_data[i].permitted);
10954 data[i].inheritable = tswap32(target_data[i].inheritable);
10958 dataptr = data;
10961 if (num == TARGET_NR_capget) {
10962 ret = get_errno(capget(&header, dataptr));
10963 } else {
10964 ret = get_errno(capset(&header, dataptr));
10967 /* The kernel always updates version for both capget and capset */
10968 target_header->version = tswap32(header.version);
10969 unlock_user_struct(target_header, arg1, 1);
10971 if (arg2) {
10972 if (num == TARGET_NR_capget) {
10973 for (i = 0; i < data_items; i++) {
10974 target_data[i].effective = tswap32(data[i].effective);
10975 target_data[i].permitted = tswap32(data[i].permitted);
10976 target_data[i].inheritable = tswap32(data[i].inheritable);
10978 unlock_user(target_data, arg2, target_datalen);
10979 } else {
10980 unlock_user(target_data, arg2, 0);
10983 return ret;
10985 case TARGET_NR_sigaltstack:
10986 return do_sigaltstack(arg1, arg2,
10987 get_sp_from_cpustate((CPUArchState *)cpu_env));
10989 #ifdef CONFIG_SENDFILE
10990 #ifdef TARGET_NR_sendfile
10991 case TARGET_NR_sendfile:
10993 off_t *offp = NULL;
10994 off_t off;
10995 if (arg3) {
10996 ret = get_user_sal(off, arg3);
10997 if (is_error(ret)) {
10998 return ret;
11000 offp = &off;
11002 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11003 if (!is_error(ret) && arg3) {
11004 abi_long ret2 = put_user_sal(off, arg3);
11005 if (is_error(ret2)) {
11006 ret = ret2;
11009 return ret;
11011 #endif
11012 #ifdef TARGET_NR_sendfile64
11013 case TARGET_NR_sendfile64:
11015 off_t *offp = NULL;
11016 off_t off;
11017 if (arg3) {
11018 ret = get_user_s64(off, arg3);
11019 if (is_error(ret)) {
11020 return ret;
11022 offp = &off;
11024 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11025 if (!is_error(ret) && arg3) {
11026 abi_long ret2 = put_user_s64(off, arg3);
11027 if (is_error(ret2)) {
11028 ret = ret2;
11031 return ret;
11033 #endif
11034 #endif
11035 #ifdef TARGET_NR_vfork
11036 case TARGET_NR_vfork:
/* vfork(2): emulated through do_fork() with CLONE_VFORK | CLONE_VM so
 * the parent is suspended until the child execs or exits, matching
 * native vfork semantics.  do_fork() is defined elsewhere in this file. */
11037 return get_errno(do_fork(cpu_env,
11038 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11039 0, 0, 0, 0));
11040 #endif
11041 #ifdef TARGET_NR_ugetrlimit
11042 case TARGET_NR_ugetrlimit:
/* ugetrlimit(2): fetch a host resource limit (after translating the
 * resource id with target_to_host_resource()) and copy it out to the
 * guest struct at arg2, converting each value with host_to_target_rlim(). */
11044 struct rlimit rlim;
11045 int resource = target_to_host_resource(arg1);
11046 ret = get_errno(getrlimit(resource, &rlim));
11047 if (!is_error(ret)) {
11048 struct target_rlimit *target_rlim;
11049 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11050 return -TARGET_EFAULT;
11051 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11052 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11053 unlock_user_struct(target_rlim, arg2, 1);
11055 return ret;
11057 #endif
11058 #ifdef TARGET_NR_truncate64
11059 case TARGET_NR_truncate64:
/* truncate64(2): on 32-bit ABIs the 64-bit length is split across
 * registers; target_truncate64() (defined elsewhere in this file)
 * presumably reassembles arg2..arg4 per-target — verify at its definition. */
11060 if (!(p = lock_user_string(arg1)))
11061 return -TARGET_EFAULT;
11062 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11063 unlock_user(p, arg1, 0);
11064 return ret;
11065 #endif
11066 #ifdef TARGET_NR_ftruncate64
11067 case TARGET_NR_ftruncate64:
/* ftruncate64(2): same argument handling, but operates on open fd arg1. */
11068 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11069 #endif
11070 #ifdef TARGET_NR_stat64
11071 case TARGET_NR_stat64:
/* stat64(2): stat the (path-remapped) guest pathname on the host,
 * then marshal the host struct stat into the target's 64-bit stat
 * layout at arg2 via host_to_target_stat64(). */
11072 if (!(p = lock_user_string(arg1))) {
11073 return -TARGET_EFAULT;
11075 ret = get_errno(stat(path(p), &st));
11076 unlock_user(p, arg1, 0);
11077 if (!is_error(ret))
11078 ret = host_to_target_stat64(cpu_env, arg2, &st);
11079 return ret;
11080 #endif
11081 #ifdef TARGET_NR_lstat64
11082 case TARGET_NR_lstat64:
/* lstat64(2): as stat64, but a trailing symlink is not followed. */
11083 if (!(p = lock_user_string(arg1))) {
11084 return -TARGET_EFAULT;
11086 ret = get_errno(lstat(path(p), &st));
11087 unlock_user(p, arg1, 0);
11088 if (!is_error(ret))
11089 ret = host_to_target_stat64(cpu_env, arg2, &st);
11090 return ret;
11091 #endif
11092 #ifdef TARGET_NR_fstat64
11093 case TARGET_NR_fstat64:
/* fstat64(2): stat an already-open fd; no pathname translation needed. */
11094 ret = get_errno(fstat(arg1, &st));
11095 if (!is_error(ret))
11096 ret = host_to_target_stat64(cpu_env, arg2, &st);
11097 return ret;
11098 #endif
11099 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11100 #ifdef TARGET_NR_fstatat64
11101 case TARGET_NR_fstatat64:
11102 #endif
11103 #ifdef TARGET_NR_newfstatat
11104 case TARGET_NR_newfstatat:
11105 #endif
11106 if (!(p = lock_user_string(arg2))) {
11107 return -TARGET_EFAULT;
11109 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11110 unlock_user(p, arg2, 0);
11111 if (!is_error(ret))
11112 ret = host_to_target_stat64(cpu_env, arg3, &st);
11113 return ret;
11114 #endif
11115 #if defined(TARGET_NR_statx)
11116 case TARGET_NR_statx:
11118 struct target_statx *target_stx;
11119 int dirfd = arg1;
11120 int flags = arg3;
11122 p = lock_user_string(arg2);
11123 if (p == NULL) {
11124 return -TARGET_EFAULT;
11126 #if defined(__NR_statx)
11129 * It is assumed that struct statx is architecture independent.
11131 struct target_statx host_stx;
11132 int mask = arg4;
11134 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11135 if (!is_error(ret)) {
11136 if (host_to_target_statx(&host_stx, arg5) != 0) {
11137 unlock_user(p, arg2, 0);
11138 return -TARGET_EFAULT;
11142 if (ret != -TARGET_ENOSYS) {
11143 unlock_user(p, arg2, 0);
11144 return ret;
11147 #endif
11148 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11149 unlock_user(p, arg2, 0);
11151 if (!is_error(ret)) {
11152 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11153 return -TARGET_EFAULT;
11155 memset(target_stx, 0, sizeof(*target_stx));
11156 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11157 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11158 __put_user(st.st_ino, &target_stx->stx_ino);
11159 __put_user(st.st_mode, &target_stx->stx_mode);
11160 __put_user(st.st_uid, &target_stx->stx_uid);
11161 __put_user(st.st_gid, &target_stx->stx_gid);
11162 __put_user(st.st_nlink, &target_stx->stx_nlink);
11163 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11164 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11165 __put_user(st.st_size, &target_stx->stx_size);
11166 __put_user(st.st_blksize, &target_stx->stx_blksize);
11167 __put_user(st.st_blocks, &target_stx->stx_blocks);
11168 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11169 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11170 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11171 unlock_user_struct(target_stx, arg5, 1);
11174 return ret;
11175 #endif
11176 #ifdef TARGET_NR_lchown
11177 case TARGET_NR_lchown:
/* lchown(2): change ownership of a symlink itself (not its target).
 * low2highuid()/low2highgid() convert target-ABI ids to host width. */
11178 if (!(p = lock_user_string(arg1)))
11179 return -TARGET_EFAULT;
11180 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11181 unlock_user(p, arg1, 0);
11182 return ret;
11183 #endif
11184 #ifdef TARGET_NR_getuid
/* Legacy id getters: results pass through high2lowuid()/high2lowgid(),
 * which presumably narrow host ids for targets with 16-bit id ABIs —
 * confirm at the helpers' definitions. */
11185 case TARGET_NR_getuid:
11186 return get_errno(high2lowuid(getuid()));
11187 #endif
11188 #ifdef TARGET_NR_getgid
11189 case TARGET_NR_getgid:
11190 return get_errno(high2lowgid(getgid()));
11191 #endif
11192 #ifdef TARGET_NR_geteuid
11193 case TARGET_NR_geteuid:
11194 return get_errno(high2lowuid(geteuid()));
11195 #endif
11196 #ifdef TARGET_NR_getegid
11197 case TARGET_NR_getegid:
11198 return get_errno(high2lowgid(getegid()));
11199 #endif
11200 case TARGET_NR_setreuid:
11201 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11202 case TARGET_NR_setregid:
11203 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11204 case TARGET_NR_getgroups:
11206 int gidsetsize = arg1;
11207 target_id *target_grouplist;
11208 gid_t *grouplist;
11209 int i;
11211 grouplist = alloca(gidsetsize * sizeof(gid_t));
11212 ret = get_errno(getgroups(gidsetsize, grouplist));
11213 if (gidsetsize == 0)
11214 return ret;
11215 if (!is_error(ret)) {
11216 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11217 if (!target_grouplist)
11218 return -TARGET_EFAULT;
11219 for(i = 0;i < ret; i++)
11220 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11221 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11224 return ret;
11225 case TARGET_NR_setgroups:
11227 int gidsetsize = arg1;
11228 target_id *target_grouplist;
11229 gid_t *grouplist = NULL;
11230 int i;
11231 if (gidsetsize) {
11232 grouplist = alloca(gidsetsize * sizeof(gid_t));
11233 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11234 if (!target_grouplist) {
11235 return -TARGET_EFAULT;
11237 for (i = 0; i < gidsetsize; i++) {
11238 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11240 unlock_user(target_grouplist, arg2, 0);
11242 return get_errno(setgroups(gidsetsize, grouplist));
11244 case TARGET_NR_fchown:
11245 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11246 #if defined(TARGET_NR_fchownat)
11247 case TARGET_NR_fchownat:
11248 if (!(p = lock_user_string(arg2)))
11249 return -TARGET_EFAULT;
11250 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11251 low2highgid(arg4), arg5));
11252 unlock_user(p, arg2, 0);
11253 return ret;
11254 #endif
11255 #ifdef TARGET_NR_setresuid
11256 case TARGET_NR_setresuid:
11257 return get_errno(sys_setresuid(low2highuid(arg1),
11258 low2highuid(arg2),
11259 low2highuid(arg3)));
11260 #endif
11261 #ifdef TARGET_NR_getresuid
11262 case TARGET_NR_getresuid:
11264 uid_t ruid, euid, suid;
11265 ret = get_errno(getresuid(&ruid, &euid, &suid));
11266 if (!is_error(ret)) {
11267 if (put_user_id(high2lowuid(ruid), arg1)
11268 || put_user_id(high2lowuid(euid), arg2)
11269 || put_user_id(high2lowuid(suid), arg3))
11270 return -TARGET_EFAULT;
11273 return ret;
11274 #endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /*
         * setresgid(2): set real, effective and saved GIDs in one call.
         * low2highgid() converts target-ABI gids to host gid_t width;
         * sys_setresgid() is a local raw-syscall wrapper defined
         * elsewhere in this file.
         *
         * Fix: this arm was guarded by TARGET_NR_getresgid; the guard
         * must test the syscall actually being handled, otherwise a
         * target defining setresgid but not getresgid would lose the
         * case (and vice versa would mis-compile it in).
         */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
11281 #ifdef TARGET_NR_getresgid
11282 case TARGET_NR_getresgid:
11284 gid_t rgid, egid, sgid;
11285 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11286 if (!is_error(ret)) {
11287 if (put_user_id(high2lowgid(rgid), arg1)
11288 || put_user_id(high2lowgid(egid), arg2)
11289 || put_user_id(high2lowgid(sgid), arg3))
11290 return -TARGET_EFAULT;
11293 return ret;
11294 #endif
11295 #ifdef TARGET_NR_chown
11296 case TARGET_NR_chown:
11297 if (!(p = lock_user_string(arg1)))
11298 return -TARGET_EFAULT;
11299 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11300 unlock_user(p, arg1, 0);
11301 return ret;
11302 #endif
11303 case TARGET_NR_setuid:
/* set[ug]id go through local sys_setuid()/sys_setgid() wrappers
 * (defined elsewhere in this file) — presumably raw syscalls so the
 * change applies to this thread only, bypassing glibc's process-wide
 * setxid broadcast; verify at the wrappers' definitions. */
11304 return get_errno(sys_setuid(low2highuid(arg1)));
11305 case TARGET_NR_setgid:
11306 return get_errno(sys_setgid(low2highgid(arg1)));
11307 case TARGET_NR_setfsuid:
/* setfsuid/setfsgid never fail per the man page; ids passed through
 * unconverted, matching the kernel's int-typed interface. */
11308 return get_errno(setfsuid(arg1));
11309 case TARGET_NR_setfsgid:
11310 return get_errno(setfsgid(arg1));
11312 #ifdef TARGET_NR_lchown32
11313 case TARGET_NR_lchown32:
11314 if (!(p = lock_user_string(arg1)))
11315 return -TARGET_EFAULT;
11316 ret = get_errno(lchown(p, arg2, arg3));
11317 unlock_user(p, arg1, 0);
11318 return ret;
11319 #endif
11320 #ifdef TARGET_NR_getuid32
11321 case TARGET_NR_getuid32:
11322 return get_errno(getuid());
11323 #endif
11325 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11326 /* Alpha specific */
11327 case TARGET_NR_getxuid:
11329 uid_t euid;
11330 euid=geteuid();
11331 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11333 return get_errno(getuid());
11334 #endif
11335 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11336 /* Alpha specific */
11337 case TARGET_NR_getxgid:
11339 uid_t egid;
11340 egid=getegid();
11341 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11343 return get_errno(getgid());
11344 #endif
11345 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11346 /* Alpha specific */
11347 case TARGET_NR_osf_getsysinfo:
11348 ret = -TARGET_EOPNOTSUPP;
11349 switch (arg1) {
11350 case TARGET_GSI_IEEE_FP_CONTROL:
11352 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11353 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11355 swcr &= ~SWCR_STATUS_MASK;
11356 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11358 if (put_user_u64 (swcr, arg2))
11359 return -TARGET_EFAULT;
11360 ret = 0;
11362 break;
11364 /* case GSI_IEEE_STATE_AT_SIGNAL:
11365 -- Not implemented in linux kernel.
11366 case GSI_UACPROC:
11367 -- Retrieves current unaligned access state; not much used.
11368 case GSI_PROC_TYPE:
11369 -- Retrieves implver information; surely not used.
11370 case GSI_GET_HWRPB:
11371 -- Grabs a copy of the HWRPB; surely not used.
11374 return ret;
11375 #endif
11376 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11377 /* Alpha specific */
11378 case TARGET_NR_osf_setsysinfo:
11379 ret = -TARGET_EOPNOTSUPP;
11380 switch (arg1) {
11381 case TARGET_SSI_IEEE_FP_CONTROL:
11383 uint64_t swcr, fpcr;
11385 if (get_user_u64 (swcr, arg2)) {
11386 return -TARGET_EFAULT;
11390 * The kernel calls swcr_update_status to update the
11391 * status bits from the fpcr at every point that it
11392 * could be queried. Therefore, we store the status
11393 * bits only in FPCR.
11395 ((CPUAlphaState *)cpu_env)->swcr
11396 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11398 fpcr = cpu_alpha_load_fpcr(cpu_env);
11399 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11400 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11401 cpu_alpha_store_fpcr(cpu_env, fpcr);
11402 ret = 0;
11404 break;
11406 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11408 uint64_t exc, fpcr, fex;
11410 if (get_user_u64(exc, arg2)) {
11411 return -TARGET_EFAULT;
11413 exc &= SWCR_STATUS_MASK;
11414 fpcr = cpu_alpha_load_fpcr(cpu_env);
11416 /* Old exceptions are not signaled. */
11417 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11418 fex = exc & ~fex;
11419 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11420 fex &= ((CPUArchState *)cpu_env)->swcr;
11422 /* Update the hardware fpcr. */
11423 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11424 cpu_alpha_store_fpcr(cpu_env, fpcr);
11426 if (fex) {
11427 int si_code = TARGET_FPE_FLTUNK;
11428 target_siginfo_t info;
11430 if (fex & SWCR_TRAP_ENABLE_DNO) {
11431 si_code = TARGET_FPE_FLTUND;
11433 if (fex & SWCR_TRAP_ENABLE_INE) {
11434 si_code = TARGET_FPE_FLTRES;
11436 if (fex & SWCR_TRAP_ENABLE_UNF) {
11437 si_code = TARGET_FPE_FLTUND;
11439 if (fex & SWCR_TRAP_ENABLE_OVF) {
11440 si_code = TARGET_FPE_FLTOVF;
11442 if (fex & SWCR_TRAP_ENABLE_DZE) {
11443 si_code = TARGET_FPE_FLTDIV;
11445 if (fex & SWCR_TRAP_ENABLE_INV) {
11446 si_code = TARGET_FPE_FLTINV;
11449 info.si_signo = SIGFPE;
11450 info.si_errno = 0;
11451 info.si_code = si_code;
11452 info._sifields._sigfault._addr
11453 = ((CPUArchState *)cpu_env)->pc;
11454 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11455 QEMU_SI_FAULT, &info);
11457 ret = 0;
11459 break;
11461 /* case SSI_NVPAIRS:
11462 -- Used with SSIN_UACPROC to enable unaligned accesses.
11463 case SSI_IEEE_STATE_AT_SIGNAL:
11464 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11465 -- Not implemented in linux kernel
11468 return ret;
11469 #endif
11470 #ifdef TARGET_NR_osf_sigprocmask
11471 /* Alpha specific. */
11472 case TARGET_NR_osf_sigprocmask:
11474 abi_ulong mask;
11475 int how;
11476 sigset_t set, oldset;
11478 switch(arg1) {
11479 case TARGET_SIG_BLOCK:
11480 how = SIG_BLOCK;
11481 break;
11482 case TARGET_SIG_UNBLOCK:
11483 how = SIG_UNBLOCK;
11484 break;
11485 case TARGET_SIG_SETMASK:
11486 how = SIG_SETMASK;
11487 break;
11488 default:
11489 return -TARGET_EINVAL;
11491 mask = arg2;
11492 target_to_host_old_sigset(&set, &mask);
11493 ret = do_sigprocmask(how, &set, &oldset);
11494 if (!ret) {
11495 host_to_target_old_sigset(&mask, &oldset);
11496 ret = mask;
11499 return ret;
11500 #endif
11502 #ifdef TARGET_NR_getgid32
11503 case TARGET_NR_getgid32:
11504 return get_errno(getgid());
11505 #endif
11506 #ifdef TARGET_NR_geteuid32
11507 case TARGET_NR_geteuid32:
11508 return get_errno(geteuid());
11509 #endif
11510 #ifdef TARGET_NR_getegid32
11511 case TARGET_NR_getegid32:
11512 return get_errno(getegid());
11513 #endif
11514 #ifdef TARGET_NR_setreuid32
11515 case TARGET_NR_setreuid32:
11516 return get_errno(setreuid(arg1, arg2));
11517 #endif
11518 #ifdef TARGET_NR_setregid32
11519 case TARGET_NR_setregid32:
11520 return get_errno(setregid(arg1, arg2));
11521 #endif
11522 #ifdef TARGET_NR_getgroups32
11523 case TARGET_NR_getgroups32:
11525 int gidsetsize = arg1;
11526 uint32_t *target_grouplist;
11527 gid_t *grouplist;
11528 int i;
11530 grouplist = alloca(gidsetsize * sizeof(gid_t));
11531 ret = get_errno(getgroups(gidsetsize, grouplist));
11532 if (gidsetsize == 0)
11533 return ret;
11534 if (!is_error(ret)) {
11535 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11536 if (!target_grouplist) {
11537 return -TARGET_EFAULT;
11539 for(i = 0;i < ret; i++)
11540 target_grouplist[i] = tswap32(grouplist[i]);
11541 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11544 return ret;
11545 #endif
11546 #ifdef TARGET_NR_setgroups32
11547 case TARGET_NR_setgroups32:
11549 int gidsetsize = arg1;
11550 uint32_t *target_grouplist;
11551 gid_t *grouplist;
11552 int i;
11554 grouplist = alloca(gidsetsize * sizeof(gid_t));
11555 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11556 if (!target_grouplist) {
11557 return -TARGET_EFAULT;
11559 for(i = 0;i < gidsetsize; i++)
11560 grouplist[i] = tswap32(target_grouplist[i]);
11561 unlock_user(target_grouplist, arg2, 0);
11562 return get_errno(setgroups(gidsetsize, grouplist));
11564 #endif
11565 #ifdef TARGET_NR_fchown32
11566 case TARGET_NR_fchown32:
11567 return get_errno(fchown(arg1, arg2, arg3));
11568 #endif
11569 #ifdef TARGET_NR_setresuid32
11570 case TARGET_NR_setresuid32:
11571 return get_errno(sys_setresuid(arg1, arg2, arg3));
11572 #endif
11573 #ifdef TARGET_NR_getresuid32
11574 case TARGET_NR_getresuid32:
11576 uid_t ruid, euid, suid;
11577 ret = get_errno(getresuid(&ruid, &euid, &suid));
11578 if (!is_error(ret)) {
11579 if (put_user_u32(ruid, arg1)
11580 || put_user_u32(euid, arg2)
11581 || put_user_u32(suid, arg3))
11582 return -TARGET_EFAULT;
11585 return ret;
11586 #endif
11587 #ifdef TARGET_NR_setresgid32
11588 case TARGET_NR_setresgid32:
11589 return get_errno(sys_setresgid(arg1, arg2, arg3));
11590 #endif
11591 #ifdef TARGET_NR_getresgid32
11592 case TARGET_NR_getresgid32:
11594 gid_t rgid, egid, sgid;
11595 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11596 if (!is_error(ret)) {
11597 if (put_user_u32(rgid, arg1)
11598 || put_user_u32(egid, arg2)
11599 || put_user_u32(sgid, arg3))
11600 return -TARGET_EFAULT;
11603 return ret;
11604 #endif
11605 #ifdef TARGET_NR_chown32
11606 case TARGET_NR_chown32:
11607 if (!(p = lock_user_string(arg1)))
11608 return -TARGET_EFAULT;
11609 ret = get_errno(chown(p, arg2, arg3));
11610 unlock_user(p, arg1, 0);
11611 return ret;
11612 #endif
11613 #ifdef TARGET_NR_setuid32
11614 case TARGET_NR_setuid32:
11615 return get_errno(sys_setuid(arg1));
11616 #endif
11617 #ifdef TARGET_NR_setgid32
11618 case TARGET_NR_setgid32:
11619 return get_errno(sys_setgid(arg1));
11620 #endif
11621 #ifdef TARGET_NR_setfsuid32
11622 case TARGET_NR_setfsuid32:
11623 return get_errno(setfsuid(arg1));
11624 #endif
11625 #ifdef TARGET_NR_setfsgid32
11626 case TARGET_NR_setfsgid32:
11627 return get_errno(setfsgid(arg1));
11628 #endif
11629 #ifdef TARGET_NR_mincore
11630 case TARGET_NR_mincore:
11632 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11633 if (!a) {
11634 return -TARGET_ENOMEM;
11636 p = lock_user_string(arg3);
11637 if (!p) {
11638 ret = -TARGET_EFAULT;
11639 } else {
11640 ret = get_errno(mincore(a, arg2, p));
11641 unlock_user(p, arg3, ret);
11643 unlock_user(a, arg1, 0);
11645 return ret;
11646 #endif
11647 #ifdef TARGET_NR_arm_fadvise64_64
11648 case TARGET_NR_arm_fadvise64_64:
11649 /* arm_fadvise64_64 looks like fadvise64_64 but
11650 * with different argument order: fd, advice, offset, len
11651 * rather than the usual fd, offset, len, advice.
11652 * Note that offset and len are both 64-bit so appear as
11653 * pairs of 32-bit registers.
11655 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11656 target_offset64(arg5, arg6), arg2);
11657 return -host_to_target_errno(ret);
11658 #endif
11660 #if TARGET_ABI_BITS == 32
11662 #ifdef TARGET_NR_fadvise64_64
11663 case TARGET_NR_fadvise64_64:
11664 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11665 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11666 ret = arg2;
11667 arg2 = arg3;
11668 arg3 = arg4;
11669 arg4 = arg5;
11670 arg5 = arg6;
11671 arg6 = ret;
11672 #else
11673 /* 6 args: fd, offset (high, low), len (high, low), advice */
11674 if (regpairs_aligned(cpu_env, num)) {
11675 /* offset is in (3,4), len in (5,6) and advice in 7 */
11676 arg2 = arg3;
11677 arg3 = arg4;
11678 arg4 = arg5;
11679 arg5 = arg6;
11680 arg6 = arg7;
11682 #endif
11683 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11684 target_offset64(arg4, arg5), arg6);
11685 return -host_to_target_errno(ret);
11686 #endif
11688 #ifdef TARGET_NR_fadvise64
11689 case TARGET_NR_fadvise64:
11690 /* 5 args: fd, offset (high, low), len, advice */
11691 if (regpairs_aligned(cpu_env, num)) {
11692 /* offset is in (3,4), len in 5 and advice in 6 */
11693 arg2 = arg3;
11694 arg3 = arg4;
11695 arg4 = arg5;
11696 arg5 = arg6;
11698 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11699 return -host_to_target_errno(ret);
11700 #endif
11702 #else /* not a 32-bit ABI */
11703 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11704 #ifdef TARGET_NR_fadvise64_64
11705 case TARGET_NR_fadvise64_64:
11706 #endif
11707 #ifdef TARGET_NR_fadvise64
11708 case TARGET_NR_fadvise64:
11709 #endif
11710 #ifdef TARGET_S390X
11711 switch (arg4) {
11712 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11713 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11714 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11715 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11716 default: break;
11718 #endif
11719 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11720 #endif
11721 #endif /* end of 64-bit ABI fadvise handling */
11723 #ifdef TARGET_NR_madvise
11724 case TARGET_NR_madvise:
11725 /* A straight passthrough may not be safe because qemu sometimes
11726 turns private file-backed mappings into anonymous mappings.
11727 This will break MADV_DONTNEED.
11728 This is a hint, so ignoring and returning success is ok. */
11729 return 0;
11730 #endif
11731 #ifdef TARGET_NR_fcntl64
11732 case TARGET_NR_fcntl64:
11734 int cmd;
11735 struct flock64 fl;
11736 from_flock64_fn *copyfrom = copy_from_user_flock64;
11737 to_flock64_fn *copyto = copy_to_user_flock64;
11739 #ifdef TARGET_ARM
11740 if (!((CPUARMState *)cpu_env)->eabi) {
11741 copyfrom = copy_from_user_oabi_flock64;
11742 copyto = copy_to_user_oabi_flock64;
11744 #endif
11746 cmd = target_to_host_fcntl_cmd(arg2);
11747 if (cmd == -TARGET_EINVAL) {
11748 return cmd;
11751 switch(arg2) {
11752 case TARGET_F_GETLK64:
11753 ret = copyfrom(&fl, arg3);
11754 if (ret) {
11755 break;
11757 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11758 if (ret == 0) {
11759 ret = copyto(arg3, &fl);
11761 break;
11763 case TARGET_F_SETLK64:
11764 case TARGET_F_SETLKW64:
11765 ret = copyfrom(&fl, arg3);
11766 if (ret) {
11767 break;
11769 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11770 break;
11771 default:
11772 ret = do_fcntl(arg1, arg2, arg3);
11773 break;
11775 return ret;
11777 #endif
11778 #ifdef TARGET_NR_cacheflush
11779 case TARGET_NR_cacheflush:
11780 /* self-modifying code is handled automatically, so nothing needed */
11781 return 0;
11782 #endif
11783 #ifdef TARGET_NR_getpagesize
11784 case TARGET_NR_getpagesize:
11785 return TARGET_PAGE_SIZE;
11786 #endif
11787 case TARGET_NR_gettid:
11788 return get_errno(sys_gettid());
11789 #ifdef TARGET_NR_readahead
11790 case TARGET_NR_readahead:
11791 #if TARGET_ABI_BITS == 32
11792 if (regpairs_aligned(cpu_env, num)) {
11793 arg2 = arg3;
11794 arg3 = arg4;
11795 arg4 = arg5;
11797 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11798 #else
11799 ret = get_errno(readahead(arg1, arg2, arg3));
11800 #endif
11801 return ret;
11802 #endif
11803 #ifdef CONFIG_ATTR
11804 #ifdef TARGET_NR_setxattr
11805 case TARGET_NR_listxattr:
11806 case TARGET_NR_llistxattr:
11808 void *p, *b = 0;
11809 if (arg2) {
11810 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11811 if (!b) {
11812 return -TARGET_EFAULT;
11815 p = lock_user_string(arg1);
11816 if (p) {
11817 if (num == TARGET_NR_listxattr) {
11818 ret = get_errno(listxattr(p, b, arg3));
11819 } else {
11820 ret = get_errno(llistxattr(p, b, arg3));
11822 } else {
11823 ret = -TARGET_EFAULT;
11825 unlock_user(p, arg1, 0);
11826 unlock_user(b, arg2, arg3);
11827 return ret;
11829 case TARGET_NR_flistxattr:
11831 void *b = 0;
11832 if (arg2) {
11833 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11834 if (!b) {
11835 return -TARGET_EFAULT;
11838 ret = get_errno(flistxattr(arg1, b, arg3));
11839 unlock_user(b, arg2, arg3);
11840 return ret;
11842 case TARGET_NR_setxattr:
11843 case TARGET_NR_lsetxattr:
11845 void *p, *n, *v = 0;
11846 if (arg3) {
11847 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11848 if (!v) {
11849 return -TARGET_EFAULT;
11852 p = lock_user_string(arg1);
11853 n = lock_user_string(arg2);
11854 if (p && n) {
11855 if (num == TARGET_NR_setxattr) {
11856 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11857 } else {
11858 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11860 } else {
11861 ret = -TARGET_EFAULT;
11863 unlock_user(p, arg1, 0);
11864 unlock_user(n, arg2, 0);
11865 unlock_user(v, arg3, 0);
11867 return ret;
11868 case TARGET_NR_fsetxattr:
11870 void *n, *v = 0;
11871 if (arg3) {
11872 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11873 if (!v) {
11874 return -TARGET_EFAULT;
11877 n = lock_user_string(arg2);
11878 if (n) {
11879 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11880 } else {
11881 ret = -TARGET_EFAULT;
11883 unlock_user(n, arg2, 0);
11884 unlock_user(v, arg3, 0);
11886 return ret;
11887 case TARGET_NR_getxattr:
11888 case TARGET_NR_lgetxattr:
11890 void *p, *n, *v = 0;
11891 if (arg3) {
11892 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11893 if (!v) {
11894 return -TARGET_EFAULT;
11897 p = lock_user_string(arg1);
11898 n = lock_user_string(arg2);
11899 if (p && n) {
11900 if (num == TARGET_NR_getxattr) {
11901 ret = get_errno(getxattr(p, n, v, arg4));
11902 } else {
11903 ret = get_errno(lgetxattr(p, n, v, arg4));
11905 } else {
11906 ret = -TARGET_EFAULT;
11908 unlock_user(p, arg1, 0);
11909 unlock_user(n, arg2, 0);
11910 unlock_user(v, arg3, arg4);
11912 return ret;
11913 case TARGET_NR_fgetxattr:
11915 void *n, *v = 0;
11916 if (arg3) {
11917 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11918 if (!v) {
11919 return -TARGET_EFAULT;
11922 n = lock_user_string(arg2);
11923 if (n) {
11924 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11925 } else {
11926 ret = -TARGET_EFAULT;
11928 unlock_user(n, arg2, 0);
11929 unlock_user(v, arg3, arg4);
11931 return ret;
11932 case TARGET_NR_removexattr:
11933 case TARGET_NR_lremovexattr:
11935 void *p, *n;
11936 p = lock_user_string(arg1);
11937 n = lock_user_string(arg2);
11938 if (p && n) {
11939 if (num == TARGET_NR_removexattr) {
11940 ret = get_errno(removexattr(p, n));
11941 } else {
11942 ret = get_errno(lremovexattr(p, n));
11944 } else {
11945 ret = -TARGET_EFAULT;
11947 unlock_user(p, arg1, 0);
11948 unlock_user(n, arg2, 0);
11950 return ret;
11951 case TARGET_NR_fremovexattr:
11953 void *n;
11954 n = lock_user_string(arg2);
11955 if (n) {
11956 ret = get_errno(fremovexattr(arg1, n));
11957 } else {
11958 ret = -TARGET_EFAULT;
11960 unlock_user(n, arg2, 0);
11962 return ret;
11963 #endif
11964 #endif /* CONFIG_ATTR */
11965 #ifdef TARGET_NR_set_thread_area
11966 case TARGET_NR_set_thread_area:
11967 #if defined(TARGET_MIPS)
11968 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11969 return 0;
11970 #elif defined(TARGET_CRIS)
11971 if (arg1 & 0xff)
11972 ret = -TARGET_EINVAL;
11973 else {
11974 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11975 ret = 0;
11977 return ret;
11978 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11979 return do_set_thread_area(cpu_env, arg1);
11980 #elif defined(TARGET_M68K)
11982 TaskState *ts = cpu->opaque;
11983 ts->tp_value = arg1;
11984 return 0;
11986 #else
11987 return -TARGET_ENOSYS;
11988 #endif
11989 #endif
11990 #ifdef TARGET_NR_get_thread_area
11991 case TARGET_NR_get_thread_area:
11992 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11993 return do_get_thread_area(cpu_env, arg1);
11994 #elif defined(TARGET_M68K)
11996 TaskState *ts = cpu->opaque;
11997 return ts->tp_value;
11999 #else
12000 return -TARGET_ENOSYS;
12001 #endif
12002 #endif
12003 #ifdef TARGET_NR_getdomainname
12004 case TARGET_NR_getdomainname:
12005 return -TARGET_ENOSYS;
12006 #endif
12008 #ifdef TARGET_NR_clock_settime
12009 case TARGET_NR_clock_settime:
12011 struct timespec ts;
12013 ret = target_to_host_timespec(&ts, arg2);
12014 if (!is_error(ret)) {
12015 ret = get_errno(clock_settime(arg1, &ts));
12017 return ret;
12019 #endif
12020 #ifdef TARGET_NR_clock_settime64
12021 case TARGET_NR_clock_settime64:
12023 struct timespec ts;
12025 ret = target_to_host_timespec64(&ts, arg2);
12026 if (!is_error(ret)) {
12027 ret = get_errno(clock_settime(arg1, &ts));
12029 return ret;
12031 #endif
12032 #ifdef TARGET_NR_clock_gettime
12033 case TARGET_NR_clock_gettime:
12035 struct timespec ts;
12036 ret = get_errno(clock_gettime(arg1, &ts));
12037 if (!is_error(ret)) {
12038 ret = host_to_target_timespec(arg2, &ts);
12040 return ret;
12042 #endif
12043 #ifdef TARGET_NR_clock_gettime64
12044 case TARGET_NR_clock_gettime64:
12046 struct timespec ts;
12047 ret = get_errno(clock_gettime(arg1, &ts));
12048 if (!is_error(ret)) {
12049 ret = host_to_target_timespec64(arg2, &ts);
12051 return ret;
12053 #endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        /* clock_getres(2): resolution of clock arg1, written to arg2. */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        /*
         * Fix: the conversion's return value was ignored, so a faulting
         * guest pointer silently returned success.  arg2 may legally be
         * NULL (the kernel accepts a NULL res and just returns 0), so
         * only convert — and report EFAULT — when a buffer was supplied.
         */
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        /* 64-bit-time variant of clock_getres for 32-bit targets. */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        /*
         * Fix: as with clock_getres, the conversion result was ignored,
         * hiding EFAULT on a bad guest pointer.  A NULL arg2 remains a
         * successful no-op, matching kernel behavior.
         */
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec64(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12076 #ifdef TARGET_NR_clock_nanosleep
12077 case TARGET_NR_clock_nanosleep:
12079 struct timespec ts;
12080 if (target_to_host_timespec(&ts, arg3)) {
12081 return -TARGET_EFAULT;
12083 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12084 &ts, arg4 ? &ts : NULL));
12086 * if the call is interrupted by a signal handler, it fails
12087 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12088 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12090 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12091 host_to_target_timespec(arg4, &ts)) {
12092 return -TARGET_EFAULT;
12095 return ret;
12097 #endif
12098 #ifdef TARGET_NR_clock_nanosleep_time64
12099 case TARGET_NR_clock_nanosleep_time64:
12101 struct timespec ts;
12103 if (target_to_host_timespec64(&ts, arg3)) {
12104 return -TARGET_EFAULT;
12107 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12108 &ts, arg4 ? &ts : NULL));
12110 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12111 host_to_target_timespec64(arg4, &ts)) {
12112 return -TARGET_EFAULT;
12114 return ret;
12116 #endif
12118 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12119 case TARGET_NR_set_tid_address:
/* set_tid_address(2): g2h() maps the guest address to the host view of
 * guest memory so the kernel's clear-child-tid write lands in-guest. */
12120 return get_errno(set_tid_address((int *)g2h(arg1)))
12121 #endif
12123 case TARGET_NR_tkill:
/* Signal numbers differ between target and host ABIs; translate with
 * target_to_host_signal() before delivery.  safe_* wrappers are the
 * file's restartable-syscall helpers. */
12124 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12126 case TARGET_NR_tgkill:
12127 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12128 target_to_host_signal(arg3)));
12130 #ifdef TARGET_NR_set_robust_list
12131 case TARGET_NR_set_robust_list:
12132 case TARGET_NR_get_robust_list:
12133 /* The ABI for supporting robust futexes has userspace pass
12134 * the kernel a pointer to a linked list which is updated by
12135 * userspace after the syscall; the list is walked by the kernel
12136 * when the thread exits. Since the linked list in QEMU guest
12137 * memory isn't a valid linked list for the host and we have
12138 * no way to reliably intercept the thread-death event, we can't
12139 * support these. Silently return ENOSYS so that guest userspace
12140 * falls back to a non-robust futex implementation (which should
12141 * be OK except in the corner case of the guest crashing while
12142 * holding a mutex that is shared with another process via
12143 * shared memory).
12145 return -TARGET_ENOSYS;
12146 #endif
12148 #if defined(TARGET_NR_utimensat)
12149 case TARGET_NR_utimensat:
12151 struct timespec *tsp, ts[2];
12152 if (!arg3) {
12153 tsp = NULL;
12154 } else {
12155 if (target_to_host_timespec(ts, arg3)) {
12156 return -TARGET_EFAULT;
12158 if (target_to_host_timespec(ts + 1, arg3 +
12159 sizeof(struct target_timespec))) {
12160 return -TARGET_EFAULT;
12162 tsp = ts;
12164 if (!arg2)
12165 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12166 else {
12167 if (!(p = lock_user_string(arg2))) {
12168 return -TARGET_EFAULT;
12170 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12171 unlock_user(p, arg2, 0);
12174 return ret;
12175 #endif
12176 #ifdef TARGET_NR_utimensat_time64
12177 case TARGET_NR_utimensat_time64:
12179 struct timespec *tsp, ts[2];
12180 if (!arg3) {
12181 tsp = NULL;
12182 } else {
12183 if (target_to_host_timespec64(ts, arg3)) {
12184 return -TARGET_EFAULT;
12186 if (target_to_host_timespec64(ts + 1, arg3 +
12187 sizeof(struct target__kernel_timespec))) {
12188 return -TARGET_EFAULT;
12190 tsp = ts;
12192 if (!arg2)
12193 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12194 else {
12195 p = lock_user_string(arg2);
12196 if (!p) {
12197 return -TARGET_EFAULT;
12199 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12200 unlock_user(p, arg2, 0);
12203 return ret;
12204 #endif
12205 #ifdef TARGET_NR_futex
12206 case TARGET_NR_futex:
12207 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12208 #endif
12209 #ifdef TARGET_NR_futex_time64
12210 case TARGET_NR_futex_time64:
12211 return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12212 #endif
12213 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12214 case TARGET_NR_inotify_init:
12215 ret = get_errno(sys_inotify_init());
12216 if (ret >= 0) {
12217 fd_trans_register(ret, &target_inotify_trans);
12219 return ret;
12220 #endif
12221 #ifdef CONFIG_INOTIFY1
12222 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12223 case TARGET_NR_inotify_init1:
12224 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12225 fcntl_flags_tbl)));
12226 if (ret >= 0) {
12227 fd_trans_register(ret, &target_inotify_trans);
12229 return ret;
12230 #endif
12231 #endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Add a watch for the path named by arg2 on the inotify fd arg1,
         * with event mask arg3. */
        p = lock_user_string(arg2);
        if (!p) {
            /* Bad guest pointer: fail with EFAULT instead of handing a
             * NULL path on to the host syscall (every sibling case that
             * locks a user string performs this check). */
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
12239 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12240 case TARGET_NR_inotify_rm_watch:
12241 return get_errno(sys_inotify_rm_watch(arg1, arg2));
12242 #endif
12244 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12245 case TARGET_NR_mq_open:
      /* POSIX message queue open: translate open flags and the optional
       * mq_attr before calling the host mq_open. */
12247 struct mq_attr posix_mq_attr;
12248 struct mq_attr *pposix_mq_attr;
12249 int host_flags;
12251 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12252 pposix_mq_attr = NULL;
12253 if (arg4) {
12254 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12255 return -TARGET_EFAULT;
12257 pposix_mq_attr = &posix_mq_attr;
      /* NOTE(review): the name is locked starting at arg1 - 1 —
       * presumably so the host library sees a leading '/' that the guest
       * ABI omits; confirm the byte before the string is addressable. */
12259 p = lock_user_string(arg1 - 1);
12260 if (!p) {
12261 return -TARGET_EFAULT;
12263 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12264 unlock_user (p, arg1, 0);
12266 return ret;
12268 case TARGET_NR_mq_unlink:
      /* Same arg1 - 1 name convention as mq_open above. */
12269 p = lock_user_string(arg1 - 1);
12270 if (!p) {
12271 return -TARGET_EFAULT;
12273 ret = get_errno(mq_unlink(p));
12274 unlock_user (p, arg1, 0);
12275 return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* The message buffer is only read, so lock it for read and
             * fail with EFAULT on a bad guest pointer rather than passing
             * NULL through to the host syscall (the original performed no
             * NULL check). */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                /* arg5 is an absolute timeout; convert it in, and copy the
                 * (possibly updated) value back out on success. */
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            /* Read-only buffer: nothing to copy back to the guest (the
             * original passed arg3 here, needlessly copying back). */
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            /* 64-bit-time variant of mq_timedsend. The buffer is read-only,
             * so check the lock result and skip the copy-back on unlock
             * (the original did neither). */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* The receive buffer is written by the host, so it must be
             * locked for WRITE access (the original used VERIFY_READ,
             * checking the wrong access direction) and there is no need to
             * copy the guest's stale contents in (copy flag 0). */
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            /* Copy the received message back to the guest. */
            unlock_user(p, arg2, arg3);
            /* prio is only meaningful on success; a bad arg4 pointer is
             * reported instead of silently ignored. */
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            /* 64-bit-time variant of mq_timedreceive. As with the 32-bit
             * case, the buffer is written by the host: lock for WRITE
             * (original used VERIFY_READ), do not copy stale guest data
             * in, and check the lock result. */
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            /* Store prio only on success; report EFAULT on a bad arg4. */
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12374 /* Not implemented for now... */
12375 /* case TARGET_NR_mq_notify: */
12376 /* break; */
12378 case TARGET_NR_mq_getsetattr:
12380 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12381 ret = 0;
12382 if (arg2 != 0) {
12383 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12384 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12385 &posix_mq_attr_out));
12386 } else if (arg3 != 0) {
12387 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12389 if (ret == 0 && arg3 != 0) {
12390 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12393 return ret;
12394 #endif
12396 #ifdef CONFIG_SPLICE
12397 #ifdef TARGET_NR_tee
12398 case TARGET_NR_tee:
      /* tee: duplicate up to arg3 bytes between two pipes; flags in arg4.
       * No guest memory is involved, so no conversion is needed. */
12400 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12402 return ret;
12403 #endif
12404 #ifdef TARGET_NR_splice
12405 case TARGET_NR_splice:
      /* splice: optional 64-bit offsets are read from guest memory and
       * written back afterwards; a NULL guest pointer means "use the
       * fd's own file offset". */
12407 loff_t loff_in, loff_out;
12408 loff_t *ploff_in = NULL, *ploff_out = NULL;
12409 if (arg2) {
12410 if (get_user_u64(loff_in, arg2)) {
12411 return -TARGET_EFAULT;
12413 ploff_in = &loff_in;
12415 if (arg4) {
12416 if (get_user_u64(loff_out, arg4)) {
12417 return -TARGET_EFAULT;
12419 ploff_out = &loff_out;
12421 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
      /* NOTE(review): the offsets are written back even when splice
       * failed; harmless if the host leaves them unchanged, but worth
       * confirming. */
12422 if (arg2) {
12423 if (put_user_u64(loff_in, arg2)) {
12424 return -TARGET_EFAULT;
12427 if (arg4) {
12428 if (put_user_u64(loff_out, arg4)) {
12429 return -TARGET_EFAULT;
12433 return ret;
12434 #endif
12435 #ifdef TARGET_NR_vmsplice
12436 case TARGET_NR_vmsplice:
12438 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12439 if (vec != NULL) {
12440 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12441 unlock_iovec(vec, arg2, arg3, 0);
12442 } else {
      /* lock_iovec reports failure via errno; translate for the guest. */
12443 ret = -host_to_target_errno(errno);
12446 return ret;
12447 #endif
12448 #endif /* CONFIG_SPLICE */
12449 #ifdef CONFIG_EVENTFD
12450 #if defined(TARGET_NR_eventfd)
12451 case TARGET_NR_eventfd:
      /* Legacy eventfd: no flags argument. */
12452 ret = get_errno(eventfd(arg1, 0));
12453 if (ret >= 0) {
      /* Register an fd translator so the 8-byte counter value is
       * byte-swapped for the guest on read/write. */
12454 fd_trans_register(ret, &target_eventfd_trans);
12456 return ret;
12457 #endif
12458 #if defined(TARGET_NR_eventfd2)
12459 case TARGET_NR_eventfd2:
      /* eventfd2: translate only the NONBLOCK/CLOEXEC bits; all other
       * guest flag bits are passed through unchanged. */
12461 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12462 if (arg2 & TARGET_O_NONBLOCK) {
12463 host_flags |= O_NONBLOCK;
12465 if (arg2 & TARGET_O_CLOEXEC) {
12466 host_flags |= O_CLOEXEC;
12468 ret = get_errno(eventfd(arg1, host_flags));
12469 if (ret >= 0) {
12470 fd_trans_register(ret, &target_eventfd_trans);
12472 return ret;
12474 #endif
12475 #endif /* CONFIG_EVENTFD */
12476 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12477 case TARGET_NR_fallocate:
12478 #if TARGET_ABI_BITS == 32
      /* On 32-bit ABIs each 64-bit offset/length is split across two
       * registers and must be reassembled. */
12479 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12480 target_offset64(arg5, arg6)));
12481 #else
12482 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12483 #endif
12484 return ret;
12485 #endif
12486 #if defined(CONFIG_SYNC_FILE_RANGE)
12487 #if defined(TARGET_NR_sync_file_range)
12488 case TARGET_NR_sync_file_range:
12489 #if TARGET_ABI_BITS == 32
12490 #if defined(TARGET_MIPS)
      /* MIPS skips an argument slot so the 64-bit pairs stay
       * register-aligned, shifting everything up by one. */
12491 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12492 target_offset64(arg5, arg6), arg7));
12493 #else
12494 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12495 target_offset64(arg4, arg5), arg6));
12496 #endif /* !TARGET_MIPS */
12497 #else
12498 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12499 #endif
12500 return ret;
12501 #endif
12502 #if defined(TARGET_NR_sync_file_range2) || \
12503 defined(TARGET_NR_arm_sync_file_range)
12504 #if defined(TARGET_NR_sync_file_range2)
12505 case TARGET_NR_sync_file_range2:
12506 #endif
12507 #if defined(TARGET_NR_arm_sync_file_range)
12508 case TARGET_NR_arm_sync_file_range:
12509 #endif
12510 /* This is like sync_file_range but the arguments are reordered */
12511 #if TARGET_ABI_BITS == 32
12512 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12513 target_offset64(arg5, arg6), arg2))
12520 #if defined(TARGET_NR_signalfd4)
12521 case TARGET_NR_signalfd4:
      /* NOTE(review): arg3 (presumably the sigsetsize) is not forwarded;
       * confirm do_signalfd4 validates the mask size itself. */
12522 return do_signalfd4(arg1, arg2, arg4);
12523 #endif
12524 #if defined(TARGET_NR_signalfd)
12525 case TARGET_NR_signalfd:
      /* Legacy signalfd is signalfd4 with no flags. */
12526 return do_signalfd4(arg1, arg2, 0);
12527 #endif
12528 #if defined(CONFIG_EPOLL)
12529 #if defined(TARGET_NR_epoll_create)
12530 case TARGET_NR_epoll_create:
12531 return get_errno(epoll_create(arg1));
12532 #endif
12533 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12534 case TARGET_NR_epoll_create1:
12535 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12536 #endif
12537 #if defined(TARGET_NR_epoll_ctl)
12538 case TARGET_NR_epoll_ctl:
12540 struct epoll_event ep;
12541 struct epoll_event *epp = 0;
      /* arg4 may be NULL (e.g. for EPOLL_CTL_DEL); otherwise convert the
       * guest's epoll_event to host byte order. */
12542 if (arg4) {
12543 struct target_epoll_event *target_ep;
12544 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12545 return -TARGET_EFAULT;
12547 ep.events = tswap32(target_ep->events);
12548 /* The epoll_data_t union is just opaque data to the kernel,
12549 * so we transfer all 64 bits across and need not worry what
12550 * actual data type it is.
       */
12552 ep.data.u64 = tswap64(target_ep->data.u64);
12553 unlock_user_struct(target_ep, arg4, 0);
12554 epp = &ep;
12556 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12558 #endif
12560 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12561 #if defined(TARGET_NR_epoll_wait)
12562 case TARGET_NR_epoll_wait:
12563 #endif
12564 #if defined(TARGET_NR_epoll_pwait)
12565 case TARGET_NR_epoll_pwait:
12566 #endif
12568 struct target_epoll_event *target_ep;
12569 struct epoll_event *ep;
12570 int epfd = arg1;
12571 int maxevents = arg3;
12572 int timeout = arg4;
      /* Bound maxevents so both the guest buffer lock and the host-side
       * event array below stay a reasonable size. */
12574 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12575 return -TARGET_EINVAL;
12578 target_ep = lock_user(VERIFY_WRITE, arg2,
12579 maxevents * sizeof(struct target_epoll_event), 1);
12580 if (!target_ep) {
12581 return -TARGET_EFAULT;
12584 ep = g_try_new(struct epoll_event, maxevents);
12585 if (!ep) {
12586 unlock_user(target_ep, arg2, 0);
12587 return -TARGET_ENOMEM;
      /* Shared tail for epoll_wait/epoll_pwait: dispatch on the actual
       * syscall number to decide whether a signal mask is involved. */
12590 switch (num) {
12591 #if defined(TARGET_NR_epoll_pwait)
12592 case TARGET_NR_epoll_pwait:
12594 target_sigset_t *target_set;
12595 sigset_t _set, *set = &_set;
12597 if (arg5) {
12598 if (arg6 != sizeof(target_sigset_t)) {
12599 ret = -TARGET_EINVAL;
12600 break;
12603 target_set = lock_user(VERIFY_READ, arg5,
12604 sizeof(target_sigset_t), 1);
12605 if (!target_set) {
12606 ret = -TARGET_EFAULT;
12607 break;
12609 target_to_host_sigset(set, target_set);
12610 unlock_user(target_set, arg5, 0);
12611 } else {
12612 set = NULL;
12615 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12616 set, SIGSET_T_SIZE));
12617 break;
12619 #endif
12620 #if defined(TARGET_NR_epoll_wait)
12621 case TARGET_NR_epoll_wait:
      /* Plain epoll_wait is epoll_pwait with no signal mask. */
12622 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12623 NULL, 0));
12624 break;
12625 #endif
12626 default:
12627 ret = -TARGET_ENOSYS;
12629 if (!is_error(ret)) {
      /* ret is the number of ready events; byte-swap each one back into
       * the guest's buffer and unlock exactly that many. */
12630 int i;
12631 for (i = 0; i < ret; i++) {
12632 target_ep[i].events = tswap32(ep[i].events);
12633 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12635 unlock_user(target_ep, arg2,
12636 ret * sizeof(struct target_epoll_event));
12637 } else {
12638 unlock_user(target_ep, arg2, 0);
12640 g_free(ep);
12641 return ret;
12643 #endif
12644 #endif
12645 #ifdef TARGET_NR_prlimit64
12646 case TARGET_NR_prlimit64:
12648 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12649 struct target_rlimit64 *target_rnew, *target_rold;
12650 struct host_rlimit64 rnew, rold, *rnewp = 0;
12651 int resource = target_to_host_resource(arg2);
      /* NOTE(review): new limits for AS/DATA/STACK are deliberately not
       * read (rnewp stays NULL), presumably so the guest cannot restrict
       * QEMU's own memory usage — confirm this rationale. */
12653 if (arg3 && (resource != RLIMIT_AS &&
12654 resource != RLIMIT_DATA &&
12655 resource != RLIMIT_STACK)) {
12656 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12657 return -TARGET_EFAULT;
12659 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12660 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12661 unlock_user_struct(target_rnew, arg3, 0);
12662 rnewp = &rnew;
12665 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
      /* Copy the old limits back to the guest if requested. */
12666 if (!is_error(ret) && arg4) {
12667 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12668 return -TARGET_EFAULT;
12670 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12671 target_rold->rlim_max = tswap64(rold.rlim_max);
12672 unlock_user_struct(target_rold, arg4, 1);
12674 return ret;
12676 #endif
12677 #ifdef TARGET_NR_gethostname
12678 case TARGET_NR_gethostname:
12680 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12681 if (name) {
12682 ret = get_errno(gethostname(name, arg2));
12683 unlock_user(name, arg1, arg2);
12684 } else {
12685 ret = -TARGET_EFAULT;
12687 return ret;
12689 #endif
12690 #ifdef TARGET_NR_atomic_cmpxchg_32
12691 case TARGET_NR_atomic_cmpxchg_32:
12693 /* should use start_exclusive from main.c */
12694 abi_ulong mem_value;
12695 if (get_user_u32(mem_value, arg6)) {
      /* Unreadable guest address: queue a SIGSEGV at that address. */
12696 target_siginfo_t info;
12697 info.si_signo = SIGSEGV;
12698 info.si_errno = 0;
12699 info.si_code = TARGET_SEGV_MAPERR;
12700 info._sifields._sigfault._addr = arg6;
12701 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12702 QEMU_SI_FAULT, &info);
12703 ret = 0xdeadbeef;
      /* NOTE(review): per the comment above, the compare-and-swap below
       * is not actually atomic (read, compare, write are separate steps)
       * and can race against other guest threads. */
12706 if (mem_value == arg2)
12707 put_user_u32(arg1, arg6);
12708 return mem_value;
12710 #endif
12711 #ifdef TARGET_NR_atomic_barrier
12712 case TARGET_NR_atomic_barrier:
12713 /* Like the kernel implementation and the
12714 qemu arm barrier, no-op this? */
12715 return 0;
12716 #endif
12718 #ifdef TARGET_NR_timer_create
12719 case TARGET_NR_timer_create:
12721 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12723 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12725 int clkid = arg1;
12726 int timer_index = next_free_host_timer();
12728 if (timer_index < 0) {
      /* Every slot in g_posix_timers is in use. */
12729 ret = -TARGET_EAGAIN;
12730 } else {
12731 timer_t *phtimer = g_posix_timers + timer_index;
12733 if (arg2) {
12734 phost_sevp = &host_sevp;
12735 ret = target_to_host_sigevent(phost_sevp, arg2);
12736 if (ret != 0) {
12737 return ret;
12741 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12742 if (ret) {
12743 phtimer = NULL;
12744 } else {
      /* Hand the guest an opaque id: a magic tag OR'd with the slot
       * index, decoded later by get_timer_id(). */
12745 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12746 return -TARGET_EFAULT;
12750 return ret;
12752 #endif
12754 #ifdef TARGET_NR_timer_settime
12755 case TARGET_NR_timer_settime:
12757 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12758 * struct itimerspec * old_value */
12759 target_timer_t timerid = get_timer_id(arg1);
12761 if (timerid < 0) {
      /* get_timer_id returns a (negative) target errno on a bad id. */
12762 ret = timerid;
12763 } else if (arg3 == 0) {
12764 ret = -TARGET_EINVAL;
12765 } else {
12766 timer_t htimer = g_posix_timers[timerid];
12767 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12769 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12770 return -TARGET_EFAULT;
12772 ret = get_errno(
12773 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12774 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12775 return -TARGET_EFAULT;
12778 return ret;
12780 #endif
12782 #ifdef TARGET_NR_timer_settime64
12783 case TARGET_NR_timer_settime64:
      /* 64-bit-time variant of timer_settime. */
12785 target_timer_t timerid = get_timer_id(arg1);
12787 if (timerid < 0) {
12788 ret = timerid;
12789 } else if (arg3 == 0) {
12790 ret = -TARGET_EINVAL;
12791 } else {
12792 timer_t htimer = g_posix_timers[timerid];
12793 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12795 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12796 return -TARGET_EFAULT;
12798 ret = get_errno(
12799 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12800 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12801 return -TARGET_EFAULT;
12804 return ret;
12806 #endif
12808 #ifdef TARGET_NR_timer_gettime
12809 case TARGET_NR_timer_gettime:
12811 /* args: timer_t timerid, struct itimerspec *curr_value */
12812 target_timer_t timerid = get_timer_id(arg1);
12814 if (timerid < 0) {
12815 ret = timerid;
12816 } else if (!arg2) {
12817 ret = -TARGET_EFAULT;
12818 } else {
12819 timer_t htimer = g_posix_timers[timerid];
12820 struct itimerspec hspec;
12821 ret = get_errno(timer_gettime(htimer, &hspec));
12823 if (host_to_target_itimerspec(arg2, &hspec)) {
12824 ret = -TARGET_EFAULT;
12827 return ret;
12829 #endif
12831 #ifdef TARGET_NR_timer_gettime64
12832 case TARGET_NR_timer_gettime64:
12834 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12835 target_timer_t timerid = get_timer_id(arg1);
12837 if (timerid < 0) {
12838 ret = timerid;
12839 } else if (!arg2) {
12840 ret = -TARGET_EFAULT;
12841 } else {
12842 timer_t htimer = g_posix_timers[timerid];
12843 struct itimerspec hspec;
12844 ret = get_errno(timer_gettime(htimer, &hspec));
12846 if (host_to_target_itimerspec64(arg2, &hspec)) {
12847 ret = -TARGET_EFAULT;
12850 return ret;
12852 #endif
12854 #ifdef TARGET_NR_timer_getoverrun
12855 case TARGET_NR_timer_getoverrun:
12857 /* args: timer_t timerid */
12858 target_timer_t timerid = get_timer_id(arg1);
12860 if (timerid < 0) {
12861 ret = timerid;
12862 } else {
12863 timer_t htimer = g_posix_timers[timerid];
12864 ret = get_errno(timer_getoverrun(htimer));
12866 return ret;
12868 #endif
12870 #ifdef TARGET_NR_timer_delete
12871 case TARGET_NR_timer_delete:
12873 /* args: timer_t timerid */
12874 target_timer_t timerid = get_timer_id(arg1);
12876 if (timerid < 0) {
12877 ret = timerid;
12878 } else {
12879 timer_t htimer = g_posix_timers[timerid];
12880 ret = get_errno(timer_delete(htimer));
      /* Free the slot for reuse by a later timer_create. */
12881 g_posix_timers[timerid] = 0;
12883 return ret;
12885 #endif
12887 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12888 case TARGET_NR_timerfd_create:
      /* Only the fd flag bits need translating; the clockid is shared. */
12889 return get_errno(timerfd_create(arg1,
12890 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12891 #endif
12893 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12894 case TARGET_NR_timerfd_gettime:
12896 struct itimerspec its_curr;
12898 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12900 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12901 return -TARGET_EFAULT;
12904 return ret;
12905 #endif
12907 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12908 case TARGET_NR_timerfd_gettime64:
      /* 64-bit-time variant: only the guest-side struct differs. */
12910 struct itimerspec its_curr;
12912 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12914 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12915 return -TARGET_EFAULT;
12918 return ret;
12919 #endif
12921 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12922 case TARGET_NR_timerfd_settime:
12924 struct itimerspec its_new, its_old, *p_new;
      /* NULL new-value is passed through; the host reports EINVAL. */
12926 if (arg3) {
12927 if (target_to_host_itimerspec(&its_new, arg3)) {
12928 return -TARGET_EFAULT;
12930 p_new = &its_new;
12931 } else {
12932 p_new = NULL;
12935 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12937 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12938 return -TARGET_EFAULT;
12941 return ret;
12942 #endif
12944 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12945 case TARGET_NR_timerfd_settime64:
12947 struct itimerspec its_new, its_old, *p_new;
12949 if (arg3) {
12950 if (target_to_host_itimerspec64(&its_new, arg3)) {
12951 return -TARGET_EFAULT;
12953 p_new = &its_new;
12954 } else {
12955 p_new = NULL;
12958 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12960 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12961 return -TARGET_EFAULT;
12964 return ret;
12965 #endif
12967 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12968 case TARGET_NR_ioprio_get:
      /* which/who/prio values are shared between guest and host ABIs. */
12969 return get_errno(ioprio_get(arg1, arg2));
12970 #endif
12972 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12973 case TARGET_NR_ioprio_set:
12974 return get_errno(ioprio_set(arg1, arg2, arg3));
12975 #endif
12977 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12978 case TARGET_NR_setns:
12979 return get_errno(setns(arg1, arg2));
12980 #endif
12981 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12982 case TARGET_NR_unshare:
12983 return get_errno(unshare(arg1));
12984 #endif
12985 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12986 case TARGET_NR_kcmp:
12987 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12988 #endif
12989 #ifdef TARGET_NR_swapcontext
12990 case TARGET_NR_swapcontext:
12991 /* PowerPC specific. */
12992 return do_swapcontext(cpu_env, arg1, arg2, arg3);
12993 #endif
12994 #ifdef TARGET_NR_memfd_create
12995 case TARGET_NR_memfd_create:
12996 p = lock_user_string(arg1);
12997 if (!p) {
12998 return -TARGET_EFAULT;
13000 ret = get_errno(memfd_create(p, arg2));
      /* A memfd needs no read/write translation; drop any stale fd
       * translator registered for this descriptor number. */
13001 fd_trans_unregister(ret);
13002 unlock_user(p, arg1, 0);
13003 return ret;
13004 #endif
13005 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13006 case TARGET_NR_membarrier:
13007 return get_errno(membarrier(arg1, arg2));
13008 #endif
13010 default:
      /* Syscall number not handled above for this target. */
13011 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13012 return -TARGET_ENOSYS;
13014 return ret;
/* Public syscall entry point: wraps do_syscall1 with plugin/trace
 * recording, optional -strace logging, and (in debug builds) forced
 * syscall-restart exercising. */
13017 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13018 abi_long arg2, abi_long arg3, abi_long arg4,
13019 abi_long arg5, abi_long arg6, abi_long arg7,
13020 abi_long arg8)
13022 CPUState *cpu = env_cpu(cpu_env);
13023 abi_long ret;
13025 #ifdef DEBUG_ERESTARTSYS
13026 /* Debug-only code for exercising the syscall-restart code paths
13027 * in the per-architecture cpu main loops: restart every syscall
13028 * the guest makes once before letting it through.
       */
13031 static bool flag;
13032 flag = !flag;
13033 if (flag) {
13034 return -TARGET_ERESTARTSYS;
13037 #endif
13039 record_syscall_start(cpu, num, arg1,
13040 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13042 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
      /* Only format the strace line when -strace logging is enabled. */
13043 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13046 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13047 arg5, arg6, arg7, arg8);
13049 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13050 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13051 arg3, arg4, arg5, arg6);
13054 record_syscall_return(cpu, num, ret);
13055 return ret;