/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef CONFIG_KCOV
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"

#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
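
/*
 * Illustrative sketch (an assumption, not the code QEMU runs; do_fork()
 * performs the real checks): how the masks above classify a guest
 * clone() flags word.
 *
 *     static bool looks_like_pthread_create(unsigned int flags)
 *     {
 *         return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
 *                (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
 *     }
 *
 * A fork-style clone is the mirror image: none of CLONE_THREAD_FLAGS
 * may be set, and any bit in CLONE_INVALID_FORK_FLAGS is rejected with
 * an error.
 */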
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                              \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);             \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,  \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}

#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
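
/*
 * For example, the _syscall0() line above expands to roughly:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * where the #define above resolves __NR_sys_gettid to __NR_gettid, i.e.
 * a direct wrapper around the raw host syscall, used where the host
 * libc does not (or did not historically) expose a wrapper of its own.
 */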
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
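
/*
 * Illustrative note (an assumption about the matching release path,
 * which lives with the timer_delete handling later in this file): a
 * slot claimed by next_free_host_timer() is recycled by storing 0 back
 * into the table,
 *
 *     g_posix_timers[timerid] = 0;
 *
 * so the allocator is simply a linear scan over a fixed-size array.
 */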

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]          = TARGET_EAGAIN,
    [EIDRM]           = TARGET_EIDRM,
    [ECHRNG]          = TARGET_ECHRNG,
    [EL2NSYNC]        = TARGET_EL2NSYNC,
    [EL3HLT]          = TARGET_EL3HLT,
    [EL3RST]          = TARGET_EL3RST,
    [ELNRNG]          = TARGET_ELNRNG,
    [EUNATCH]         = TARGET_EUNATCH,
    [ENOCSI]          = TARGET_ENOCSI,
    [EL2HLT]          = TARGET_EL2HLT,
    [EDEADLK]         = TARGET_EDEADLK,
    [ENOLCK]          = TARGET_ENOLCK,
    [EBADE]           = TARGET_EBADE,
    [EBADR]           = TARGET_EBADR,
    [EXFULL]          = TARGET_EXFULL,
    [ENOANO]          = TARGET_ENOANO,
    [EBADRQC]         = TARGET_EBADRQC,
    [EBADSLT]         = TARGET_EBADSLT,
    [EBFONT]          = TARGET_EBFONT,
    [ENOSTR]          = TARGET_ENOSTR,
    [ENODATA]         = TARGET_ENODATA,
    [ETIME]           = TARGET_ETIME,
    [ENOSR]           = TARGET_ENOSR,
    [ENONET]          = TARGET_ENONET,
    [ENOPKG]          = TARGET_ENOPKG,
    [EREMOTE]         = TARGET_EREMOTE,
    [ENOLINK]         = TARGET_ENOLINK,
    [EADV]            = TARGET_EADV,
    [ESRMNT]          = TARGET_ESRMNT,
    [ECOMM]           = TARGET_ECOMM,
    [EPROTO]          = TARGET_EPROTO,
    [EDOTDOT]         = TARGET_EDOTDOT,
    [EMULTIHOP]       = TARGET_EMULTIHOP,
    [EBADMSG]         = TARGET_EBADMSG,
    [ENAMETOOLONG]    = TARGET_ENAMETOOLONG,
    [EOVERFLOW]       = TARGET_EOVERFLOW,
    [ENOTUNIQ]        = TARGET_ENOTUNIQ,
    [EBADFD]          = TARGET_EBADFD,
    [EREMCHG]         = TARGET_EREMCHG,
    [ELIBACC]         = TARGET_ELIBACC,
    [ELIBBAD]         = TARGET_ELIBBAD,
    [ELIBSCN]         = TARGET_ELIBSCN,
    [ELIBMAX]         = TARGET_ELIBMAX,
    [ELIBEXEC]        = TARGET_ELIBEXEC,
    [EILSEQ]          = TARGET_EILSEQ,
    [ENOSYS]          = TARGET_ENOSYS,
    [ELOOP]           = TARGET_ELOOP,
    [ERESTART]        = TARGET_ERESTART,
    [ESTRPIPE]        = TARGET_ESTRPIPE,
    [ENOTEMPTY]       = TARGET_ENOTEMPTY,
    [EUSERS]          = TARGET_EUSERS,
    [ENOTSOCK]        = TARGET_ENOTSOCK,
    [EDESTADDRREQ]    = TARGET_EDESTADDRREQ,
    [EMSGSIZE]        = TARGET_EMSGSIZE,
    [EPROTOTYPE]      = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]     = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]      = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]    = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]    = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]      = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]   = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]        = TARGET_ENETDOWN,
    [ENETUNREACH]     = TARGET_ENETUNREACH,
    [ENETRESET]       = TARGET_ENETRESET,
    [ECONNABORTED]    = TARGET_ECONNABORTED,
    [ECONNRESET]      = TARGET_ECONNRESET,
    [ENOBUFS]         = TARGET_ENOBUFS,
    [EISCONN]         = TARGET_EISCONN,
    [ENOTCONN]        = TARGET_ENOTCONN,
    [EUCLEAN]         = TARGET_EUCLEAN,
    [ENOTNAM]         = TARGET_ENOTNAM,
    [ENAVAIL]         = TARGET_ENAVAIL,
    [EISNAM]          = TARGET_EISNAM,
    [EREMOTEIO]       = TARGET_EREMOTEIO,
    [EDQUOT]          = TARGET_EDQUOT,
    [ESHUTDOWN]       = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]    = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]       = TARGET_ETIMEDOUT,
    [ECONNREFUSED]    = TARGET_ECONNREFUSED,
    [EHOSTDOWN]       = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]    = TARGET_EHOSTUNREACH,
    [EALREADY]        = TARGET_EALREADY,
    [EINPROGRESS]     = TARGET_EINPROGRESS,
    [ESTALE]          = TARGET_ESTALE,
    [ECANCELED]       = TARGET_ECANCELED,
    [ENOMEDIUM]       = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]     = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]          = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]     = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]     = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]    = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]      = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]          = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]         = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]       = TARGET_EHWPOISON,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
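
/*
 * Worked example: a host read() that fails with EAGAIN is reported to
 * the guest as the *target's* EAGAIN value:
 *
 *     ret = get_errno(read(fd, buf, count));
 *     // read() returned -1 with errno == EAGAIN (host value),
 *     // so ret == -TARGET_EAGAIN (guest value).
 *
 * Errno values without a table entry (entry is 0) pass through
 * unchanged.
 */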

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
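
/*
 * Background note on safe_syscall() (provided by the safe-syscall
 * machinery elsewhere in linux-user, not in this file): if a guest
 * signal arrives before the host syscall is actually entered,
 * safe_##name() returns -1 with errno set to TARGET_ERESTARTSYS
 * instead of blocking. A caller written as, e.g.,
 *
 *     ret = get_errno(safe_read(fd, buf, count));
 *
 * (safe_read is declared just below) then yields -TARGET_ERESTARTSYS,
 * so the main loop can deliver the signal and restart the guest
 * syscall, avoiding the lost-wakeup race a raw blocking read() would
 * have.
 */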

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#ifdef TARGET_NR_rt_sigtimedwait
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedsend
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#ifdef TARGET_NR_mq_timedreceive
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
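
/*
 * Example (illustrative, on a host that defines SOCK_NONBLOCK):
 *
 *     int t = host_to_target_sock_type(SOCK_STREAM | SOCK_NONBLOCK);
 *     // t == (TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK)
 *
 * An unrecognised base type in the low nibble passes through
 * numerically unchanged.
 */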

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data from a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
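
/*
 * Worked example of the bit layout used above, assuming a 32-bit
 * abi_ulong: guest fd 37 lives in word 37 / 32 == 1, bit 37 % 32 == 5,
 * so copy_to_user_fdset() marks it with
 *
 *     v |= (abi_ulong)1 << 5;   // fd 37 ready, in target_fds[1]
 *
 * while __get_user()/__put_user() handle any host/guest endianness
 * swap of each word.
 */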

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
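
/*
 * Example (illustrative numbers): on an Alpha host (HOST_HZ == 1024)
 * emulating a 100 Hz target, 2048 host ticks convert to
 * (2048 * 100) / 1024 == 200 target clock ticks; the int64_t cast
 * keeps the multiplication from overflowing a 32-bit long.
 */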

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
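
/*
 * Worked example of the padding fix above (for a 32-bit guest on a
 * 64-bit host): target__kernel_timespec carries tv_nsec in a 64-bit
 * slot, of which only the low 32 bits of the already byte-swapped
 * value are meaningful, so a value that reads as 0xdeadbeef00000001
 * on the host must become 1:
 *
 *     host_ts->tv_nsec = (long)(abi_long)0xdeadbeef00000001ULL;
 *     // (abi_long) truncates to the 32-bit value 1, then the (long)
 *     // cast sign-extends it back to the host width.
 */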

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
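
/*
 * Worked example of the sun_path fixup above: a guest connecting to
 * "/tmp/sock" that passes len == offsetof(struct sockaddr_un, sun_path)
 * + strlen("/tmp/sock"), i.e. without counting the trailing NUL, has
 * cp[len - 1] == 'k' and cp[len] == '\0', so len is bumped by one and
 * the host kernel sees a properly terminated path.
 */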

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
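
/*
 * Example (illustrative): for an SCM_RIGHTS message carrying two file
 * descriptors, len == 2 * sizeof(int) and the loop above copies each
 * guest int into the host control buffer:
 *
 *     __get_user(fd[0], target_fd + 0);
 *     __get_user(fd[1], target_fd + 1);
 *
 * File descriptor values are the same on host and guest, so no
 * translation beyond the per-int byte swap is needed.
 */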
1673 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1674 struct msghdr *msgh)
1676 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1677 abi_long msg_controllen;
1678 abi_ulong target_cmsg_addr;
1679 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1680 socklen_t space = 0;
1682 msg_controllen = tswapal(target_msgh->msg_controllen);
1683 if (msg_controllen < sizeof (struct target_cmsghdr))
1684 goto the_end;
1685 target_cmsg_addr = tswapal(target_msgh->msg_control);
1686 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1687 target_cmsg_start = target_cmsg;
1688 if (!target_cmsg)
1689 return -TARGET_EFAULT;
1691 while (cmsg && target_cmsg) {
1692 void *data = CMSG_DATA(cmsg);
1693 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1695 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1696 int tgt_len, tgt_space;
1698 /* We never copy a half-header but may copy half-data;
1699 * this is Linux's behaviour in put_cmsg(). Note that
1700 * truncation here is a guest problem (which we report
1701 * to the guest via the CTRUNC bit), unlike truncation
1702 * in target_to_host_cmsg, which is a QEMU bug.
1704 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1705 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1706 break;
1709 if (cmsg->cmsg_level == SOL_SOCKET) {
1710 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1711 } else {
1712 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1714 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1716 /* Payload types which need a different size of payload on
1717 * the target must adjust tgt_len here.
1719 tgt_len = len;
1720 switch (cmsg->cmsg_level) {
1721 case SOL_SOCKET:
1722 switch (cmsg->cmsg_type) {
1723 case SO_TIMESTAMP:
1724 tgt_len = sizeof(struct target_timeval);
1725 break;
1726 default:
1727 break;
1729 break;
1730 default:
1731 break;
1734 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1735 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1736 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1739 /* We must now copy-and-convert len bytes of payload
1740 * into tgt_len bytes of destination space. Bear in mind
1741 * that in both source and destination we may be dealing
1742 * with a truncated value!
1744 switch (cmsg->cmsg_level) {
1745 case SOL_SOCKET:
1746 switch (cmsg->cmsg_type) {
1747 case SCM_RIGHTS:
1749 int *fd = (int *)data;
1750 int *target_fd = (int *)target_data;
1751 int i, numfds = tgt_len / sizeof(int);
1753 for (i = 0; i < numfds; i++) {
1754 __put_user(fd[i], target_fd + i);
1756 break;
1758 case SO_TIMESTAMP:
1760 struct timeval *tv = (struct timeval *)data;
1761 struct target_timeval *target_tv =
1762 (struct target_timeval *)target_data;
1764 if (len != sizeof(struct timeval) ||
1765 tgt_len != sizeof(struct target_timeval)) {
1766 goto unimplemented;
1769 /* copy struct timeval to target */
1770 __put_user(tv->tv_sec, &target_tv->tv_sec);
1771 __put_user(tv->tv_usec, &target_tv->tv_usec);
1772 break;
1774 case SCM_CREDENTIALS:
1776 struct ucred *cred = (struct ucred *)data;
1777 struct target_ucred *target_cred =
1778 (struct target_ucred *)target_data;
1780 __put_user(cred->pid, &target_cred->pid);
1781 __put_user(cred->uid, &target_cred->uid);
1782 __put_user(cred->gid, &target_cred->gid);
1783 break;
1785 default:
1786 goto unimplemented;
1788 break;
1790 case SOL_IP:
1791 switch (cmsg->cmsg_type) {
1792 case IP_TTL:
1794 uint32_t *v = (uint32_t *)data;
1795 uint32_t *t_int = (uint32_t *)target_data;
1797 if (len != sizeof(uint32_t) ||
1798 tgt_len != sizeof(uint32_t)) {
1799 goto unimplemented;
1801 __put_user(*v, t_int);
1802 break;
1804 case IP_RECVERR:
1806 struct errhdr_t {
1807 struct sock_extended_err ee;
1808 struct sockaddr_in offender;
1810 struct errhdr_t *errh = (struct errhdr_t *)data;
1811 struct errhdr_t *target_errh =
1812 (struct errhdr_t *)target_data;
1814 if (len != sizeof(struct errhdr_t) ||
1815 tgt_len != sizeof(struct errhdr_t)) {
1816 goto unimplemented;
1818 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1819 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1820 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1821 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1822 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1823 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1824 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1825 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1826 (void *) &errh->offender, sizeof(errh->offender));
1827 break;
1829 default:
1830 goto unimplemented;
1832 break;
1834 case SOL_IPV6:
1835 switch (cmsg->cmsg_type) {
1836 case IPV6_HOPLIMIT:
1838 uint32_t *v = (uint32_t *)data;
1839 uint32_t *t_int = (uint32_t *)target_data;
1841 if (len != sizeof(uint32_t) ||
1842 tgt_len != sizeof(uint32_t)) {
1843 goto unimplemented;
1845 __put_user(*v, t_int);
1846 break;
1848 case IPV6_RECVERR:
1850 struct errhdr6_t {
1851 struct sock_extended_err ee;
1852 struct sockaddr_in6 offender;
1854 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1855 struct errhdr6_t *target_errh =
1856 (struct errhdr6_t *)target_data;
1858 if (len != sizeof(struct errhdr6_t) ||
1859 tgt_len != sizeof(struct errhdr6_t)) {
1860 goto unimplemented;
1862 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1863 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1864 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1865 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1866 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1867 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1868 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1869 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1870 (void *) &errh->offender, sizeof(errh->offender));
1871 break;
1873 default:
1874 goto unimplemented;
1876 break;
1878 default:
1879 unimplemented:
1880 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1881 cmsg->cmsg_level, cmsg->cmsg_type);
1882 memcpy(target_data, data, MIN(len, tgt_len));
1883 if (tgt_len > len) {
1884 memset(target_data + len, 0, tgt_len - len);
1888 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1889 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1890 if (msg_controllen < tgt_space) {
1891 tgt_space = msg_controllen;
1893 msg_controllen -= tgt_space;
1894 space += tgt_space;
1895 cmsg = CMSG_NXTHDR(msgh, cmsg);
1896 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1897 target_cmsg_start);
1899 unlock_user(target_cmsg, target_cmsg_addr, space);
1900 the_end:
1901 target_msgh->msg_controllen = tswapal(space);
1902 return 0;
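/*
 * Editor's note: the sketch below is illustrative and not part of this file.
 * It shows the guest-side shape of an SCM_RIGHTS message that the conversion
 * above has to handle: each int in the payload is copied with __put_user(),
 * and oversized payloads are truncated with MSG_CTRUNC set. The helper name
 * is an assumption for the example only.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Receive a single file descriptor passed over a Unix-domain socket. */
static int recv_one_fd(int sock)
{
    char dummy;
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        struct cmsghdr hdr;
        char buf[CMSG_SPACE(sizeof(int))];
    } control;
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control.buf,
        .msg_controllen = sizeof(control.buf),
    };
    struct cmsghdr *cmsg;
    int fd = -1;

    if (recvmsg(sock, &msg, 0) < 0) {
        return -1;
    }
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
        }
    }
    return fd;
}
#endif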
1905 /* do_setsockopt() Must return target values and target errnos. */
1906 static abi_long do_setsockopt(int sockfd, int level, int optname,
1907 abi_ulong optval_addr, socklen_t optlen)
1909 abi_long ret;
1910 int val;
1911 struct ip_mreqn *ip_mreq;
1912 struct ip_mreq_source *ip_mreq_source;
1914 switch(level) {
1915 case SOL_TCP:
1916 /* TCP options all take an 'int' value. */
1917 if (optlen < sizeof(uint32_t))
1918 return -TARGET_EINVAL;
1920 if (get_user_u32(val, optval_addr))
1921 return -TARGET_EFAULT;
1922 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1923 break;
1924 case SOL_IP:
1925 switch(optname) {
1926 case IP_TOS:
1927 case IP_TTL:
1928 case IP_HDRINCL:
1929 case IP_ROUTER_ALERT:
1930 case IP_RECVOPTS:
1931 case IP_RETOPTS:
1932 case IP_PKTINFO:
1933 case IP_MTU_DISCOVER:
1934 case IP_RECVERR:
1935 case IP_RECVTTL:
1936 case IP_RECVTOS:
1937 #ifdef IP_FREEBIND
1938 case IP_FREEBIND:
1939 #endif
1940 case IP_MULTICAST_TTL:
1941 case IP_MULTICAST_LOOP:
1942 val = 0;
1943 if (optlen >= sizeof(uint32_t)) {
1944 if (get_user_u32(val, optval_addr))
1945 return -TARGET_EFAULT;
1946 } else if (optlen >= 1) {
1947 if (get_user_u8(val, optval_addr))
1948 return -TARGET_EFAULT;
1950 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1951 break;
1952 case IP_ADD_MEMBERSHIP:
1953 case IP_DROP_MEMBERSHIP:
1954 if (optlen < sizeof (struct target_ip_mreq) ||
1955 optlen > sizeof (struct target_ip_mreqn))
1956 return -TARGET_EINVAL;
1958 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1959 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1960 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1961 break;
1963 case IP_BLOCK_SOURCE:
1964 case IP_UNBLOCK_SOURCE:
1965 case IP_ADD_SOURCE_MEMBERSHIP:
1966 case IP_DROP_SOURCE_MEMBERSHIP:
1967 if (optlen != sizeof (struct target_ip_mreq_source))
1968 return -TARGET_EINVAL;
1970 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1971 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1972 unlock_user (ip_mreq_source, optval_addr, 0);
1973 break;
1975 default:
1976 goto unimplemented;
1978 break;
1979 case SOL_IPV6:
1980 switch (optname) {
1981 case IPV6_MTU_DISCOVER:
1982 case IPV6_MTU:
1983 case IPV6_V6ONLY:
1984 case IPV6_RECVPKTINFO:
1985 case IPV6_UNICAST_HOPS:
1986 case IPV6_MULTICAST_HOPS:
1987 case IPV6_MULTICAST_LOOP:
1988 case IPV6_RECVERR:
1989 case IPV6_RECVHOPLIMIT:
1990 case IPV6_2292HOPLIMIT:
1991 case IPV6_CHECKSUM:
1992 case IPV6_ADDRFORM:
1993 case IPV6_2292PKTINFO:
1994 case IPV6_RECVTCLASS:
1995 case IPV6_RECVRTHDR:
1996 case IPV6_2292RTHDR:
1997 case IPV6_RECVHOPOPTS:
1998 case IPV6_2292HOPOPTS:
1999 case IPV6_RECVDSTOPTS:
2000 case IPV6_2292DSTOPTS:
2001 case IPV6_TCLASS:
2002 #ifdef IPV6_RECVPATHMTU
2003 case IPV6_RECVPATHMTU:
2004 #endif
2005 #ifdef IPV6_TRANSPARENT
2006 case IPV6_TRANSPARENT:
2007 #endif
2008 #ifdef IPV6_FREEBIND
2009 case IPV6_FREEBIND:
2010 #endif
2011 #ifdef IPV6_RECVORIGDSTADDR
2012 case IPV6_RECVORIGDSTADDR:
2013 #endif
2014 val = 0;
2015 if (optlen < sizeof(uint32_t)) {
2016 return -TARGET_EINVAL;
2018 if (get_user_u32(val, optval_addr)) {
2019 return -TARGET_EFAULT;
2021 ret = get_errno(setsockopt(sockfd, level, optname,
2022 &val, sizeof(val)));
2023 break;
2024 case IPV6_PKTINFO:
2026 struct in6_pktinfo pki;
2028 if (optlen < sizeof(pki)) {
2029 return -TARGET_EINVAL;
2032 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2033 return -TARGET_EFAULT;
2036 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2038 ret = get_errno(setsockopt(sockfd, level, optname,
2039 &pki, sizeof(pki)));
2040 break;
2042 case IPV6_ADD_MEMBERSHIP:
2043 case IPV6_DROP_MEMBERSHIP:
2045 struct ipv6_mreq ipv6mreq;
2047 if (optlen < sizeof(ipv6mreq)) {
2048 return -TARGET_EINVAL;
2051 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2052 return -TARGET_EFAULT;
2055 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2057 ret = get_errno(setsockopt(sockfd, level, optname,
2058 &ipv6mreq, sizeof(ipv6mreq)));
2059 break;
2061 default:
2062 goto unimplemented;
2064 break;
2065 case SOL_ICMPV6:
2066 switch (optname) {
2067 case ICMPV6_FILTER:
2069 struct icmp6_filter icmp6f;
2071 if (optlen > sizeof(icmp6f)) {
2072 optlen = sizeof(icmp6f);
2075 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2076 return -TARGET_EFAULT;
2079 for (val = 0; val < 8; val++) {
2080 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2083 ret = get_errno(setsockopt(sockfd, level, optname,
2084 &icmp6f, optlen));
2085 break;
2087 default:
2088 goto unimplemented;
2090 break;
2091 case SOL_RAW:
2092 switch (optname) {
2093 case ICMP_FILTER:
2094 case IPV6_CHECKSUM:
2095             /* these take a u32 value */
2096 if (optlen < sizeof(uint32_t)) {
2097 return -TARGET_EINVAL;
2100 if (get_user_u32(val, optval_addr)) {
2101 return -TARGET_EFAULT;
2103 ret = get_errno(setsockopt(sockfd, level, optname,
2104 &val, sizeof(val)));
2105 break;
2107 default:
2108 goto unimplemented;
2110 break;
2111 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2112 case SOL_ALG:
2113 switch (optname) {
2114 case ALG_SET_KEY:
2116 char *alg_key = g_malloc(optlen);
2118 if (!alg_key) {
2119 return -TARGET_ENOMEM;
2121 if (copy_from_user(alg_key, optval_addr, optlen)) {
2122 g_free(alg_key);
2123 return -TARGET_EFAULT;
2125 ret = get_errno(setsockopt(sockfd, level, optname,
2126 alg_key, optlen));
2127 g_free(alg_key);
2128 break;
2130 case ALG_SET_AEAD_AUTHSIZE:
2132 ret = get_errno(setsockopt(sockfd, level, optname,
2133 NULL, optlen));
2134 break;
2136 default:
2137 goto unimplemented;
2139 break;
2140 #endif
2141 case TARGET_SOL_SOCKET:
2142 switch (optname) {
2143 case TARGET_SO_RCVTIMEO:
2145 struct timeval tv;
2147 optname = SO_RCVTIMEO;
2149 set_timeout:
2150 if (optlen != sizeof(struct target_timeval)) {
2151 return -TARGET_EINVAL;
2154 if (copy_from_user_timeval(&tv, optval_addr)) {
2155 return -TARGET_EFAULT;
2158 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2159 &tv, sizeof(tv)));
2160 return ret;
2162 case TARGET_SO_SNDTIMEO:
2163 optname = SO_SNDTIMEO;
2164 goto set_timeout;
2165 case TARGET_SO_ATTACH_FILTER:
2167 struct target_sock_fprog *tfprog;
2168 struct target_sock_filter *tfilter;
2169 struct sock_fprog fprog;
2170 struct sock_filter *filter;
2171 int i;
2173 if (optlen != sizeof(*tfprog)) {
2174 return -TARGET_EINVAL;
2176 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2177 return -TARGET_EFAULT;
2179 if (!lock_user_struct(VERIFY_READ, tfilter,
2180 tswapal(tfprog->filter), 0)) {
2181 unlock_user_struct(tfprog, optval_addr, 1);
2182 return -TARGET_EFAULT;
2185 fprog.len = tswap16(tfprog->len);
2186 filter = g_try_new(struct sock_filter, fprog.len);
2187 if (filter == NULL) {
2188 unlock_user_struct(tfilter, tfprog->filter, 1);
2189 unlock_user_struct(tfprog, optval_addr, 1);
2190 return -TARGET_ENOMEM;
2192 for (i = 0; i < fprog.len; i++) {
2193 filter[i].code = tswap16(tfilter[i].code);
2194 filter[i].jt = tfilter[i].jt;
2195 filter[i].jf = tfilter[i].jf;
2196 filter[i].k = tswap32(tfilter[i].k);
2198 fprog.filter = filter;
2200 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2201 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2202 g_free(filter);
2204 unlock_user_struct(tfilter, tfprog->filter, 1);
2205 unlock_user_struct(tfprog, optval_addr, 1);
2206 return ret;
2208 case TARGET_SO_BINDTODEVICE:
2210 char *dev_ifname, *addr_ifname;
2212 if (optlen > IFNAMSIZ - 1) {
2213 optlen = IFNAMSIZ - 1;
2215 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2216 if (!dev_ifname) {
2217 return -TARGET_EFAULT;
2219 optname = SO_BINDTODEVICE;
2220 addr_ifname = alloca(IFNAMSIZ);
2221 memcpy(addr_ifname, dev_ifname, optlen);
2222 addr_ifname[optlen] = 0;
2223 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2224 addr_ifname, optlen));
2225 unlock_user (dev_ifname, optval_addr, 0);
2226 return ret;
2228 case TARGET_SO_LINGER:
2230 struct linger lg;
2231 struct target_linger *tlg;
2233 if (optlen != sizeof(struct target_linger)) {
2234 return -TARGET_EINVAL;
2236 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2237 return -TARGET_EFAULT;
2239 __get_user(lg.l_onoff, &tlg->l_onoff);
2240 __get_user(lg.l_linger, &tlg->l_linger);
2241 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2242 &lg, sizeof(lg)));
2243 unlock_user_struct(tlg, optval_addr, 0);
2244 return ret;
2246 /* Options with 'int' argument. */
2247 case TARGET_SO_DEBUG:
2248 optname = SO_DEBUG;
2249 break;
2250 case TARGET_SO_REUSEADDR:
2251 optname = SO_REUSEADDR;
2252 break;
2253 #ifdef SO_REUSEPORT
2254 case TARGET_SO_REUSEPORT:
2255 optname = SO_REUSEPORT;
2256 break;
2257 #endif
2258 case TARGET_SO_TYPE:
2259 optname = SO_TYPE;
2260 break;
2261 case TARGET_SO_ERROR:
2262 optname = SO_ERROR;
2263 break;
2264 case TARGET_SO_DONTROUTE:
2265 optname = SO_DONTROUTE;
2266 break;
2267 case TARGET_SO_BROADCAST:
2268 optname = SO_BROADCAST;
2269 break;
2270 case TARGET_SO_SNDBUF:
2271 optname = SO_SNDBUF;
2272 break;
2273 case TARGET_SO_SNDBUFFORCE:
2274 optname = SO_SNDBUFFORCE;
2275 break;
2276 case TARGET_SO_RCVBUF:
2277 optname = SO_RCVBUF;
2278 break;
2279 case TARGET_SO_RCVBUFFORCE:
2280 optname = SO_RCVBUFFORCE;
2281 break;
2282 case TARGET_SO_KEEPALIVE:
2283 optname = SO_KEEPALIVE;
2284 break;
2285 case TARGET_SO_OOBINLINE:
2286 optname = SO_OOBINLINE;
2287 break;
2288 case TARGET_SO_NO_CHECK:
2289 optname = SO_NO_CHECK;
2290 break;
2291 case TARGET_SO_PRIORITY:
2292 optname = SO_PRIORITY;
2293 break;
2294 #ifdef SO_BSDCOMPAT
2295 case TARGET_SO_BSDCOMPAT:
2296 optname = SO_BSDCOMPAT;
2297 break;
2298 #endif
2299 case TARGET_SO_PASSCRED:
2300 optname = SO_PASSCRED;
2301 break;
2302 case TARGET_SO_PASSSEC:
2303 optname = SO_PASSSEC;
2304 break;
2305 case TARGET_SO_TIMESTAMP:
2306 optname = SO_TIMESTAMP;
2307 break;
2308 case TARGET_SO_RCVLOWAT:
2309 optname = SO_RCVLOWAT;
2310 break;
2311 default:
2312 goto unimplemented;
2314 if (optlen < sizeof(uint32_t))
2315 return -TARGET_EINVAL;
2317 if (get_user_u32(val, optval_addr))
2318 return -TARGET_EFAULT;
2319 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2320 break;
2321 #ifdef SOL_NETLINK
2322 case SOL_NETLINK:
2323 switch (optname) {
2324 case NETLINK_PKTINFO:
2325 case NETLINK_ADD_MEMBERSHIP:
2326 case NETLINK_DROP_MEMBERSHIP:
2327 case NETLINK_BROADCAST_ERROR:
2328 case NETLINK_NO_ENOBUFS:
2329 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2330 case NETLINK_LISTEN_ALL_NSID:
2331 case NETLINK_CAP_ACK:
2332 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2333 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2334 case NETLINK_EXT_ACK:
2335 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2336 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2337 case NETLINK_GET_STRICT_CHK:
2338 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2339 break;
2340 default:
2341 goto unimplemented;
2343 val = 0;
2344 if (optlen < sizeof(uint32_t)) {
2345 return -TARGET_EINVAL;
2347 if (get_user_u32(val, optval_addr)) {
2348 return -TARGET_EFAULT;
2350 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2351 sizeof(val)));
2352 break;
2353 #endif /* SOL_NETLINK */
2354 default:
2355 unimplemented:
2356 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2357 level, optname);
2358 ret = -TARGET_ENOPROTOOPT;
2360 return ret;
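/*
 * Editor's note: illustrative guest-side sketch, not part of this file. It
 * shows a call that takes the TARGET_SO_RCVTIMEO path above, where optval is
 * a struct timeval converted with copy_from_user_timeval() rather than a
 * plain int read with get_user_u32().
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>

static int set_recv_timeout(int sock, long seconds)
{
    struct timeval tv = { .tv_sec = seconds, .tv_usec = 0 };

    return setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}
#endif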
2363 /* do_getsockopt() Must return target values and target errnos. */
2364 static abi_long do_getsockopt(int sockfd, int level, int optname,
2365 abi_ulong optval_addr, abi_ulong optlen)
2367 abi_long ret;
2368 int len, val;
2369 socklen_t lv;
2371 switch(level) {
2372 case TARGET_SOL_SOCKET:
2373 level = SOL_SOCKET;
2374 switch (optname) {
2375 /* These don't just return a single integer */
2376 case TARGET_SO_PEERNAME:
2377 goto unimplemented;
2378 case TARGET_SO_RCVTIMEO: {
2379 struct timeval tv;
2380 socklen_t tvlen;
2382 optname = SO_RCVTIMEO;
2384 get_timeout:
2385 if (get_user_u32(len, optlen)) {
2386 return -TARGET_EFAULT;
2388 if (len < 0) {
2389 return -TARGET_EINVAL;
2392 tvlen = sizeof(tv);
2393 ret = get_errno(getsockopt(sockfd, level, optname,
2394 &tv, &tvlen));
2395 if (ret < 0) {
2396 return ret;
2398 if (len > sizeof(struct target_timeval)) {
2399 len = sizeof(struct target_timeval);
2401 if (copy_to_user_timeval(optval_addr, &tv)) {
2402 return -TARGET_EFAULT;
2404 if (put_user_u32(len, optlen)) {
2405 return -TARGET_EFAULT;
2407 break;
2409 case TARGET_SO_SNDTIMEO:
2410 optname = SO_SNDTIMEO;
2411 goto get_timeout;
2412 case TARGET_SO_PEERCRED: {
2413 struct ucred cr;
2414 socklen_t crlen;
2415 struct target_ucred *tcr;
2417 if (get_user_u32(len, optlen)) {
2418 return -TARGET_EFAULT;
2420 if (len < 0) {
2421 return -TARGET_EINVAL;
2424 crlen = sizeof(cr);
2425 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2426 &cr, &crlen));
2427 if (ret < 0) {
2428 return ret;
2430 if (len > crlen) {
2431 len = crlen;
2433 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2434 return -TARGET_EFAULT;
2436 __put_user(cr.pid, &tcr->pid);
2437 __put_user(cr.uid, &tcr->uid);
2438 __put_user(cr.gid, &tcr->gid);
2439 unlock_user_struct(tcr, optval_addr, 1);
2440 if (put_user_u32(len, optlen)) {
2441 return -TARGET_EFAULT;
2443 break;
2445 case TARGET_SO_PEERSEC: {
2446 char *name;
2448 if (get_user_u32(len, optlen)) {
2449 return -TARGET_EFAULT;
2451 if (len < 0) {
2452 return -TARGET_EINVAL;
2454 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2455 if (!name) {
2456 return -TARGET_EFAULT;
2458 lv = len;
2459 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2460 name, &lv));
2461 if (put_user_u32(lv, optlen)) {
2462 ret = -TARGET_EFAULT;
2464 unlock_user(name, optval_addr, lv);
2465 break;
2467 case TARGET_SO_LINGER:
2469 struct linger lg;
2470 socklen_t lglen;
2471 struct target_linger *tlg;
2473 if (get_user_u32(len, optlen)) {
2474 return -TARGET_EFAULT;
2476 if (len < 0) {
2477 return -TARGET_EINVAL;
2480 lglen = sizeof(lg);
2481 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2482 &lg, &lglen));
2483 if (ret < 0) {
2484 return ret;
2486 if (len > lglen) {
2487 len = lglen;
2489 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2490 return -TARGET_EFAULT;
2492 __put_user(lg.l_onoff, &tlg->l_onoff);
2493 __put_user(lg.l_linger, &tlg->l_linger);
2494 unlock_user_struct(tlg, optval_addr, 1);
2495 if (put_user_u32(len, optlen)) {
2496 return -TARGET_EFAULT;
2498 break;
2500 /* Options with 'int' argument. */
2501 case TARGET_SO_DEBUG:
2502 optname = SO_DEBUG;
2503 goto int_case;
2504 case TARGET_SO_REUSEADDR:
2505 optname = SO_REUSEADDR;
2506 goto int_case;
2507 #ifdef SO_REUSEPORT
2508 case TARGET_SO_REUSEPORT:
2509 optname = SO_REUSEPORT;
2510 goto int_case;
2511 #endif
2512 case TARGET_SO_TYPE:
2513 optname = SO_TYPE;
2514 goto int_case;
2515 case TARGET_SO_ERROR:
2516 optname = SO_ERROR;
2517 goto int_case;
2518 case TARGET_SO_DONTROUTE:
2519 optname = SO_DONTROUTE;
2520 goto int_case;
2521 case TARGET_SO_BROADCAST:
2522 optname = SO_BROADCAST;
2523 goto int_case;
2524 case TARGET_SO_SNDBUF:
2525 optname = SO_SNDBUF;
2526 goto int_case;
2527 case TARGET_SO_RCVBUF:
2528 optname = SO_RCVBUF;
2529 goto int_case;
2530 case TARGET_SO_KEEPALIVE:
2531 optname = SO_KEEPALIVE;
2532 goto int_case;
2533 case TARGET_SO_OOBINLINE:
2534 optname = SO_OOBINLINE;
2535 goto int_case;
2536 case TARGET_SO_NO_CHECK:
2537 optname = SO_NO_CHECK;
2538 goto int_case;
2539 case TARGET_SO_PRIORITY:
2540 optname = SO_PRIORITY;
2541 goto int_case;
2542 #ifdef SO_BSDCOMPAT
2543 case TARGET_SO_BSDCOMPAT:
2544 optname = SO_BSDCOMPAT;
2545 goto int_case;
2546 #endif
2547 case TARGET_SO_PASSCRED:
2548 optname = SO_PASSCRED;
2549 goto int_case;
2550 case TARGET_SO_TIMESTAMP:
2551 optname = SO_TIMESTAMP;
2552 goto int_case;
2553 case TARGET_SO_RCVLOWAT:
2554 optname = SO_RCVLOWAT;
2555 goto int_case;
2556 case TARGET_SO_ACCEPTCONN:
2557 optname = SO_ACCEPTCONN;
2558 goto int_case;
2559 default:
2560 goto int_case;
2562 break;
2563 case SOL_TCP:
2564 /* TCP options all take an 'int' value. */
2565 int_case:
2566 if (get_user_u32(len, optlen))
2567 return -TARGET_EFAULT;
2568 if (len < 0)
2569 return -TARGET_EINVAL;
2570 lv = sizeof(lv);
2571 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2572 if (ret < 0)
2573 return ret;
2574 if (optname == SO_TYPE) {
2575 val = host_to_target_sock_type(val);
2577 if (len > lv)
2578 len = lv;
2579 if (len == 4) {
2580 if (put_user_u32(val, optval_addr))
2581 return -TARGET_EFAULT;
2582 } else {
2583 if (put_user_u8(val, optval_addr))
2584 return -TARGET_EFAULT;
2586 if (put_user_u32(len, optlen))
2587 return -TARGET_EFAULT;
2588 break;
2589 case SOL_IP:
2590 switch(optname) {
2591 case IP_TOS:
2592 case IP_TTL:
2593 case IP_HDRINCL:
2594 case IP_ROUTER_ALERT:
2595 case IP_RECVOPTS:
2596 case IP_RETOPTS:
2597 case IP_PKTINFO:
2598 case IP_MTU_DISCOVER:
2599 case IP_RECVERR:
2600 case IP_RECVTOS:
2601 #ifdef IP_FREEBIND
2602 case IP_FREEBIND:
2603 #endif
2604 case IP_MULTICAST_TTL:
2605 case IP_MULTICAST_LOOP:
2606 if (get_user_u32(len, optlen))
2607 return -TARGET_EFAULT;
2608 if (len < 0)
2609 return -TARGET_EINVAL;
2610 lv = sizeof(lv);
2611 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2612 if (ret < 0)
2613 return ret;
2614 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2615 len = 1;
2616 if (put_user_u32(len, optlen)
2617 || put_user_u8(val, optval_addr))
2618 return -TARGET_EFAULT;
2619 } else {
2620 if (len > sizeof(int))
2621 len = sizeof(int);
2622 if (put_user_u32(len, optlen)
2623 || put_user_u32(val, optval_addr))
2624 return -TARGET_EFAULT;
2626 break;
2627 default:
2628 ret = -TARGET_ENOPROTOOPT;
2629 break;
2631 break;
2632 case SOL_IPV6:
2633 switch (optname) {
2634 case IPV6_MTU_DISCOVER:
2635 case IPV6_MTU:
2636 case IPV6_V6ONLY:
2637 case IPV6_RECVPKTINFO:
2638 case IPV6_UNICAST_HOPS:
2639 case IPV6_MULTICAST_HOPS:
2640 case IPV6_MULTICAST_LOOP:
2641 case IPV6_RECVERR:
2642 case IPV6_RECVHOPLIMIT:
2643 case IPV6_2292HOPLIMIT:
2644 case IPV6_CHECKSUM:
2645 case IPV6_ADDRFORM:
2646 case IPV6_2292PKTINFO:
2647 case IPV6_RECVTCLASS:
2648 case IPV6_RECVRTHDR:
2649 case IPV6_2292RTHDR:
2650 case IPV6_RECVHOPOPTS:
2651 case IPV6_2292HOPOPTS:
2652 case IPV6_RECVDSTOPTS:
2653 case IPV6_2292DSTOPTS:
2654 case IPV6_TCLASS:
2655 #ifdef IPV6_RECVPATHMTU
2656 case IPV6_RECVPATHMTU:
2657 #endif
2658 #ifdef IPV6_TRANSPARENT
2659 case IPV6_TRANSPARENT:
2660 #endif
2661 #ifdef IPV6_FREEBIND
2662 case IPV6_FREEBIND:
2663 #endif
2664 #ifdef IPV6_RECVORIGDSTADDR
2665 case IPV6_RECVORIGDSTADDR:
2666 #endif
2667 if (get_user_u32(len, optlen))
2668 return -TARGET_EFAULT;
2669 if (len < 0)
2670 return -TARGET_EINVAL;
2671 lv = sizeof(lv);
2672 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2673 if (ret < 0)
2674 return ret;
2675 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2676 len = 1;
2677 if (put_user_u32(len, optlen)
2678 || put_user_u8(val, optval_addr))
2679 return -TARGET_EFAULT;
2680 } else {
2681 if (len > sizeof(int))
2682 len = sizeof(int);
2683 if (put_user_u32(len, optlen)
2684 || put_user_u32(val, optval_addr))
2685 return -TARGET_EFAULT;
2687 break;
2688 default:
2689 ret = -TARGET_ENOPROTOOPT;
2690 break;
2692 break;
2693 #ifdef SOL_NETLINK
2694 case SOL_NETLINK:
2695 switch (optname) {
2696 case NETLINK_PKTINFO:
2697 case NETLINK_BROADCAST_ERROR:
2698 case NETLINK_NO_ENOBUFS:
2699 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2700 case NETLINK_LISTEN_ALL_NSID:
2701 case NETLINK_CAP_ACK:
2702 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2703 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2704 case NETLINK_EXT_ACK:
2705 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2706 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2707 case NETLINK_GET_STRICT_CHK:
2708 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2709 if (get_user_u32(len, optlen)) {
2710 return -TARGET_EFAULT;
2712 if (len != sizeof(val)) {
2713 return -TARGET_EINVAL;
2715 lv = len;
2716 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2717 if (ret < 0) {
2718 return ret;
2720 if (put_user_u32(lv, optlen)
2721 || put_user_u32(val, optval_addr)) {
2722 return -TARGET_EFAULT;
2724 break;
2725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2726 case NETLINK_LIST_MEMBERSHIPS:
2728 uint32_t *results;
2729 int i;
2730 if (get_user_u32(len, optlen)) {
2731 return -TARGET_EFAULT;
2733 if (len < 0) {
2734 return -TARGET_EINVAL;
2736 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2737 if (!results) {
2738 return -TARGET_EFAULT;
2740 lv = len;
2741 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2742 if (ret < 0) {
2743 unlock_user(results, optval_addr, 0);
2744 return ret;
2746             /* swap host endianness to target endianness. */
2747 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2748 results[i] = tswap32(results[i]);
2750 if (put_user_u32(lv, optlen)) {
2751 return -TARGET_EFAULT;
2753 unlock_user(results, optval_addr, 0);
2754 break;
2756 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2757 default:
2758 goto unimplemented;
2760 break;
2761 #endif /* SOL_NETLINK */
2762 default:
2763 unimplemented:
2764 qemu_log_mask(LOG_UNIMP,
2765 "getsockopt level=%d optname=%d not yet supported\n",
2766 level, optname);
2767 ret = -TARGET_EOPNOTSUPP;
2768 break;
2770 return ret;
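/*
 * Editor's note: illustrative guest-side sketch, not part of this file. This
 * getsockopt() call takes the int_case path above: the value is written back
 * with put_user_u32() and the length via put_user_u32(len). Checking SO_ERROR
 * after a non-blocking connect() is the classic use.
 */
#if 0
#include <sys/socket.h>

static int pending_socket_error(int sock)
{
    int err = 0;
    socklen_t len = sizeof(err);

    if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &len) < 0) {
        return -1;
    }
    return err;  /* 0 if the connect succeeded, otherwise an errno value */
}
#endif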
2773 /* Convert a target low/high pair representing a file offset into the host
2774 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2775 * as the kernel doesn't handle them either.
2777 static void target_to_host_low_high(abi_ulong tlow,
2778 abi_ulong thigh,
2779 unsigned long *hlow,
2780 unsigned long *hhigh)
2782 uint64_t off = tlow |
2783 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2784 TARGET_LONG_BITS / 2;
2786 *hlow = off;
2787 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
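/*
 * Editor's note: the two half-width shifts above avoid an undefined
 * full-width shift (on a 64-bit target a single shift by 64 would be UB)
 * while still reducing thigh's contribution to zero there, matching the
 * "no offsets bigger than 64 bits" comment. Illustrative sketch, not part
 * of this file, of the same reassembly for a 32-bit target:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void example_low_high(void)
{
    uint32_t tlow = 0x89abcdefu, thigh = 0x01234567u;

    /* Two 16-bit shifts stand in for one 32-bit shift. */
    uint64_t off = tlow | ((uint64_t)thigh << 16) << 16;
    assert(off == 0x0123456789abcdefULL);
}
#endif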
2790 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2791 abi_ulong count, int copy)
2793 struct target_iovec *target_vec;
2794 struct iovec *vec;
2795 abi_ulong total_len, max_len;
2796 int i;
2797 int err = 0;
2798 bool bad_address = false;
2800 if (count == 0) {
2801 errno = 0;
2802 return NULL;
2804 if (count > IOV_MAX) {
2805 errno = EINVAL;
2806 return NULL;
2809 vec = g_try_new0(struct iovec, count);
2810 if (vec == NULL) {
2811 errno = ENOMEM;
2812 return NULL;
2815 target_vec = lock_user(VERIFY_READ, target_addr,
2816 count * sizeof(struct target_iovec), 1);
2817 if (target_vec == NULL) {
2818 err = EFAULT;
2819 goto fail2;
2822 /* ??? If host page size > target page size, this will result in a
2823 value larger than what we can actually support. */
2824 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2825 total_len = 0;
2827 for (i = 0; i < count; i++) {
2828 abi_ulong base = tswapal(target_vec[i].iov_base);
2829 abi_long len = tswapal(target_vec[i].iov_len);
2831 if (len < 0) {
2832 err = EINVAL;
2833 goto fail;
2834 } else if (len == 0) {
2835             /* A zero-length entry's pointer is ignored. */
2836 vec[i].iov_base = 0;
2837 } else {
2838 vec[i].iov_base = lock_user(type, base, len, copy);
2839 /* If the first buffer pointer is bad, this is a fault. But
2840 * subsequent bad buffers will result in a partial write; this
2841 * is realized by filling the vector with null pointers and
2842 * zero lengths. */
2843 if (!vec[i].iov_base) {
2844 if (i == 0) {
2845 err = EFAULT;
2846 goto fail;
2847 } else {
2848 bad_address = true;
2851 if (bad_address) {
2852 len = 0;
2854 if (len > max_len - total_len) {
2855 len = max_len - total_len;
2858 vec[i].iov_len = len;
2859 total_len += len;
2862 unlock_user(target_vec, target_addr, 0);
2863 return vec;
2865 fail:
2866 while (--i >= 0) {
2867 if (tswapal(target_vec[i].iov_len) > 0) {
2868 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2871 unlock_user(target_vec, target_addr, 0);
2872 fail2:
2873 g_free(vec);
2874 errno = err;
2875 return NULL;
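/*
 * Editor's note: illustrative guest-side sketch, not part of this file. It
 * shows the partial-write behaviour the fill-in above implements: when a
 * later iovec entry points at an unmapped address, the syscall is expected
 * to return a short byte count covering the earlier buffers rather than
 * failing outright.
 */
#if 0
#include <sys/uio.h>

static ssize_t partial_writev(int fd, void *good, size_t len, void *bad)
{
    struct iovec iov[2] = {
        { .iov_base = good, .iov_len = len },
        { .iov_base = bad,  .iov_len = len },  /* faulting buffer */
    };

    /* Expected to return len (a short count) rather than -EFAULT. */
    return writev(fd, iov, 2);
}
#endif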
2878 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2879 abi_ulong count, int copy)
2881 struct target_iovec *target_vec;
2882 int i;
2884 target_vec = lock_user(VERIFY_READ, target_addr,
2885 count * sizeof(struct target_iovec), 1);
2886 if (target_vec) {
2887 for (i = 0; i < count; i++) {
2888 abi_ulong base = tswapal(target_vec[i].iov_base);
2889 abi_long len = tswapal(target_vec[i].iov_len);
2890 if (len < 0) {
2891 break;
2893 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2895 unlock_user(target_vec, target_addr, 0);
2898 g_free(vec);
2901 static inline int target_to_host_sock_type(int *type)
2903 int host_type = 0;
2904 int target_type = *type;
2906 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2907 case TARGET_SOCK_DGRAM:
2908 host_type = SOCK_DGRAM;
2909 break;
2910 case TARGET_SOCK_STREAM:
2911 host_type = SOCK_STREAM;
2912 break;
2913 default:
2914 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2915 break;
2917 if (target_type & TARGET_SOCK_CLOEXEC) {
2918 #if defined(SOCK_CLOEXEC)
2919 host_type |= SOCK_CLOEXEC;
2920 #else
2921 return -TARGET_EINVAL;
2922 #endif
2924 if (target_type & TARGET_SOCK_NONBLOCK) {
2925 #if defined(SOCK_NONBLOCK)
2926 host_type |= SOCK_NONBLOCK;
2927 #elif !defined(O_NONBLOCK)
2928 return -TARGET_EINVAL;
2929 #endif
2931 *type = host_type;
2932 return 0;
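/*
 * Editor's note: illustrative guest-side sketch, not part of this file. A
 * single guest 'type' word can carry a base socket type plus creation flags;
 * the helper above splits it with TARGET_SOCK_TYPE_MASK and maps each piece
 * separately (or rejects flags the host cannot express).
 */
#if 0
#include <sys/socket.h>

static int open_async_udp_socket(void)
{
    /* Base type and flags share one argument and are translated separately. */
    return socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
}
#endif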
2935 /* Try to emulate socket type flags after socket creation. */
2936 static int sock_flags_fixup(int fd, int target_type)
2938 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2939 if (target_type & TARGET_SOCK_NONBLOCK) {
2940 int flags = fcntl(fd, F_GETFL);
2941 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2942 close(fd);
2943 return -TARGET_EINVAL;
2946 #endif
2947 return fd;
2950 /* do_socket() Must return target values and target errnos. */
2951 static abi_long do_socket(int domain, int type, int protocol)
2953 int target_type = type;
2954 int ret;
2956 ret = target_to_host_sock_type(&type);
2957 if (ret) {
2958 return ret;
2961 if (domain == PF_NETLINK && !(
2962 #ifdef CONFIG_RTNETLINK
2963 protocol == NETLINK_ROUTE ||
2964 #endif
2965 protocol == NETLINK_KOBJECT_UEVENT ||
2966 protocol == NETLINK_AUDIT)) {
2967 return -TARGET_EPROTONOSUPPORT;
2970 if (domain == AF_PACKET ||
2971 (domain == AF_INET && type == SOCK_PACKET)) {
2972 protocol = tswap16(protocol);
2975 ret = get_errno(socket(domain, type, protocol));
2976 if (ret >= 0) {
2977 ret = sock_flags_fixup(ret, target_type);
2978 if (type == SOCK_PACKET) {
2979             /* Handle an obsolete case:
2980              * if the socket type is SOCK_PACKET, bind by name
2982 fd_trans_register(ret, &target_packet_trans);
2983 } else if (domain == PF_NETLINK) {
2984 switch (protocol) {
2985 #ifdef CONFIG_RTNETLINK
2986 case NETLINK_ROUTE:
2987 fd_trans_register(ret, &target_netlink_route_trans);
2988 break;
2989 #endif
2990 case NETLINK_KOBJECT_UEVENT:
2991 /* nothing to do: messages are strings */
2992 break;
2993 case NETLINK_AUDIT:
2994 fd_trans_register(ret, &target_netlink_audit_trans);
2995 break;
2996 default:
2997 g_assert_not_reached();
3001 return ret;
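/*
 * Editor's note: illustrative guest-side sketch, not part of this file. An
 * AF_NETLINK socket with protocol NETLINK_ROUTE passes the filter above and
 * gets the rtnetlink translator registered on its fd, so messages read from
 * it are converted for the guest.
 */
#if 0
#include <linux/netlink.h>
#include <sys/socket.h>

static int open_route_socket(void)
{
    return socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
}
#endif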
3004 /* do_bind() Must return target values and target errnos. */
3005 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3006 socklen_t addrlen)
3008 void *addr;
3009 abi_long ret;
3011 if ((int)addrlen < 0) {
3012 return -TARGET_EINVAL;
3015 addr = alloca(addrlen+1);
3017 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3018 if (ret)
3019 return ret;
3021 return get_errno(bind(sockfd, addr, addrlen));
3024 /* do_connect() Must return target values and target errnos. */
3025 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3026 socklen_t addrlen)
3028 void *addr;
3029 abi_long ret;
3031 if ((int)addrlen < 0) {
3032 return -TARGET_EINVAL;
3035 addr = alloca(addrlen+1);
3037 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3038 if (ret)
3039 return ret;
3041 return get_errno(safe_connect(sockfd, addr, addrlen));
3044 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3045 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3046 int flags, int send)
3048 abi_long ret, len;
3049 struct msghdr msg;
3050 abi_ulong count;
3051 struct iovec *vec;
3052 abi_ulong target_vec;
3054 if (msgp->msg_name) {
3055 msg.msg_namelen = tswap32(msgp->msg_namelen);
3056 msg.msg_name = alloca(msg.msg_namelen+1);
3057 ret = target_to_host_sockaddr(fd, msg.msg_name,
3058 tswapal(msgp->msg_name),
3059 msg.msg_namelen);
3060 if (ret == -TARGET_EFAULT) {
3061 /* For connected sockets msg_name and msg_namelen must
3062 * be ignored, so returning EFAULT immediately is wrong.
3063 * Instead, pass a bad msg_name to the host kernel, and
3064 * let it decide whether to return EFAULT or not.
3066 msg.msg_name = (void *)-1;
3067 } else if (ret) {
3068 goto out2;
3070 } else {
3071 msg.msg_name = NULL;
3072 msg.msg_namelen = 0;
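    /*
     * Editor's note: the factor of two applied to msg_controllen below is
     * presumably headroom for host cmsg headers and alignment, which can be
     * larger than the target's; host_to_target_cmsg() recomputes the guest
     * length on the way back.
     */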
3074 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3075 msg.msg_control = alloca(msg.msg_controllen);
3076 memset(msg.msg_control, 0, msg.msg_controllen);
3078 msg.msg_flags = tswap32(msgp->msg_flags);
3080 count = tswapal(msgp->msg_iovlen);
3081 target_vec = tswapal(msgp->msg_iov);
3083 if (count > IOV_MAX) {
3084         /* sendmsg/recvmsg return a different errno for this condition than
3085 * readv/writev, so we must catch it here before lock_iovec() does.
3087 ret = -TARGET_EMSGSIZE;
3088 goto out2;
3091 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3092 target_vec, count, send);
3093 if (vec == NULL) {
3094 ret = -host_to_target_errno(errno);
3095 goto out2;
3097 msg.msg_iovlen = count;
3098 msg.msg_iov = vec;
3100 if (send) {
3101 if (fd_trans_target_to_host_data(fd)) {
3102 void *host_msg;
3104 host_msg = g_malloc(msg.msg_iov->iov_len);
3105 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3106 ret = fd_trans_target_to_host_data(fd)(host_msg,
3107 msg.msg_iov->iov_len);
3108 if (ret >= 0) {
3109 msg.msg_iov->iov_base = host_msg;
3110 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3112 g_free(host_msg);
3113 } else {
3114 ret = target_to_host_cmsg(&msg, msgp);
3115 if (ret == 0) {
3116 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3119 } else {
3120 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3121 if (!is_error(ret)) {
3122 len = ret;
3123 if (fd_trans_host_to_target_data(fd)) {
3124 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3125 MIN(msg.msg_iov->iov_len, len));
3126 } else {
3127 ret = host_to_target_cmsg(msgp, &msg);
3129 if (!is_error(ret)) {
3130 msgp->msg_namelen = tswap32(msg.msg_namelen);
3131 msgp->msg_flags = tswap32(msg.msg_flags);
3132 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3133 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3134 msg.msg_name, msg.msg_namelen);
3135 if (ret) {
3136 goto out;
3140 ret = len;
3145 out:
3146 unlock_iovec(vec, target_vec, count, !send);
3147 out2:
3148 return ret;
3151 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3152 int flags, int send)
3154 abi_long ret;
3155 struct target_msghdr *msgp;
3157 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3158 msgp,
3159 target_msg,
3160 send ? 1 : 0)) {
3161 return -TARGET_EFAULT;
3163 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3164 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3165 return ret;
3168 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3169 * so it might not have this *mmsg-specific flag either.
3171 #ifndef MSG_WAITFORONE
3172 #define MSG_WAITFORONE 0x10000
3173 #endif
3175 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3176 unsigned int vlen, unsigned int flags,
3177 int send)
3179 struct target_mmsghdr *mmsgp;
3180 abi_long ret = 0;
3181 int i;
3183 if (vlen > UIO_MAXIOV) {
3184 vlen = UIO_MAXIOV;
3187 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3188 if (!mmsgp) {
3189 return -TARGET_EFAULT;
3192 for (i = 0; i < vlen; i++) {
3193 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3194 if (is_error(ret)) {
3195 break;
3197 mmsgp[i].msg_len = tswap32(ret);
3198 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3199 if (flags & MSG_WAITFORONE) {
3200 flags |= MSG_DONTWAIT;
3204 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3206 /* Return number of datagrams sent if we sent any at all;
3207 * otherwise return the error.
3209 if (i) {
3210 return i;
3212 return ret;
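/*
 * Editor's note: illustrative guest-side sketch, not part of this file. A
 * recvmmsg() call with MSG_WAITFORONE exercises the loop above: after the
 * first datagram lands, MSG_DONTWAIT is OR'ed in so the remaining slots only
 * drain whatever is already queued.
 */
#if 0
#define _GNU_SOURCE
#include <sys/socket.h>

static int drain_datagrams(int sock, struct mmsghdr *vec, unsigned int vlen)
{
    return recvmmsg(sock, vec, vlen, MSG_WAITFORONE, NULL);
}
#endif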
3215 /* do_accept4() Must return target values and target errnos. */
3216 static abi_long do_accept4(int fd, abi_ulong target_addr,
3217 abi_ulong target_addrlen_addr, int flags)
3219 socklen_t addrlen, ret_addrlen;
3220 void *addr;
3221 abi_long ret;
3222 int host_flags;
3224 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3226 if (target_addr == 0) {
3227 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3230     /* Linux returns EINVAL if the addrlen pointer is invalid */
3231 if (get_user_u32(addrlen, target_addrlen_addr))
3232 return -TARGET_EINVAL;
3234 if ((int)addrlen < 0) {
3235 return -TARGET_EINVAL;
3238 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3239 return -TARGET_EINVAL;
3241 addr = alloca(addrlen);
3243 ret_addrlen = addrlen;
3244 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3245 if (!is_error(ret)) {
3246 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3247 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3248 ret = -TARGET_EFAULT;
3251 return ret;
3254 /* do_getpeername() Must return target values and target errnos. */
3255 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3256 abi_ulong target_addrlen_addr)
3258 socklen_t addrlen, ret_addrlen;
3259 void *addr;
3260 abi_long ret;
3262 if (get_user_u32(addrlen, target_addrlen_addr))
3263 return -TARGET_EFAULT;
3265 if ((int)addrlen < 0) {
3266 return -TARGET_EINVAL;
3269 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3270 return -TARGET_EFAULT;
3272 addr = alloca(addrlen);
3274 ret_addrlen = addrlen;
3275 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3276 if (!is_error(ret)) {
3277 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3278 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3279 ret = -TARGET_EFAULT;
3282 return ret;
3285 /* do_getsockname() Must return target values and target errnos. */
3286 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3287 abi_ulong target_addrlen_addr)
3289 socklen_t addrlen, ret_addrlen;
3290 void *addr;
3291 abi_long ret;
3293 if (get_user_u32(addrlen, target_addrlen_addr))
3294 return -TARGET_EFAULT;
3296 if ((int)addrlen < 0) {
3297 return -TARGET_EINVAL;
3300 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3301 return -TARGET_EFAULT;
3303 addr = alloca(addrlen);
3305 ret_addrlen = addrlen;
3306 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3307 if (!is_error(ret)) {
3308 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3309 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3310 ret = -TARGET_EFAULT;
3313 return ret;
3316 /* do_socketpair() Must return target values and target errnos. */
3317 static abi_long do_socketpair(int domain, int type, int protocol,
3318 abi_ulong target_tab_addr)
3320 int tab[2];
3321 abi_long ret;
3323 target_to_host_sock_type(&type);
3325 ret = get_errno(socketpair(domain, type, protocol, tab));
3326 if (!is_error(ret)) {
3327 if (put_user_s32(tab[0], target_tab_addr)
3328 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3329 ret = -TARGET_EFAULT;
3331 return ret;
3334 /* do_sendto() Must return target values and target errnos. */
3335 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3336 abi_ulong target_addr, socklen_t addrlen)
3338 void *addr;
3339 void *host_msg;
3340 void *copy_msg = NULL;
3341 abi_long ret;
3343 if ((int)addrlen < 0) {
3344 return -TARGET_EINVAL;
3347 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3348 if (!host_msg)
3349 return -TARGET_EFAULT;
3350 if (fd_trans_target_to_host_data(fd)) {
3351 copy_msg = host_msg;
3352 host_msg = g_malloc(len);
3353 memcpy(host_msg, copy_msg, len);
3354 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3355 if (ret < 0) {
3356 goto fail;
3359 if (target_addr) {
3360 addr = alloca(addrlen+1);
3361 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3362 if (ret) {
3363 goto fail;
3365 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3366 } else {
3367 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3369 fail:
3370 if (copy_msg) {
3371 g_free(host_msg);
3372 host_msg = copy_msg;
3374 unlock_user(host_msg, msg, 0);
3375 return ret;
3378 /* do_recvfrom() Must return target values and target errnos. */
3379 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3380 abi_ulong target_addr,
3381 abi_ulong target_addrlen)
3383 socklen_t addrlen, ret_addrlen;
3384 void *addr;
3385 void *host_msg;
3386 abi_long ret;
3388 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3389 if (!host_msg)
3390 return -TARGET_EFAULT;
3391 if (target_addr) {
3392 if (get_user_u32(addrlen, target_addrlen)) {
3393 ret = -TARGET_EFAULT;
3394 goto fail;
3396 if ((int)addrlen < 0) {
3397 ret = -TARGET_EINVAL;
3398 goto fail;
3400 addr = alloca(addrlen);
3401 ret_addrlen = addrlen;
3402 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3403 addr, &ret_addrlen));
3404 } else {
3405 addr = NULL; /* To keep compiler quiet. */
3406 addrlen = 0; /* To keep compiler quiet. */
3407 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3409 if (!is_error(ret)) {
3410 if (fd_trans_host_to_target_data(fd)) {
3411 abi_long trans;
3412 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3413 if (is_error(trans)) {
3414 ret = trans;
3415 goto fail;
3418 if (target_addr) {
3419 host_to_target_sockaddr(target_addr, addr,
3420 MIN(addrlen, ret_addrlen));
3421 if (put_user_u32(ret_addrlen, target_addrlen)) {
3422 ret = -TARGET_EFAULT;
3423 goto fail;
3426 unlock_user(host_msg, msg, len);
3427 } else {
3428 fail:
3429 unlock_user(host_msg, msg, 0);
3431 return ret;
3434 #ifdef TARGET_NR_socketcall
3435 /* do_socketcall() must return target values and target errnos. */
3436 static abi_long do_socketcall(int num, abi_ulong vptr)
3438 static const unsigned nargs[] = { /* number of arguments per operation */
3439 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3440 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3441 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3442 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3443 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3444 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3445 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3446 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3447 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3448 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3449 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3450 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3451 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3452 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3453 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3454 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3455 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3456 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3457 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3458 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3460 abi_long a[6]; /* max 6 args */
3461 unsigned i;
3463 /* check the range of the first argument num */
3464 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3465 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3466 return -TARGET_EINVAL;
3468 /* ensure we have space for args */
3469 if (nargs[num] > ARRAY_SIZE(a)) {
3470 return -TARGET_EINVAL;
3472 /* collect the arguments in a[] according to nargs[] */
3473 for (i = 0; i < nargs[num]; ++i) {
3474 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3475 return -TARGET_EFAULT;
3478 /* now when we have the args, invoke the appropriate underlying function */
3479 switch (num) {
3480 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3481 return do_socket(a[0], a[1], a[2]);
3482 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3483 return do_bind(a[0], a[1], a[2]);
3484 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3485 return do_connect(a[0], a[1], a[2]);
3486 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3487 return get_errno(listen(a[0], a[1]));
3488 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3489 return do_accept4(a[0], a[1], a[2], 0);
3490 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3491 return do_getsockname(a[0], a[1], a[2]);
3492 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3493 return do_getpeername(a[0], a[1], a[2]);
3494 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3495 return do_socketpair(a[0], a[1], a[2], a[3]);
3496 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3497 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3498 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3499 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3500 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3501 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3502 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3503 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3504 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3505 return get_errno(shutdown(a[0], a[1]));
3506 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3507 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3508 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3509 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3510 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3511 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3512 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3513 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3514 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3515 return do_accept4(a[0], a[1], a[2], a[3]);
3516 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3517 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3518 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3519 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3520 default:
3521 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3522 return -TARGET_EINVAL;
3525 #endif
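/*
 * Editor's note: illustrative sketch, not part of this file. On a socketcall
 * architecture (e.g. 32-bit x86) the guest libc packs the real arguments into
 * an array in guest memory and passes its address as vptr; the dispatcher
 * above then fetches nargs[num] words from it with get_user_ual().
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

#ifdef SYS_socketcall
static long guest_style_connect(int fd, void *addr, unsigned long addrlen)
{
    long args[3] = { fd, (long)addr, (long)addrlen };

    /* 3 == SYS_CONNECT in <linux/net.h>, mirrored by TARGET_SYS_CONNECT. */
    return syscall(SYS_socketcall, 3, args);
}
#endif
#endif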
3527 #define N_SHM_REGIONS 32
3529 static struct shm_region {
3530 abi_ulong start;
3531 abi_ulong size;
3532 bool in_use;
3533 } shm_regions[N_SHM_REGIONS];
3535 #ifndef TARGET_SEMID64_DS
3536 /* asm-generic version of this struct */
3537 struct target_semid64_ds
3539 struct target_ipc_perm sem_perm;
3540 abi_ulong sem_otime;
3541 #if TARGET_ABI_BITS == 32
3542 abi_ulong __unused1;
3543 #endif
3544 abi_ulong sem_ctime;
3545 #if TARGET_ABI_BITS == 32
3546 abi_ulong __unused2;
3547 #endif
3548 abi_ulong sem_nsems;
3549 abi_ulong __unused3;
3550 abi_ulong __unused4;
3552 #endif
3554 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3555 abi_ulong target_addr)
3557 struct target_ipc_perm *target_ip;
3558 struct target_semid64_ds *target_sd;
3560 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3561 return -TARGET_EFAULT;
3562 target_ip = &(target_sd->sem_perm);
3563 host_ip->__key = tswap32(target_ip->__key);
3564 host_ip->uid = tswap32(target_ip->uid);
3565 host_ip->gid = tswap32(target_ip->gid);
3566 host_ip->cuid = tswap32(target_ip->cuid);
3567 host_ip->cgid = tswap32(target_ip->cgid);
3568 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3569 host_ip->mode = tswap32(target_ip->mode);
3570 #else
3571 host_ip->mode = tswap16(target_ip->mode);
3572 #endif
3573 #if defined(TARGET_PPC)
3574 host_ip->__seq = tswap32(target_ip->__seq);
3575 #else
3576 host_ip->__seq = tswap16(target_ip->__seq);
3577 #endif
3578 unlock_user_struct(target_sd, target_addr, 0);
3579 return 0;
3582 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3583 struct ipc_perm *host_ip)
3585 struct target_ipc_perm *target_ip;
3586 struct target_semid64_ds *target_sd;
3588 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3589 return -TARGET_EFAULT;
3590 target_ip = &(target_sd->sem_perm);
3591 target_ip->__key = tswap32(host_ip->__key);
3592 target_ip->uid = tswap32(host_ip->uid);
3593 target_ip->gid = tswap32(host_ip->gid);
3594 target_ip->cuid = tswap32(host_ip->cuid);
3595 target_ip->cgid = tswap32(host_ip->cgid);
3596 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3597 target_ip->mode = tswap32(host_ip->mode);
3598 #else
3599 target_ip->mode = tswap16(host_ip->mode);
3600 #endif
3601 #if defined(TARGET_PPC)
3602 target_ip->__seq = tswap32(host_ip->__seq);
3603 #else
3604 target_ip->__seq = tswap16(host_ip->__seq);
3605 #endif
3606 unlock_user_struct(target_sd, target_addr, 1);
3607 return 0;
3610 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3611 abi_ulong target_addr)
3613 struct target_semid64_ds *target_sd;
3615 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3616 return -TARGET_EFAULT;
3617 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3618 return -TARGET_EFAULT;
3619 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3620 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3621 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3622 unlock_user_struct(target_sd, target_addr, 0);
3623 return 0;
3626 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3627 struct semid_ds *host_sd)
3629 struct target_semid64_ds *target_sd;
3631 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3632 return -TARGET_EFAULT;
3633 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3634 return -TARGET_EFAULT;
3635 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3636 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3637 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3638 unlock_user_struct(target_sd, target_addr, 1);
3639 return 0;
3642 struct target_seminfo {
3643 int semmap;
3644 int semmni;
3645 int semmns;
3646 int semmnu;
3647 int semmsl;
3648 int semopm;
3649 int semume;
3650 int semusz;
3651 int semvmx;
3652 int semaem;
3655 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3656 struct seminfo *host_seminfo)
3658 struct target_seminfo *target_seminfo;
3659 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3660 return -TARGET_EFAULT;
3661 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3662 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3663 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3664 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3665 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3666 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3667 __put_user(host_seminfo->semume, &target_seminfo->semume);
3668 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3669 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3670 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3671 unlock_user_struct(target_seminfo, target_addr, 1);
3672 return 0;
3675 union semun {
3676 int val;
3677 struct semid_ds *buf;
3678 unsigned short *array;
3679 struct seminfo *__buf;
3682 union target_semun {
3683 int val;
3684 abi_ulong buf;
3685 abi_ulong array;
3686 abi_ulong __buf;
3689 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3690 abi_ulong target_addr)
3692 int nsems;
3693 unsigned short *array;
3694 union semun semun;
3695 struct semid_ds semid_ds;
3696 int i, ret;
3698 semun.buf = &semid_ds;
3700 ret = semctl(semid, 0, IPC_STAT, semun);
3701 if (ret == -1)
3702 return get_errno(ret);
3704 nsems = semid_ds.sem_nsems;
3706 *host_array = g_try_new(unsigned short, nsems);
3707 if (!*host_array) {
3708 return -TARGET_ENOMEM;
3710 array = lock_user(VERIFY_READ, target_addr,
3711 nsems*sizeof(unsigned short), 1);
3712 if (!array) {
3713 g_free(*host_array);
3714 return -TARGET_EFAULT;
3717 for(i=0; i<nsems; i++) {
3718 __get_user((*host_array)[i], &array[i]);
3720 unlock_user(array, target_addr, 0);
3722 return 0;
3725 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3726 unsigned short **host_array)
3728 int nsems;
3729 unsigned short *array;
3730 union semun semun;
3731 struct semid_ds semid_ds;
3732 int i, ret;
3734 semun.buf = &semid_ds;
3736 ret = semctl(semid, 0, IPC_STAT, semun);
3737 if (ret == -1)
3738 return get_errno(ret);
3740 nsems = semid_ds.sem_nsems;
3742 array = lock_user(VERIFY_WRITE, target_addr,
3743 nsems*sizeof(unsigned short), 0);
3744 if (!array)
3745 return -TARGET_EFAULT;
3747 for(i=0; i<nsems; i++) {
3748 __put_user((*host_array)[i], &array[i]);
3750 g_free(*host_array);
3751 unlock_user(array, target_addr, 1);
3753 return 0;
3756 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3757 abi_ulong target_arg)
3759 union target_semun target_su = { .buf = target_arg };
3760 union semun arg;
3761 struct semid_ds dsarg;
3762 unsigned short *array = NULL;
3763 struct seminfo seminfo;
3764 abi_long ret = -TARGET_EINVAL;
3765 abi_long err;
3766 cmd &= 0xff;
3768 switch( cmd ) {
3769 case GETVAL:
3770 case SETVAL:
3771         /* In 64-bit cross-endian situations, we will erroneously pick up
3772          * the wrong half of the union for the "val" element. To rectify
3773          * this, the entire 8-byte structure is byteswapped, followed by
3774          * a swap of the 4-byte val field. In other cases, the data is
3775 * already in proper host byte order. */
3776 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3777 target_su.buf = tswapal(target_su.buf);
3778 arg.val = tswap32(target_su.val);
3779 } else {
3780 arg.val = target_su.val;
3782 ret = get_errno(semctl(semid, semnum, cmd, arg));
3783 break;
3784 case GETALL:
3785 case SETALL:
3786 err = target_to_host_semarray(semid, &array, target_su.array);
3787 if (err)
3788 return err;
3789 arg.array = array;
3790 ret = get_errno(semctl(semid, semnum, cmd, arg));
3791 err = host_to_target_semarray(semid, target_su.array, &array);
3792 if (err)
3793 return err;
3794 break;
3795 case IPC_STAT:
3796 case IPC_SET:
3797 case SEM_STAT:
3798 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3799 if (err)
3800 return err;
3801 arg.buf = &dsarg;
3802 ret = get_errno(semctl(semid, semnum, cmd, arg));
3803 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3804 if (err)
3805 return err;
3806 break;
3807 case IPC_INFO:
3808 case SEM_INFO:
3809 arg.__buf = &seminfo;
3810 ret = get_errno(semctl(semid, semnum, cmd, arg));
3811 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3812 if (err)
3813 return err;
3814 break;
3815 case IPC_RMID:
3816 case GETPID:
3817 case GETNCNT:
3818 case GETZCNT:
3819 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3820 break;
3823 return ret;
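/*
 * Editor's note: illustrative guest-side sketch, not part of this file. A
 * semctl(SETVAL) call is the case where the cross-endian handling above must
 * pick the correct half of the guest's semun union before byte-swapping val.
 */
#if 0
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

/* Callers must define the semun union themselves (see semctl(2)). */
union semun_example {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
};

static int set_sem_value(int semid, int value)
{
    union semun_example arg = { .val = value };

    return semctl(semid, 0, SETVAL, arg);
}
#endif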
3826 struct target_sembuf {
3827 unsigned short sem_num;
3828 short sem_op;
3829 short sem_flg;
3832 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3833 abi_ulong target_addr,
3834 unsigned nsops)
3836 struct target_sembuf *target_sembuf;
3837 int i;
3839 target_sembuf = lock_user(VERIFY_READ, target_addr,
3840 nsops*sizeof(struct target_sembuf), 1);
3841 if (!target_sembuf)
3842 return -TARGET_EFAULT;
3844 for(i=0; i<nsops; i++) {
3845 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3846 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3847 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3850 unlock_user(target_sembuf, target_addr, 0);
3852 return 0;
3855 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3856 defined(TARGET_NR_semtimedop)
3859  * This macro is required to handle the s390 variant, which passes the
3860  * arguments in a different order than the default.
3862 #ifdef __s390x__
3863 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3864 (__nsops), (__timeout), (__sops)
3865 #else
3866 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3867 (__nsops), 0, (__sops), (__timeout)
3868 #endif
3870 static inline abi_long do_semtimedop(int semid,
3871 abi_long ptr,
3872 unsigned nsops,
3873 abi_long timeout)
3875 struct sembuf *sops;
3876 struct timespec ts, *pts = NULL;
3877 abi_long ret;
3879 if (timeout) {
3880 pts = &ts;
3881 if (target_to_host_timespec(pts, timeout)) {
3882 return -TARGET_EFAULT;
3886 if (nsops > TARGET_SEMOPM) {
3887 return -TARGET_E2BIG;
3890 sops = g_new(struct sembuf, nsops);
3892 if (target_to_host_sembuf(sops, ptr, nsops)) {
3893 g_free(sops);
3894 return -TARGET_EFAULT;
3897 ret = -TARGET_ENOSYS;
3898 #ifdef __NR_semtimedop
3899 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3900 #endif
3901 #ifdef __NR_ipc
3902 if (ret == -TARGET_ENOSYS) {
3903 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3904 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3906 #endif
3907 g_free(sops);
3908 return ret;
3910 #endif
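/*
 * Editor's note: illustrative guest-side sketch, not part of this file. A
 * semtimedop() call with a timeout is the path through do_semtimedop() above
 * where the guest timespec has to be converted to the host layout first.
 */
#if 0
#define _GNU_SOURCE
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>
#include <time.h>

static int wait_for_sem(int semid, unsigned short num, long timeout_s)
{
    struct sembuf op = { .sem_num = num, .sem_op = -1, .sem_flg = 0 };
    struct timespec ts = { .tv_sec = timeout_s, .tv_nsec = 0 };

    return semtimedop(semid, &op, 1, &ts);
}
#endif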
3912 struct target_msqid_ds
3914 struct target_ipc_perm msg_perm;
3915 abi_ulong msg_stime;
3916 #if TARGET_ABI_BITS == 32
3917 abi_ulong __unused1;
3918 #endif
3919 abi_ulong msg_rtime;
3920 #if TARGET_ABI_BITS == 32
3921 abi_ulong __unused2;
3922 #endif
3923 abi_ulong msg_ctime;
3924 #if TARGET_ABI_BITS == 32
3925 abi_ulong __unused3;
3926 #endif
3927 abi_ulong __msg_cbytes;
3928 abi_ulong msg_qnum;
3929 abi_ulong msg_qbytes;
3930 abi_ulong msg_lspid;
3931 abi_ulong msg_lrpid;
3932 abi_ulong __unused4;
3933 abi_ulong __unused5;
3934 };
3936 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3937 abi_ulong target_addr)
3939 struct target_msqid_ds *target_md;
3941 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3942 return -TARGET_EFAULT;
3943 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3944 return -TARGET_EFAULT;
3945 host_md->msg_stime = tswapal(target_md->msg_stime);
3946 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3947 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3948 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3949 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3950 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3951 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3952 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3953 unlock_user_struct(target_md, target_addr, 0);
3954 return 0;
3955 }
3957 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3958 struct msqid_ds *host_md)
3960 struct target_msqid_ds *target_md;
3962 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3963 return -TARGET_EFAULT;
3964 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3965 return -TARGET_EFAULT;
3966 target_md->msg_stime = tswapal(host_md->msg_stime);
3967 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3968 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3969 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3970 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3971 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3972 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3973 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3974 unlock_user_struct(target_md, target_addr, 1);
3975 return 0;
3976 }
3978 struct target_msginfo {
3979 int msgpool;
3980 int msgmap;
3981 int msgmax;
3982 int msgmnb;
3983 int msgmni;
3984 int msgssz;
3985 int msgtql;
3986 unsigned short int msgseg;
3987 };
3989 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3990 struct msginfo *host_msginfo)
3992 struct target_msginfo *target_msginfo;
3993 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3994 return -TARGET_EFAULT;
3995 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3996 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3997 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3998 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3999 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4000 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4001 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4002 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4003 unlock_user_struct(target_msginfo, target_addr, 1);
4004 return 0;
4005 }
4007 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4009 struct msqid_ds dsarg;
4010 struct msginfo msginfo;
4011 abi_long ret = -TARGET_EINVAL;
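/*
 * Guests may OR a version flag (IPC_64, typically 0x100) into the
 * command word; only the low byte selects the operation, so strip
 * the version bits before passing cmd to the host msgctl().
 */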
4013 cmd &= 0xff;
4015 switch (cmd) {
4016 case IPC_STAT:
4017 case IPC_SET:
4018 case MSG_STAT:
4019 if (target_to_host_msqid_ds(&dsarg,ptr))
4020 return -TARGET_EFAULT;
4021 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4022 if (host_to_target_msqid_ds(ptr,&dsarg))
4023 return -TARGET_EFAULT;
4024 break;
4025 case IPC_RMID:
4026 ret = get_errno(msgctl(msgid, cmd, NULL));
4027 break;
4028 case IPC_INFO:
4029 case MSG_INFO:
4030 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4031 if (host_to_target_msginfo(ptr, &msginfo))
4032 return -TARGET_EFAULT;
4033 break;
4034 }
4036 return ret;
4037 }
4039 struct target_msgbuf {
4040 abi_long mtype;
4041 char mtext[1];
4042 };
4044 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4045 ssize_t msgsz, int msgflg)
4047 struct target_msgbuf *target_mb;
4048 struct msgbuf *host_mb;
4049 abi_long ret = 0;
4051 if (msgsz < 0) {
4052 return -TARGET_EINVAL;
4055 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4056 return -TARGET_EFAULT;
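/*
 * The host buffer must hold the host's long mtype header in front of
 * the msgsz payload bytes, hence the msgsz + sizeof(long) allocation.
 */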
4057 host_mb = g_try_malloc(msgsz + sizeof(long));
4058 if (!host_mb) {
4059 unlock_user_struct(target_mb, msgp, 0);
4060 return -TARGET_ENOMEM;
4062 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4063 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4064 ret = -TARGET_ENOSYS;
4065 #ifdef __NR_msgsnd
4066 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4067 #endif
4068 #ifdef __NR_ipc
4069 if (ret == -TARGET_ENOSYS) {
4070 #ifdef __s390x__
4071 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4072 host_mb));
4073 #else
4074 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4075 host_mb, 0));
4076 #endif
4078 #endif
4079 g_free(host_mb);
4080 unlock_user_struct(target_mb, msgp, 0);
4082 return ret;
4083 }
4085 #ifdef __NR_ipc
4086 #if defined(__sparc__)
4087 /* SPARC msgrcv does not use the kludge on the final 2 arguments. */
4088 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4089 #elif defined(__s390x__)
4090 /* The s390 sys_ipc variant has only five parameters. */
4091 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4092 ((long int[]){(long int)__msgp, __msgtyp})
4093 #else
4094 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4095 ((long int[]){(long int)__msgp, __msgtyp}), 0
4096 #endif
4097 #endif
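/*
 * For version 0 of the multiplexed msgrcv, the kernel expects a pointer
 * to a { msgp, msgtyp } pair rather than the two values directly; the
 * compound literal above builds that pair on the fly. SPARC and s390x
 * avoid the kludge as noted in the comments.
 */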
4099 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4100 ssize_t msgsz, abi_long msgtyp,
4101 int msgflg)
4103 struct target_msgbuf *target_mb;
4104 char *target_mtext;
4105 struct msgbuf *host_mb;
4106 abi_long ret = 0;
4108 if (msgsz < 0) {
4109 return -TARGET_EINVAL;
4112 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4113 return -TARGET_EFAULT;
4115 host_mb = g_try_malloc(msgsz + sizeof(long));
4116 if (!host_mb) {
4117 ret = -TARGET_ENOMEM;
4118 goto end;
4120 ret = -TARGET_ENOSYS;
4121 #ifdef __NR_msgrcv
4122 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4123 #endif
4124 #ifdef __NR_ipc
4125 if (ret == -TARGET_ENOSYS) {
4126 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4127 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4129 #endif
4131 if (ret > 0) {
4132 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4133 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4134 if (!target_mtext) {
4135 ret = -TARGET_EFAULT;
4136 goto end;
4138 memcpy(target_mb->mtext, host_mb->mtext, ret);
4139 unlock_user(target_mtext, target_mtext_addr, ret);
4142 target_mb->mtype = tswapal(host_mb->mtype);
4144 end:
4145 if (target_mb)
4146 unlock_user_struct(target_mb, msgp, 1);
4147 g_free(host_mb);
4148 return ret;
4149 }
4151 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4152 abi_ulong target_addr)
4154 struct target_shmid_ds *target_sd;
4156 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4157 return -TARGET_EFAULT;
4158 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4159 return -TARGET_EFAULT;
4160 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4161 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4162 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4163 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4164 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4165 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4166 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4167 unlock_user_struct(target_sd, target_addr, 0);
4168 return 0;
4169 }
4171 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4172 struct shmid_ds *host_sd)
4174 struct target_shmid_ds *target_sd;
4176 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4177 return -TARGET_EFAULT;
4178 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4179 return -TARGET_EFAULT;
4180 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4181 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4182 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4183 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4184 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4185 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4186 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4187 unlock_user_struct(target_sd, target_addr, 1);
4188 return 0;
4189 }
4191 struct target_shminfo {
4192 abi_ulong shmmax;
4193 abi_ulong shmmin;
4194 abi_ulong shmmni;
4195 abi_ulong shmseg;
4196 abi_ulong shmall;
4197 };
4199 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4200 struct shminfo *host_shminfo)
4202 struct target_shminfo *target_shminfo;
4203 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4204 return -TARGET_EFAULT;
4205 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4206 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4207 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4208 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4209 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4210 unlock_user_struct(target_shminfo, target_addr, 1);
4211 return 0;
4212 }
4214 struct target_shm_info {
4215 int used_ids;
4216 abi_ulong shm_tot;
4217 abi_ulong shm_rss;
4218 abi_ulong shm_swp;
4219 abi_ulong swap_attempts;
4220 abi_ulong swap_successes;
4221 };
4223 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4224 struct shm_info *host_shm_info)
4226 struct target_shm_info *target_shm_info;
4227 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4228 return -TARGET_EFAULT;
4229 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4230 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4231 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4232 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4233 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4234 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4235 unlock_user_struct(target_shm_info, target_addr, 1);
4236 return 0;
4237 }
4239 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4241 struct shmid_ds dsarg;
4242 struct shminfo shminfo;
4243 struct shm_info shm_info;
4244 abi_long ret = -TARGET_EINVAL;
4246 cmd &= 0xff;
4248 switch(cmd) {
4249 case IPC_STAT:
4250 case IPC_SET:
4251 case SHM_STAT:
4252 if (target_to_host_shmid_ds(&dsarg, buf))
4253 return -TARGET_EFAULT;
4254 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4255 if (host_to_target_shmid_ds(buf, &dsarg))
4256 return -TARGET_EFAULT;
4257 break;
4258 case IPC_INFO:
4259 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4260 if (host_to_target_shminfo(buf, &shminfo))
4261 return -TARGET_EFAULT;
4262 break;
4263 case SHM_INFO:
4264 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4265 if (host_to_target_shm_info(buf, &shm_info))
4266 return -TARGET_EFAULT;
4267 break;
4268 case IPC_RMID:
4269 case SHM_LOCK:
4270 case SHM_UNLOCK:
4271 ret = get_errno(shmctl(shmid, cmd, NULL));
4272 break;
4273 }
4275 return ret;
4276 }
4278 #ifndef TARGET_FORCE_SHMLBA
4279 /* For most architectures, SHMLBA is the same as the page size;
4280 * some architectures have larger values, in which case they should
4281 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4282 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4283 * and defining its own value for SHMLBA.
4285 * The kernel also permits SHMLBA to be set by the architecture to a
4286 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4287 * this means that addresses are rounded to the large size if
4288 * SHM_RND is set but addresses not aligned to that size are not rejected
4289 * as long as they are at least page-aligned. Since the only architecture
4290 * which uses this is ia64, this code doesn't provide for that oddity.
4291 */
4292 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4293 {
4294 return TARGET_PAGE_SIZE;
4295 }
4296 #endif
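/*
 * A target with a larger alignment requirement would instead define
 * TARGET_FORCE_SHMLBA and provide its own target_shmlba(), roughly
 * (sketch only, the constant is illustrative):
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */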
4298 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4299 int shmid, abi_ulong shmaddr, int shmflg)
4301 abi_long raddr;
4302 void *host_raddr;
4303 struct shmid_ds shm_info;
4304 int i,ret;
4305 abi_ulong shmlba;
4307 /* find out the length of the shared memory segment */
4308 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4309 if (is_error(ret)) {
4310 /* can't get length, bail out */
4311 return ret;
4314 shmlba = target_shmlba(cpu_env);
4316 if (shmaddr & (shmlba - 1)) {
4317 if (shmflg & SHM_RND) {
4318 shmaddr &= ~(shmlba - 1);
4319 } else {
4320 return -TARGET_EINVAL;
4323 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4324 return -TARGET_EINVAL;
4327 mmap_lock();
4329 if (shmaddr)
4330 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4331 else {
4332 abi_ulong mmap_start;
4334 /* In order to use the host shmat, we need to honor host SHMLBA. */
4335 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4337 if (mmap_start == -1) {
4338 errno = ENOMEM;
4339 host_raddr = (void *)-1;
4340 } else
4341 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4344 if (host_raddr == (void *)-1) {
4345 mmap_unlock();
4346 return get_errno((long)host_raddr);
4348 raddr=h2g((unsigned long)host_raddr);
4350 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4351 PAGE_VALID | PAGE_READ |
4352 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4354 for (i = 0; i < N_SHM_REGIONS; i++) {
4355 if (!shm_regions[i].in_use) {
4356 shm_regions[i].in_use = true;
4357 shm_regions[i].start = raddr;
4358 shm_regions[i].size = shm_info.shm_segsz;
4359 break;
4363 mmap_unlock();
4364 return raddr;
4365 }
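/*
 * shm_regions[] (defined earlier in this file) remembers the guest
 * address and size of each attached segment so that do_shmdt() below
 * can clear the page flags for exactly the range that was mapped.
 */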
4368 static inline abi_long do_shmdt(abi_ulong shmaddr)
4370 int i;
4371 abi_long rv;
4373 mmap_lock();
4375 for (i = 0; i < N_SHM_REGIONS; ++i) {
4376 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4377 shm_regions[i].in_use = false;
4378 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4379 break;
4382 rv = get_errno(shmdt(g2h(shmaddr)));
4384 mmap_unlock();
4386 return rv;
4387 }
4389 #ifdef TARGET_NR_ipc
4390 /* ??? This only works with linear mappings. */
4391 /* do_ipc() must return target values and target errnos. */
4392 static abi_long do_ipc(CPUArchState *cpu_env,
4393 unsigned int call, abi_long first,
4394 abi_long second, abi_long third,
4395 abi_long ptr, abi_long fifth)
4397 int version;
4398 abi_long ret = 0;
4400 version = call >> 16;
4401 call &= 0xffff;
4403 switch (call) {
4404 case IPCOP_semop:
4405 ret = do_semtimedop(first, ptr, second, 0);
4406 break;
4407 case IPCOP_semtimedop:
4408 /*
4409 * The s390 sys_ipc variant has only five parameters instead of six
4410 * (as for the default variant); the only difference is the handling of
4411 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4412 * to a struct timespec while the generic variant uses the fifth parameter.
4413 */
4414 #if defined(TARGET_S390X)
4415 ret = do_semtimedop(first, ptr, second, third);
4416 #else
4417 ret = do_semtimedop(first, ptr, second, fifth);
4418 #endif
4419 break;
4421 case IPCOP_semget:
4422 ret = get_errno(semget(first, second, third));
4423 break;
4425 case IPCOP_semctl: {
4426 /* The semun argument to semctl is passed by value, so dereference the
4427 * ptr argument. */
4428 abi_ulong atptr;
4429 get_user_ual(atptr, ptr);
4430 ret = do_semctl(first, second, third, atptr);
4431 break;
4434 case IPCOP_msgget:
4435 ret = get_errno(msgget(first, second));
4436 break;
4438 case IPCOP_msgsnd:
4439 ret = do_msgsnd(first, ptr, second, third);
4440 break;
4442 case IPCOP_msgctl:
4443 ret = do_msgctl(first, second, ptr);
4444 break;
4446 case IPCOP_msgrcv:
4447 switch (version) {
4448 case 0:
4450 struct target_ipc_kludge {
4451 abi_long msgp;
4452 abi_long msgtyp;
4453 } *tmp;
4455 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4456 ret = -TARGET_EFAULT;
4457 break;
4460 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4462 unlock_user_struct(tmp, ptr, 0);
4463 break;
4465 default:
4466 ret = do_msgrcv(first, ptr, second, fifth, third);
4468 break;
4470 case IPCOP_shmat:
4471 switch (version) {
4472 default:
4474 abi_ulong raddr;
4475 raddr = do_shmat(cpu_env, first, ptr, second);
4476 if (is_error(raddr))
4477 return get_errno(raddr);
4478 if (put_user_ual(raddr, third))
4479 return -TARGET_EFAULT;
4480 break;
4482 case 1:
4483 ret = -TARGET_EINVAL;
4484 break;
4486 break;
4487 case IPCOP_shmdt:
4488 ret = do_shmdt(ptr);
4489 break;
4491 case IPCOP_shmget:
4492 /* IPC_* flag values are the same on all linux platforms */
4493 ret = get_errno(shmget(first, second, third));
4494 break;
4496 /* IPC_* and SHM_* command values are the same on all linux platforms */
4497 case IPCOP_shmctl:
4498 ret = do_shmctl(first, second, ptr);
4499 break;
4500 default:
4501 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4502 call, version);
4503 ret = -TARGET_ENOSYS;
4504 break;
4505 }
4506 return ret;
4507 }
4508 #endif
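/*
 * On targets such as i386 every SysV IPC operation is funneled through
 * the single ipc(2) syscall; a guest call like semop(id, ops, n) arrives
 * here roughly as ipc(IPCOP_semop, id, n, 0, ops) and is demultiplexed
 * by the switch above.
 */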
4510 /* kernel structure types definitions */
4512 #define STRUCT(name, ...) STRUCT_ ## name,
4513 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4514 enum {
4515 #include "syscall_types.h"
4516 STRUCT_MAX
4517 };
4518 #undef STRUCT
4519 #undef STRUCT_SPECIAL
4521 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4522 #define STRUCT_SPECIAL(name)
4523 #include "syscall_types.h"
4524 #undef STRUCT
4525 #undef STRUCT_SPECIAL
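/*
 * syscall_types.h is included twice: the first expansion of STRUCT()
 * emits one enum constant per structure, the second emits its field
 * type array. An entry of the form (illustrative)
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * therefore yields STRUCT_winsize plus
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 */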
4527 #define MAX_STRUCT_SIZE 4096
4529 #ifdef CONFIG_FIEMAP
4530 /* So fiemap access checks don't overflow on 32 bit systems.
4531 * This is very slightly smaller than the limit imposed by
4532 * the underlying kernel.
4533 */
4534 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4535 / sizeof(struct fiemap_extent))
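/*
 * Capping fm_extent_count at FIEMAP_MAX_EXTENTS keeps the later
 * computation sizeof(*fm) + fm_extent_count * sizeof(struct fiemap_extent)
 * from wrapping around a 32-bit unsigned size.
 */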
4537 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4538 int fd, int cmd, abi_long arg)
4540 /* The parameter for this ioctl is a struct fiemap followed
4541 * by an array of struct fiemap_extent whose size is set
4542 * in fiemap->fm_extent_count. The array is filled in by the
4543 * ioctl.
4544 */
4545 int target_size_in, target_size_out;
4546 struct fiemap *fm;
4547 const argtype *arg_type = ie->arg_type;
4548 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4549 void *argptr, *p;
4550 abi_long ret;
4551 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4552 uint32_t outbufsz;
4553 int free_fm = 0;
4555 assert(arg_type[0] == TYPE_PTR);
4556 assert(ie->access == IOC_RW);
4557 arg_type++;
4558 target_size_in = thunk_type_size(arg_type, 0);
4559 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4560 if (!argptr) {
4561 return -TARGET_EFAULT;
4563 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4564 unlock_user(argptr, arg, 0);
4565 fm = (struct fiemap *)buf_temp;
4566 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4567 return -TARGET_EINVAL;
4570 outbufsz = sizeof (*fm) +
4571 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4573 if (outbufsz > MAX_STRUCT_SIZE) {
4574 /* We can't fit all the extents into the fixed size buffer.
4575 * Allocate one that is large enough and use it instead.
4576 */
4577 fm = g_try_malloc(outbufsz);
4578 if (!fm) {
4579 return -TARGET_ENOMEM;
4581 memcpy(fm, buf_temp, sizeof(struct fiemap));
4582 free_fm = 1;
4584 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4585 if (!is_error(ret)) {
4586 target_size_out = target_size_in;
4587 /* An extent_count of 0 means we were only counting the extents
4588 * so there are no structs to copy
4589 */
4590 if (fm->fm_extent_count != 0) {
4591 target_size_out += fm->fm_mapped_extents * extent_size;
4593 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4594 if (!argptr) {
4595 ret = -TARGET_EFAULT;
4596 } else {
4597 /* Convert the struct fiemap */
4598 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4599 if (fm->fm_extent_count != 0) {
4600 p = argptr + target_size_in;
4601 /* ...and then all the struct fiemap_extents */
4602 for (i = 0; i < fm->fm_mapped_extents; i++) {
4603 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4604 THUNK_TARGET);
4605 p += extent_size;
4608 unlock_user(argptr, arg, target_size_out);
4611 if (free_fm) {
4612 g_free(fm);
4614 return ret;
4615 }
4616 #endif
4618 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4619 int fd, int cmd, abi_long arg)
4621 const argtype *arg_type = ie->arg_type;
4622 int target_size;
4623 void *argptr;
4624 int ret;
4625 struct ifconf *host_ifconf;
4626 uint32_t outbufsz;
4627 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4628 int target_ifreq_size;
4629 int nb_ifreq;
4630 int free_buf = 0;
4631 int i;
4632 int target_ifc_len;
4633 abi_long target_ifc_buf;
4634 int host_ifc_len;
4635 char *host_ifc_buf;
4637 assert(arg_type[0] == TYPE_PTR);
4638 assert(ie->access == IOC_RW);
4640 arg_type++;
4641 target_size = thunk_type_size(arg_type, 0);
4643 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4644 if (!argptr)
4645 return -TARGET_EFAULT;
4646 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4647 unlock_user(argptr, arg, 0);
4649 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4650 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4651 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4653 if (target_ifc_buf != 0) {
4654 target_ifc_len = host_ifconf->ifc_len;
4655 nb_ifreq = target_ifc_len / target_ifreq_size;
4656 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4658 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4659 if (outbufsz > MAX_STRUCT_SIZE) {
4660 /*
4661 * We can't fit all the ifreq entries into the fixed size buffer.
4662 * Allocate one that is large enough and use it instead.
4663 */
4664 host_ifconf = malloc(outbufsz);
4665 if (!host_ifconf) {
4666 return -TARGET_ENOMEM;
4668 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4669 free_buf = 1;
4671 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4673 host_ifconf->ifc_len = host_ifc_len;
4674 } else {
4675 host_ifc_buf = NULL;
4677 host_ifconf->ifc_buf = host_ifc_buf;
4679 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4680 if (!is_error(ret)) {
4681 /* convert host ifc_len to target ifc_len */
4683 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4684 target_ifc_len = nb_ifreq * target_ifreq_size;
4685 host_ifconf->ifc_len = target_ifc_len;
4687 /* restore target ifc_buf */
4689 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4691 /* copy struct ifconf to target user */
4693 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4694 if (!argptr)
4695 return -TARGET_EFAULT;
4696 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4697 unlock_user(argptr, arg, target_size);
4699 if (target_ifc_buf != 0) {
4700 /* copy ifreq[] to target user */
4701 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4702 for (i = 0; i < nb_ifreq ; i++) {
4703 thunk_convert(argptr + i * target_ifreq_size,
4704 host_ifc_buf + i * sizeof(struct ifreq),
4705 ifreq_arg_type, THUNK_TARGET);
4707 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4711 if (free_buf) {
4712 free(host_ifconf);
4715 return ret;
4716 }
4718 #if defined(CONFIG_USBFS)
4719 #if HOST_LONG_BITS > 64
4720 #error USBDEVFS thunks do not support >64 bit hosts yet.
4721 #endif
4722 struct live_urb {
4723 uint64_t target_urb_adr;
4724 uint64_t target_buf_adr;
4725 char *target_buf_ptr;
4726 struct usbdevfs_urb host_urb;
4727 };
4729 static GHashTable *usbdevfs_urb_hashtable(void)
4731 static GHashTable *urb_hashtable;
4733 if (!urb_hashtable) {
4734 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4736 return urb_hashtable;
4737 }
4739 static void urb_hashtable_insert(struct live_urb *urb)
4741 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4742 g_hash_table_insert(urb_hashtable, urb, urb);
4743 }
4745 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4747 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4748 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4749 }
4751 static void urb_hashtable_remove(struct live_urb *urb)
4753 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4754 g_hash_table_remove(urb_hashtable, urb);
4755 }
4757 static abi_long
4758 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4759 int fd, int cmd, abi_long arg)
4761 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4762 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4763 struct live_urb *lurb;
4764 void *argptr;
4765 uint64_t hurb;
4766 int target_size;
4767 uintptr_t target_urb_adr;
4768 abi_long ret;
4770 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4772 memset(buf_temp, 0, sizeof(uint64_t));
4773 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4774 if (is_error(ret)) {
4775 return ret;
4778 memcpy(&hurb, buf_temp, sizeof(uint64_t));
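/*
 * The kernel returned the address of host_urb; subtracting its offset
 * within struct live_urb recovers the enclosing wrapper (the usual
 * container_of idiom) and with it the guest-side metadata.
 */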
4779 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4780 if (!lurb->target_urb_adr) {
4781 return -TARGET_EFAULT;
4783 urb_hashtable_remove(lurb);
4784 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4785 lurb->host_urb.buffer_length);
4786 lurb->target_buf_ptr = NULL;
4788 /* restore the guest buffer pointer */
4789 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4791 /* update the guest urb struct */
4792 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4793 if (!argptr) {
4794 g_free(lurb);
4795 return -TARGET_EFAULT;
4797 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4798 unlock_user(argptr, lurb->target_urb_adr, target_size);
4800 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4801 /* write back the urb handle */
4802 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4803 if (!argptr) {
4804 g_free(lurb);
4805 return -TARGET_EFAULT;
4808 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4809 target_urb_adr = lurb->target_urb_adr;
4810 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4811 unlock_user(argptr, arg, target_size);
4813 g_free(lurb);
4814 return ret;
4815 }
4817 static abi_long
4818 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4819 uint8_t *buf_temp __attribute__((unused)),
4820 int fd, int cmd, abi_long arg)
4822 struct live_urb *lurb;
4824 /* map target address back to host URB with metadata. */
4825 lurb = urb_hashtable_lookup(arg);
4826 if (!lurb) {
4827 return -TARGET_EFAULT;
4829 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4830 }
4832 static abi_long
4833 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4834 int fd, int cmd, abi_long arg)
4836 const argtype *arg_type = ie->arg_type;
4837 int target_size;
4838 abi_long ret;
4839 void *argptr;
4840 int rw_dir;
4841 struct live_urb *lurb;
4843 /*
4844 * Each submitted URB needs to map to a unique ID for the
4845 * kernel, and that unique ID needs to be a pointer to
4846 * host memory. Hence, we need to malloc for each URB.
4847 * Isochronous transfers have a variable length struct.
4848 */
4849 arg_type++;
4850 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4852 /* construct host copy of urb and metadata */
4853 lurb = g_try_malloc0(sizeof(struct live_urb));
4854 if (!lurb) {
4855 return -TARGET_ENOMEM;
4858 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4859 if (!argptr) {
4860 g_free(lurb);
4861 return -TARGET_EFAULT;
4863 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4864 unlock_user(argptr, arg, 0);
4866 lurb->target_urb_adr = arg;
4867 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4869 /* buffer space used depends on endpoint type so lock the entire buffer */
4870 /* control type urbs should check the buffer contents for true direction */
4871 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4872 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4873 lurb->host_urb.buffer_length, 1);
4874 if (lurb->target_buf_ptr == NULL) {
4875 g_free(lurb);
4876 return -TARGET_EFAULT;
4879 /* update buffer pointer in host copy */
4880 lurb->host_urb.buffer = lurb->target_buf_ptr;
4882 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4883 if (is_error(ret)) {
4884 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4885 g_free(lurb);
4886 } else {
4887 urb_hashtable_insert(lurb);
4890 return ret;
4891 }
4892 #endif /* CONFIG_USBFS */
4894 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4895 int cmd, abi_long arg)
4897 void *argptr;
4898 struct dm_ioctl *host_dm;
4899 abi_long guest_data;
4900 uint32_t guest_data_size;
4901 int target_size;
4902 const argtype *arg_type = ie->arg_type;
4903 abi_long ret;
4904 void *big_buf = NULL;
4905 char *host_data;
4907 arg_type++;
4908 target_size = thunk_type_size(arg_type, 0);
4909 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4910 if (!argptr) {
4911 ret = -TARGET_EFAULT;
4912 goto out;
4914 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4915 unlock_user(argptr, arg, 0);
4917 /* buf_temp is too small, so fetch things into a bigger buffer */
4918 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4919 memcpy(big_buf, buf_temp, target_size);
4920 buf_temp = big_buf;
4921 host_dm = big_buf;
4923 guest_data = arg + host_dm->data_start;
4924 if ((guest_data - arg) < 0) {
4925 ret = -TARGET_EINVAL;
4926 goto out;
4928 guest_data_size = host_dm->data_size - host_dm->data_start;
4929 host_data = (char*)host_dm + host_dm->data_start;
4931 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4932 if (!argptr) {
4933 ret = -TARGET_EFAULT;
4934 goto out;
4937 switch (ie->host_cmd) {
4938 case DM_REMOVE_ALL:
4939 case DM_LIST_DEVICES:
4940 case DM_DEV_CREATE:
4941 case DM_DEV_REMOVE:
4942 case DM_DEV_SUSPEND:
4943 case DM_DEV_STATUS:
4944 case DM_DEV_WAIT:
4945 case DM_TABLE_STATUS:
4946 case DM_TABLE_CLEAR:
4947 case DM_TABLE_DEPS:
4948 case DM_LIST_VERSIONS:
4949 /* no input data */
4950 break;
4951 case DM_DEV_RENAME:
4952 case DM_DEV_SET_GEOMETRY:
4953 /* data contains only strings */
4954 memcpy(host_data, argptr, guest_data_size);
4955 break;
4956 case DM_TARGET_MSG:
4957 memcpy(host_data, argptr, guest_data_size);
4958 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4959 break;
4960 case DM_TABLE_LOAD:
4962 void *gspec = argptr;
4963 void *cur_data = host_data;
4964 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4965 int spec_size = thunk_type_size(arg_type, 0);
4966 int i;
4968 for (i = 0; i < host_dm->target_count; i++) {
4969 struct dm_target_spec *spec = cur_data;
4970 uint32_t next;
4971 int slen;
4973 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4974 slen = strlen((char*)gspec + spec_size) + 1;
4975 next = spec->next;
4976 spec->next = sizeof(*spec) + slen;
4977 strcpy((char*)&spec[1], gspec + spec_size);
4978 gspec += next;
4979 cur_data += spec->next;
4981 break;
4983 default:
4984 ret = -TARGET_EINVAL;
4985 unlock_user(argptr, guest_data, 0);
4986 goto out;
4988 unlock_user(argptr, guest_data, 0);
4990 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4991 if (!is_error(ret)) {
4992 guest_data = arg + host_dm->data_start;
4993 guest_data_size = host_dm->data_size - host_dm->data_start;
4994 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4995 switch (ie->host_cmd) {
4996 case DM_REMOVE_ALL:
4997 case DM_DEV_CREATE:
4998 case DM_DEV_REMOVE:
4999 case DM_DEV_RENAME:
5000 case DM_DEV_SUSPEND:
5001 case DM_DEV_STATUS:
5002 case DM_TABLE_LOAD:
5003 case DM_TABLE_CLEAR:
5004 case DM_TARGET_MSG:
5005 case DM_DEV_SET_GEOMETRY:
5006 /* no return data */
5007 break;
5008 case DM_LIST_DEVICES:
5010 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5011 uint32_t remaining_data = guest_data_size;
5012 void *cur_data = argptr;
5013 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5014 int nl_size = 12; /* can't use thunk_size due to alignment */
5016 while (1) {
5017 uint32_t next = nl->next;
5018 if (next) {
5019 nl->next = nl_size + (strlen(nl->name) + 1);
5021 if (remaining_data < nl->next) {
5022 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5023 break;
5025 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5026 strcpy(cur_data + nl_size, nl->name);
5027 cur_data += nl->next;
5028 remaining_data -= nl->next;
5029 if (!next) {
5030 break;
5032 nl = (void*)nl + next;
5034 break;
5036 case DM_DEV_WAIT:
5037 case DM_TABLE_STATUS:
5039 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5040 void *cur_data = argptr;
5041 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5042 int spec_size = thunk_type_size(arg_type, 0);
5043 int i;
5045 for (i = 0; i < host_dm->target_count; i++) {
5046 uint32_t next = spec->next;
5047 int slen = strlen((char*)&spec[1]) + 1;
5048 spec->next = (cur_data - argptr) + spec_size + slen;
5049 if (guest_data_size < spec->next) {
5050 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5051 break;
5053 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5054 strcpy(cur_data + spec_size, (char*)&spec[1]);
5055 cur_data = argptr + spec->next;
5056 spec = (void*)host_dm + host_dm->data_start + next;
5058 break;
5060 case DM_TABLE_DEPS:
5062 void *hdata = (void*)host_dm + host_dm->data_start;
5063 int count = *(uint32_t*)hdata;
5064 uint64_t *hdev = hdata + 8;
5065 uint64_t *gdev = argptr + 8;
5066 int i;
5068 *(uint32_t*)argptr = tswap32(count);
5069 for (i = 0; i < count; i++) {
5070 *gdev = tswap64(*hdev);
5071 gdev++;
5072 hdev++;
5074 break;
5076 case DM_LIST_VERSIONS:
5078 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5079 uint32_t remaining_data = guest_data_size;
5080 void *cur_data = argptr;
5081 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5082 int vers_size = thunk_type_size(arg_type, 0);
5084 while (1) {
5085 uint32_t next = vers->next;
5086 if (next) {
5087 vers->next = vers_size + (strlen(vers->name) + 1);
5089 if (remaining_data < vers->next) {
5090 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5091 break;
5093 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5094 strcpy(cur_data + vers_size, vers->name);
5095 cur_data += vers->next;
5096 remaining_data -= vers->next;
5097 if (!next) {
5098 break;
5100 vers = (void*)vers + next;
5102 break;
5104 default:
5105 unlock_user(argptr, guest_data, 0);
5106 ret = -TARGET_EINVAL;
5107 goto out;
5109 unlock_user(argptr, guest_data, guest_data_size);
5111 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5112 if (!argptr) {
5113 ret = -TARGET_EFAULT;
5114 goto out;
5116 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5117 unlock_user(argptr, arg, target_size);
5119 out:
5120 g_free(big_buf);
5121 return ret;
5122 }
5124 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5125 int cmd, abi_long arg)
5127 void *argptr;
5128 int target_size;
5129 const argtype *arg_type = ie->arg_type;
5130 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5131 abi_long ret;
5133 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5134 struct blkpg_partition host_part;
5136 /* Read and convert blkpg */
5137 arg_type++;
5138 target_size = thunk_type_size(arg_type, 0);
5139 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5140 if (!argptr) {
5141 ret = -TARGET_EFAULT;
5142 goto out;
5144 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5145 unlock_user(argptr, arg, 0);
5147 switch (host_blkpg->op) {
5148 case BLKPG_ADD_PARTITION:
5149 case BLKPG_DEL_PARTITION:
5150 /* payload is struct blkpg_partition */
5151 break;
5152 default:
5153 /* Unknown opcode */
5154 ret = -TARGET_EINVAL;
5155 goto out;
5158 /* Read and convert blkpg->data */
5159 arg = (abi_long)(uintptr_t)host_blkpg->data;
5160 target_size = thunk_type_size(part_arg_type, 0);
5161 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5162 if (!argptr) {
5163 ret = -TARGET_EFAULT;
5164 goto out;
5166 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5167 unlock_user(argptr, arg, 0);
5169 /* Swizzle the data pointer to our local copy and call! */
5170 host_blkpg->data = &host_part;
5171 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5173 out:
5174 return ret;
5175 }
5177 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5178 int fd, int cmd, abi_long arg)
5180 const argtype *arg_type = ie->arg_type;
5181 const StructEntry *se;
5182 const argtype *field_types;
5183 const int *dst_offsets, *src_offsets;
5184 int target_size;
5185 void *argptr;
5186 abi_ulong *target_rt_dev_ptr = NULL;
5187 unsigned long *host_rt_dev_ptr = NULL;
5188 abi_long ret;
5189 int i;
5191 assert(ie->access == IOC_W);
5192 assert(*arg_type == TYPE_PTR);
5193 arg_type++;
5194 assert(*arg_type == TYPE_STRUCT);
5195 target_size = thunk_type_size(arg_type, 0);
5196 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5197 if (!argptr) {
5198 return -TARGET_EFAULT;
5200 arg_type++;
5201 assert(*arg_type == (int)STRUCT_rtentry);
5202 se = struct_entries + *arg_type++;
5203 assert(se->convert[0] == NULL);
5204 /* convert struct here to be able to catch rt_dev string */
5205 field_types = se->field_types;
5206 dst_offsets = se->field_offsets[THUNK_HOST];
5207 src_offsets = se->field_offsets[THUNK_TARGET];
5208 for (i = 0; i < se->nb_fields; i++) {
5209 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5210 assert(*field_types == TYPE_PTRVOID);
5211 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5212 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5213 if (*target_rt_dev_ptr != 0) {
5214 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5215 tswapal(*target_rt_dev_ptr));
5216 if (!*host_rt_dev_ptr) {
5217 unlock_user(argptr, arg, 0);
5218 return -TARGET_EFAULT;
5220 } else {
5221 *host_rt_dev_ptr = 0;
5223 field_types++;
5224 continue;
5226 field_types = thunk_convert(buf_temp + dst_offsets[i],
5227 argptr + src_offsets[i],
5228 field_types, THUNK_HOST);
5230 unlock_user(argptr, arg, 0);
5232 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5234 assert(host_rt_dev_ptr != NULL);
5235 assert(target_rt_dev_ptr != NULL);
5236 if (*host_rt_dev_ptr != 0) {
5237 unlock_user((void *)*host_rt_dev_ptr,
5238 *target_rt_dev_ptr, 0);
5240 return ret;
5241 }
5243 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5244 int fd, int cmd, abi_long arg)
5246 int sig = target_to_host_signal(arg);
5247 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5248 }
5250 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5251 int fd, int cmd, abi_long arg)
5253 struct timeval tv;
5254 abi_long ret;
5256 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5257 if (is_error(ret)) {
5258 return ret;
5261 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5262 if (copy_to_user_timeval(arg, &tv)) {
5263 return -TARGET_EFAULT;
5265 } else {
5266 if (copy_to_user_timeval64(arg, &tv)) {
5267 return -TARGET_EFAULT;
5268 }
5269 }
5271 return ret;
5272 }
5274 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5275 int fd, int cmd, abi_long arg)
5277 struct timespec ts;
5278 abi_long ret;
5280 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5281 if (is_error(ret)) {
5282 return ret;
5285 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5286 if (host_to_target_timespec(arg, &ts)) {
5287 return -TARGET_EFAULT;
5289 } else {
5290 if (host_to_target_timespec64(arg, &ts)) {
5291 return -TARGET_EFAULT;
5292 }
5293 }
5295 return ret;
5296 }
5298 #ifdef TIOCGPTPEER
5299 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5300 int fd, int cmd, abi_long arg)
5302 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5303 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5304 }
5305 #endif
5307 #ifdef HAVE_DRM_H
5309 static void unlock_drm_version(struct drm_version *host_ver,
5310 struct target_drm_version *target_ver,
5311 bool copy)
5313 unlock_user(host_ver->name, target_ver->name,
5314 copy ? host_ver->name_len : 0);
5315 unlock_user(host_ver->date, target_ver->date,
5316 copy ? host_ver->date_len : 0);
5317 unlock_user(host_ver->desc, target_ver->desc,
5318 copy ? host_ver->desc_len : 0);
5319 }
5321 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5322 struct target_drm_version *target_ver)
5324 memset(host_ver, 0, sizeof(*host_ver));
5326 __get_user(host_ver->name_len, &target_ver->name_len);
5327 if (host_ver->name_len) {
5328 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5329 target_ver->name_len, 0);
5330 if (!host_ver->name) {
5331 return -EFAULT;
5335 __get_user(host_ver->date_len, &target_ver->date_len);
5336 if (host_ver->date_len) {
5337 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5338 target_ver->date_len, 0);
5339 if (!host_ver->date) {
5340 goto err;
5344 __get_user(host_ver->desc_len, &target_ver->desc_len);
5345 if (host_ver->desc_len) {
5346 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5347 target_ver->desc_len, 0);
5348 if (!host_ver->desc) {
5349 goto err;
5353 return 0;
5354 err:
5355 unlock_drm_version(host_ver, target_ver, false);
5356 return -EFAULT;
5357 }
5359 static inline void host_to_target_drmversion(
5360 struct target_drm_version *target_ver,
5361 struct drm_version *host_ver)
5363 __put_user(host_ver->version_major, &target_ver->version_major);
5364 __put_user(host_ver->version_minor, &target_ver->version_minor);
5365 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5366 __put_user(host_ver->name_len, &target_ver->name_len);
5367 __put_user(host_ver->date_len, &target_ver->date_len);
5368 __put_user(host_ver->desc_len, &target_ver->desc_len);
5369 unlock_drm_version(host_ver, target_ver, true);
5370 }
5372 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5373 int fd, int cmd, abi_long arg)
5375 struct drm_version *ver;
5376 struct target_drm_version *target_ver;
5377 abi_long ret;
5379 switch (ie->host_cmd) {
5380 case DRM_IOCTL_VERSION:
5381 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5382 return -TARGET_EFAULT;
5384 ver = (struct drm_version *)buf_temp;
5385 ret = target_to_host_drmversion(ver, target_ver);
5386 if (!is_error(ret)) {
5387 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5388 if (is_error(ret)) {
5389 unlock_drm_version(ver, target_ver, false);
5390 } else {
5391 host_to_target_drmversion(target_ver, ver);
5394 unlock_user_struct(target_ver, arg, 0);
5395 return ret;
5396 }
5397 return -TARGET_ENOSYS;
5398 }
5400 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5401 struct drm_i915_getparam *gparam,
5402 int fd, abi_long arg)
5404 abi_long ret;
5405 int value;
5406 struct target_drm_i915_getparam *target_gparam;
5408 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5409 return -TARGET_EFAULT;
5412 __get_user(gparam->param, &target_gparam->param);
5413 gparam->value = &value;
5414 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5415 put_user_s32(value, target_gparam->value);
5417 unlock_user_struct(target_gparam, arg, 0);
5418 return ret;
5419 }
5421 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5422 int fd, int cmd, abi_long arg)
5424 switch (ie->host_cmd) {
5425 case DRM_IOCTL_I915_GETPARAM:
5426 return do_ioctl_drm_i915_getparam(ie,
5427 (struct drm_i915_getparam *)buf_temp,
5428 fd, arg);
5429 default:
5430 return -TARGET_ENOSYS;
5431 }
5432 }
5434 #endif
5436 IOCTLEntry ioctl_entries[] = {
5437 #define IOCTL(cmd, access, ...) \
5438 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5439 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5440 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5441 #define IOCTL_IGNORE(cmd) \
5442 { TARGET_ ## cmd, 0, #cmd },
5443 #include "ioctls.h"
5444 { 0, 0, },
5445 };
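/*
 * Each IOCTL() line in ioctls.h becomes one table entry. For a
 * hypothetical IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * the macro expands to
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 */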
5447 /* ??? Implement proper locking for ioctls. */
5448 /* do_ioctl() must return target values and target errnos. */
5449 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5451 const IOCTLEntry *ie;
5452 const argtype *arg_type;
5453 abi_long ret;
5454 uint8_t buf_temp[MAX_STRUCT_SIZE];
5455 int target_size;
5456 void *argptr;
5458 ie = ioctl_entries;
5459 for(;;) {
5460 if (ie->target_cmd == 0) {
5461 qemu_log_mask(
5462 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5463 return -TARGET_ENOSYS;
5465 if (ie->target_cmd == cmd)
5466 break;
5467 ie++;
5469 arg_type = ie->arg_type;
5470 if (ie->do_ioctl) {
5471 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5472 } else if (!ie->host_cmd) {
5473 /* Some architectures define BSD ioctls in their headers
5474 that are not implemented in Linux. */
5475 return -TARGET_ENOSYS;
5478 switch(arg_type[0]) {
5479 case TYPE_NULL:
5480 /* no argument */
5481 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5482 break;
5483 case TYPE_PTRVOID:
5484 case TYPE_INT:
5485 case TYPE_LONG:
5486 case TYPE_ULONG:
5487 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5488 break;
5489 case TYPE_PTR:
5490 arg_type++;
5491 target_size = thunk_type_size(arg_type, 0);
5492 switch(ie->access) {
5493 case IOC_R:
5494 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5495 if (!is_error(ret)) {
5496 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5497 if (!argptr)
5498 return -TARGET_EFAULT;
5499 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5500 unlock_user(argptr, arg, target_size);
5502 break;
5503 case IOC_W:
5504 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5505 if (!argptr)
5506 return -TARGET_EFAULT;
5507 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5508 unlock_user(argptr, arg, 0);
5509 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5510 break;
5511 default:
5512 case IOC_RW:
5513 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5514 if (!argptr)
5515 return -TARGET_EFAULT;
5516 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5517 unlock_user(argptr, arg, 0);
5518 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5519 if (!is_error(ret)) {
5520 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5521 if (!argptr)
5522 return -TARGET_EFAULT;
5523 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5524 unlock_user(argptr, arg, target_size);
5526 break;
5528 break;
5529 default:
5530 qemu_log_mask(LOG_UNIMP,
5531 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5532 (long)cmd, arg_type[0]);
5533 ret = -TARGET_ENOSYS;
5534 break;
5536 return ret;
5537 }
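/*
 * Summary of the generic path above: IOC_R converts host to target after
 * the ioctl, IOC_W converts target to host before it, and IOC_RW does
 * both, all driven by the thunk type descriptions, so no per-ioctl code
 * is needed unless a do_ioctl callback was registered for the entry.
 */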
5539 static const bitmask_transtbl iflag_tbl[] = {
5540 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5541 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5542 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5543 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5544 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5545 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5546 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5547 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5548 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5549 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5550 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5551 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5552 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5553 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5554 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5555 { 0, 0, 0, 0 }
5556 };
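/*
 * Each row of these termios flag tables is { target_mask, target_bits,
 * host_mask, host_bits }: target_to_host_bitmask() sets host_bits when
 * (flags & target_mask) == target_bits, and host_to_target_bitmask()
 * does the reverse. Multi-bit fields such as NLDLY or CBAUD therefore
 * get one row per possible value.
 */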
5558 static const bitmask_transtbl oflag_tbl[] = {
5559 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5560 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5561 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5562 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5563 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5564 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5565 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5566 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5567 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5568 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5569 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5570 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5571 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5572 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5573 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5574 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5575 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5576 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5577 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5578 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5579 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5580 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5581 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5582 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5583 { 0, 0, 0, 0 }
5584 };
5586 static const bitmask_transtbl cflag_tbl[] = {
5587 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5588 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5589 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5590 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5591 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5592 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5593 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5594 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5595 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5596 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5597 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5598 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5599 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5600 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5601 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5602 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5603 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5604 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5605 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5606 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5607 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5608 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5609 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5610 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5611 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5612 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5613 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5614 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5615 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5616 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5617 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5618 { 0, 0, 0, 0 }
5619 };
5621 static const bitmask_transtbl lflag_tbl[] = {
5622 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5623 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5624 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5625 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5626 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5627 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5628 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5629 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5630 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5631 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5632 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5633 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5634 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5635 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5636 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5637 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5638 { 0, 0, 0, 0 }
5639 };
5641 static void target_to_host_termios (void *dst, const void *src)
5643 struct host_termios *host = dst;
5644 const struct target_termios *target = src;
5646 host->c_iflag =
5647 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5648 host->c_oflag =
5649 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5650 host->c_cflag =
5651 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5652 host->c_lflag =
5653 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5654 host->c_line = target->c_line;
5656 memset(host->c_cc, 0, sizeof(host->c_cc));
5657 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5658 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5659 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5660 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5661 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5662 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5663 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5664 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5665 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5666 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5667 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5668 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5669 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5670 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5671 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5672 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5673 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5674 }
5676 static void host_to_target_termios (void *dst, const void *src)
5678 struct target_termios *target = dst;
5679 const struct host_termios *host = src;
5681 target->c_iflag =
5682 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5683 target->c_oflag =
5684 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5685 target->c_cflag =
5686 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5687 target->c_lflag =
5688 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5689 target->c_line = host->c_line;
5691 memset(target->c_cc, 0, sizeof(target->c_cc));
5692 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5693 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5694 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5695 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5696 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5697 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5698 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5699 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5700 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5701 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5702 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5703 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5704 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5705 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5706 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5707 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5708 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5709 }
5711 static const StructEntry struct_termios_def = {
5712 .convert = { host_to_target_termios, target_to_host_termios },
5713 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5714 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5715 .print = print_termios,
5716 };
5718 static bitmask_transtbl mmap_flags_tbl[] = {
5719 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5720 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5721 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5722 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5723 MAP_ANONYMOUS, MAP_ANONYMOUS },
5724 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5725 MAP_GROWSDOWN, MAP_GROWSDOWN },
5726 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5727 MAP_DENYWRITE, MAP_DENYWRITE },
5728 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5729 MAP_EXECUTABLE, MAP_EXECUTABLE },
5730 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5731 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5732 MAP_NORESERVE, MAP_NORESERVE },
5733 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5734 /* MAP_STACK had been ignored by the kernel for quite some time.
5735 Recognize it for the target insofar as we do not want to pass
5736 it through to the host. */
5737 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5738 { 0, 0, 0, 0 }
5739 };
5741 /*
5742 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5743 * TARGET_I386 is defined if TARGET_X86_64 is defined
5744 */
5745 #if defined(TARGET_I386)
5747 /* NOTE: there is really one LDT for all the threads */
5748 static uint8_t *ldt_table;
5750 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5752 int size;
5753 void *p;
5755 if (!ldt_table)
5756 return 0;
5757 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5758 if (size > bytecount)
5759 size = bytecount;
5760 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5761 if (!p)
5762 return -TARGET_EFAULT;
5763 /* ??? Should this be byteswapped? */
5764 memcpy(p, ldt_table, size);
5765 unlock_user(p, ptr, size);
5766 return size;
5767 }
5769 /* XXX: add locking support */
5770 static abi_long write_ldt(CPUX86State *env,
5771 abi_ulong ptr, unsigned long bytecount, int oldmode)
5773 struct target_modify_ldt_ldt_s ldt_info;
5774 struct target_modify_ldt_ldt_s *target_ldt_info;
5775 int seg_32bit, contents, read_exec_only, limit_in_pages;
5776 int seg_not_present, useable, lm;
5777 uint32_t *lp, entry_1, entry_2;
5779 if (bytecount != sizeof(ldt_info))
5780 return -TARGET_EINVAL;
5781 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5782 return -TARGET_EFAULT;
5783 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5784 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5785 ldt_info.limit = tswap32(target_ldt_info->limit);
5786 ldt_info.flags = tswap32(target_ldt_info->flags);
5787 unlock_user_struct(target_ldt_info, ptr, 0);
5789 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5790 return -TARGET_EINVAL;
5791 seg_32bit = ldt_info.flags & 1;
5792 contents = (ldt_info.flags >> 1) & 3;
5793 read_exec_only = (ldt_info.flags >> 3) & 1;
5794 limit_in_pages = (ldt_info.flags >> 4) & 1;
5795 seg_not_present = (ldt_info.flags >> 5) & 1;
5796 useable = (ldt_info.flags >> 6) & 1;
5797 #ifdef TARGET_ABI32
5798 lm = 0;
5799 #else
5800 lm = (ldt_info.flags >> 7) & 1;
5801 #endif
5802 if (contents == 3) {
5803 if (oldmode)
5804 return -TARGET_EINVAL;
5805 if (seg_not_present == 0)
5806 return -TARGET_EINVAL;
5808 /* allocate the LDT */
5809 if (!ldt_table) {
5810 env->ldt.base = target_mmap(0,
5811 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5812 PROT_READ|PROT_WRITE,
5813 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5814 if (env->ldt.base == -1)
5815 return -TARGET_ENOMEM;
5816 memset(g2h(env->ldt.base), 0,
5817 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5818 env->ldt.limit = 0xffff;
5819 ldt_table = g2h(env->ldt.base);
5822 /* NOTE: same code as Linux kernel */
5823 /* Allow LDTs to be cleared by the user. */
5824 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5825 if (oldmode ||
5826 (contents == 0 &&
5827 read_exec_only == 1 &&
5828 seg_32bit == 0 &&
5829 limit_in_pages == 0 &&
5830 seg_not_present == 1 &&
5831 useable == 0 )) {
5832 entry_1 = 0;
5833 entry_2 = 0;
5834 goto install;
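/* Pack the fields into a standard x86 segment descriptor: entry_1 holds
 * base[15:0] and limit[15:0]; entry_2 holds the remaining base and limit
 * bits together with the type and flag bits. The constant 0x7000 sets
 * S=1 (code/data segment) and DPL=3; the present bit is derived from
 * seg_not_present. */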
5838 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5839 (ldt_info.limit & 0x0ffff);
5840 entry_2 = (ldt_info.base_addr & 0xff000000) |
5841 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5842 (ldt_info.limit & 0xf0000) |
5843 ((read_exec_only ^ 1) << 9) |
5844 (contents << 10) |
5845 ((seg_not_present ^ 1) << 15) |
5846 (seg_32bit << 22) |
5847 (limit_in_pages << 23) |
5848 (lm << 21) |
5849 0x7000;
5850 if (!oldmode)
5851 entry_2 |= (useable << 20);
5853 /* Install the new entry ... */
5854 install:
5855 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5856 lp[0] = tswap32(entry_1);
5857 lp[1] = tswap32(entry_2);
5858 return 0;
5861 /* specific and weird i386 syscalls */
5862 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5863 unsigned long bytecount)
5865 abi_long ret;
5867 switch (func) {
5868 case 0:
5869 ret = read_ldt(ptr, bytecount);
5870 break;
5871 case 1:
5872 ret = write_ldt(env, ptr, bytecount, 1);
5873 break;
5874 case 0x11:
5875 ret = write_ldt(env, ptr, bytecount, 0);
5876 break;
5877 default:
5878 ret = -TARGET_ENOSYS;
5879 break;
5881 return ret;
5884 #if defined(TARGET_ABI32)
5885 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5887 uint64_t *gdt_table = g2h(env->gdt.base);
5888 struct target_modify_ldt_ldt_s ldt_info;
5889 struct target_modify_ldt_ldt_s *target_ldt_info;
5890 int seg_32bit, contents, read_exec_only, limit_in_pages;
5891 int seg_not_present, useable, lm;
5892 uint32_t *lp, entry_1, entry_2;
5893 int i;
5895 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5896 if (!target_ldt_info)
5897 return -TARGET_EFAULT;
5898 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5899 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5900 ldt_info.limit = tswap32(target_ldt_info->limit);
5901 ldt_info.flags = tswap32(target_ldt_info->flags);
5902 if (ldt_info.entry_number == -1) {
5903 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5904 if (gdt_table[i] == 0) {
5905 ldt_info.entry_number = i;
5906 target_ldt_info->entry_number = tswap32(i);
5907 break;
5911 unlock_user_struct(target_ldt_info, ptr, 1);
5913 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5914 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5915 return -TARGET_EINVAL;
5916 seg_32bit = ldt_info.flags & 1;
5917 contents = (ldt_info.flags >> 1) & 3;
5918 read_exec_only = (ldt_info.flags >> 3) & 1;
5919 limit_in_pages = (ldt_info.flags >> 4) & 1;
5920 seg_not_present = (ldt_info.flags >> 5) & 1;
5921 useable = (ldt_info.flags >> 6) & 1;
5922 #ifdef TARGET_ABI32
5923 lm = 0;
5924 #else
5925 lm = (ldt_info.flags >> 7) & 1;
5926 #endif
5928 if (contents == 3) {
5929 if (seg_not_present == 0)
5930 return -TARGET_EINVAL;
5933 /* NOTE: same code as Linux kernel */
5934 /* Allow LDTs to be cleared by the user. */
5935 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5936 if ((contents == 0 &&
5937 read_exec_only == 1 &&
5938 seg_32bit == 0 &&
5939 limit_in_pages == 0 &&
5940 seg_not_present == 1 &&
5941 useable == 0 )) {
5942 entry_1 = 0;
5943 entry_2 = 0;
5944 goto install;
5948 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5949 (ldt_info.limit & 0x0ffff);
5950 entry_2 = (ldt_info.base_addr & 0xff000000) |
5951 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5952 (ldt_info.limit & 0xf0000) |
5953 ((read_exec_only ^ 1) << 9) |
5954 (contents << 10) |
5955 ((seg_not_present ^ 1) << 15) |
5956 (seg_32bit << 22) |
5957 (limit_in_pages << 23) |
5958 (useable << 20) |
5959 (lm << 21) |
5960 0x7000;
5962 /* Install the new entry ... */
5963 install:
5964 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5965 lp[0] = tswap32(entry_1);
5966 lp[1] = tswap32(entry_2);
5967 return 0;
5970 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5972 struct target_modify_ldt_ldt_s *target_ldt_info;
5973 uint64_t *gdt_table = g2h(env->gdt.base);
5974 uint32_t base_addr, limit, flags;
5975 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5976 int seg_not_present, useable, lm;
5977 uint32_t *lp, entry_1, entry_2;
5979 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5980 if (!target_ldt_info)
5981 return -TARGET_EFAULT;
5982 idx = tswap32(target_ldt_info->entry_number);
5983 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5984 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5985 unlock_user_struct(target_ldt_info, ptr, 1);
5986 return -TARGET_EINVAL;
5988 lp = (uint32_t *)(gdt_table + idx);
5989 entry_1 = tswap32(lp[0]);
5990 entry_2 = tswap32(lp[1]);
5992 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5993 contents = (entry_2 >> 10) & 3;
5994 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5995 seg_32bit = (entry_2 >> 22) & 1;
5996 limit_in_pages = (entry_2 >> 23) & 1;
5997 useable = (entry_2 >> 20) & 1;
5998 #ifdef TARGET_ABI32
5999 lm = 0;
6000 #else
6001 lm = (entry_2 >> 21) & 1;
6002 #endif
6003 flags = (seg_32bit << 0) | (contents << 1) |
6004 (read_exec_only << 3) | (limit_in_pages << 4) |
6005 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6006 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6007 base_addr = (entry_1 >> 16) |
6008 (entry_2 & 0xff000000) |
6009 ((entry_2 & 0xff) << 16);
6010 target_ldt_info->base_addr = tswapal(base_addr);
6011 target_ldt_info->limit = tswap32(limit);
6012 target_ldt_info->flags = tswap32(flags);
6013 unlock_user_struct(target_ldt_info, ptr, 1);
6014 return 0;
6017 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6019 return -TARGET_ENOSYS;
6021 #else
6022 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6024 abi_long ret = 0;
6025 abi_ulong val;
6026 int idx;
6028 switch(code) {
6029 case TARGET_ARCH_SET_GS:
6030 case TARGET_ARCH_SET_FS:
6031 if (code == TARGET_ARCH_SET_GS)
6032 idx = R_GS;
6033 else
6034 idx = R_FS;
6035 cpu_x86_load_seg(env, idx, 0);
6036 env->segs[idx].base = addr;
6037 break;
6038 case TARGET_ARCH_GET_GS:
6039 case TARGET_ARCH_GET_FS:
6040 if (code == TARGET_ARCH_GET_GS)
6041 idx = R_GS;
6042 else
6043 idx = R_FS;
6044 val = env->segs[idx].base;
6045 if (put_user(val, addr, abi_ulong))
6046 ret = -TARGET_EFAULT;
6047 break;
6048 default:
6049 ret = -TARGET_EINVAL;
6050 break;
6052 return ret;
6054 #endif /* defined(TARGET_ABI32) */
6056 #endif /* defined(TARGET_I386) */
6058 #define NEW_STACK_SIZE 0x40000
6061 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6062 typedef struct {
6063 CPUArchState *env;
6064 pthread_mutex_t mutex;
6065 pthread_cond_t cond;
6066 pthread_t thread;
6067 uint32_t tid;
6068 abi_ulong child_tidptr;
6069 abi_ulong parent_tidptr;
6070 sigset_t sigmask;
6071 } new_thread_info;
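/* State handed to clone_func(): mutex/cond implement the parent-child
 * startup handshake, and sigmask holds the signal mask to restore in the
 * child once setup is complete. */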
6073 static void *clone_func(void *arg)
6075 new_thread_info *info = arg;
6076 CPUArchState *env;
6077 CPUState *cpu;
6078 TaskState *ts;
6080 rcu_register_thread();
6081 tcg_register_thread();
6082 env = info->env;
6083 cpu = env_cpu(env);
6084 thread_cpu = cpu;
6085 ts = (TaskState *)cpu->opaque;
6086 info->tid = sys_gettid();
6087 task_settid(ts);
6088 if (info->child_tidptr)
6089 put_user_u32(info->tid, info->child_tidptr);
6090 if (info->parent_tidptr)
6091 put_user_u32(info->tid, info->parent_tidptr);
6092 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6093 /* Enable signals. */
6094 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6095 /* Signal to the parent that we're ready. */
6096 pthread_mutex_lock(&info->mutex);
6097 pthread_cond_broadcast(&info->cond);
6098 pthread_mutex_unlock(&info->mutex);
6099 /* Wait until the parent has finished initializing the TLS state. */
6100 pthread_mutex_lock(&clone_lock);
6101 pthread_mutex_unlock(&clone_lock);
6102 cpu_loop(env);
6103 /* never exits */
6104 return NULL;
6107 /* do_fork() must return host values and target errnos (unlike most
6108 do_*() functions). */
6109 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6110 abi_ulong parent_tidptr, target_ulong newtls,
6111 abi_ulong child_tidptr)
6113 CPUState *cpu = env_cpu(env);
6114 int ret;
6115 TaskState *ts;
6116 CPUState *new_cpu;
6117 CPUArchState *new_env;
6118 sigset_t sigmask;
6120 flags &= ~CLONE_IGNORED_FLAGS;
6122 /* Emulate vfork() with fork() */
6123 if (flags & CLONE_VFORK)
6124 flags &= ~(CLONE_VFORK | CLONE_VM);
6126 if (flags & CLONE_VM) {
6127 TaskState *parent_ts = (TaskState *)cpu->opaque;
6128 new_thread_info info;
6129 pthread_attr_t attr;
6131 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6132 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6133 return -TARGET_EINVAL;
6136 ts = g_new0(TaskState, 1);
6137 init_task_state(ts);
6139 /* Grab a mutex so that thread setup appears atomic. */
6140 pthread_mutex_lock(&clone_lock);
6142 /* we create a new CPU instance. */
6143 new_env = cpu_copy(env);
6144 /* Init regs that differ from the parent. */
6145 cpu_clone_regs_child(new_env, newsp, flags);
6146 cpu_clone_regs_parent(env, flags);
6147 new_cpu = env_cpu(new_env);
6148 new_cpu->opaque = ts;
6149 ts->bprm = parent_ts->bprm;
6150 ts->info = parent_ts->info;
6151 ts->signal_mask = parent_ts->signal_mask;
6153 if (flags & CLONE_CHILD_CLEARTID) {
6154 ts->child_tidptr = child_tidptr;
6157 if (flags & CLONE_SETTLS) {
6158 cpu_set_tls (new_env, newtls);
6161 memset(&info, 0, sizeof(info));
6162 pthread_mutex_init(&info.mutex, NULL);
6163 pthread_mutex_lock(&info.mutex);
6164 pthread_cond_init(&info.cond, NULL);
6165 info.env = new_env;
6166 if (flags & CLONE_CHILD_SETTID) {
6167 info.child_tidptr = child_tidptr;
6169 if (flags & CLONE_PARENT_SETTID) {
6170 info.parent_tidptr = parent_tidptr;
6173 ret = pthread_attr_init(&attr);
6174 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6175 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
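/* Note: the return values of the three pthread_attr_* calls above are
 * not checked; ret is overwritten by pthread_create() below. */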
6176 /* It is not safe to deliver signals until the child has finished
6177 initializing, so temporarily block all signals. */
6178 sigfillset(&sigmask);
6179 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6180 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6182 /* If this is our first additional thread, we need to ensure we
6183 * generate code for parallel execution and flush old translations.
6185 if (!parallel_cpus) {
6186 parallel_cpus = true;
6187 tb_flush(cpu);
6190 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6191 /* TODO: Free new CPU state if thread creation failed. */
6193 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6194 pthread_attr_destroy(&attr);
6195 if (ret == 0) {
6196 /* Wait for the child to initialize. */
6197 pthread_cond_wait(&info.cond, &info.mutex);
6198 ret = info.tid;
6199 } else {
6200 ret = -1;
6202 pthread_mutex_unlock(&info.mutex);
6203 pthread_cond_destroy(&info.cond);
6204 pthread_mutex_destroy(&info.mutex);
6205 pthread_mutex_unlock(&clone_lock);
6206 } else {
6207 /* if no CLONE_VM, we consider it a fork */
6208 if (flags & CLONE_INVALID_FORK_FLAGS) {
6209 return -TARGET_EINVAL;
6212 /* We can't support custom termination signals */
6213 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6214 return -TARGET_EINVAL;
6217 if (block_signals()) {
6218 return -TARGET_ERESTARTSYS;
6221 fork_start();
6222 ret = fork();
6223 if (ret == 0) {
6224 /* Child Process. */
6225 cpu_clone_regs_child(env, newsp, flags);
6226 fork_end(1);
6227 /* There is a race condition here. The parent process could
6228 theoretically read the TID in the child process before the child
6229 tid is set. Avoiding this would require using either ptrace
6230 (not implemented) or having *_tidptr point at a shared memory
6231 mapping. We can't repeat the spinlock hack used above because
6232 the child process gets its own copy of the lock. */
6233 if (flags & CLONE_CHILD_SETTID)
6234 put_user_u32(sys_gettid(), child_tidptr);
6235 if (flags & CLONE_PARENT_SETTID)
6236 put_user_u32(sys_gettid(), parent_tidptr);
6237 ts = (TaskState *)cpu->opaque;
6238 if (flags & CLONE_SETTLS)
6239 cpu_set_tls (env, newtls);
6240 if (flags & CLONE_CHILD_CLEARTID)
6241 ts->child_tidptr = child_tidptr;
6242 } else {
6243 cpu_clone_regs_parent(env, flags);
6244 fork_end(0);
6247 return ret;
6250 /* Warning: doesn't handle Linux-specific flags... */
6251 static int target_to_host_fcntl_cmd(int cmd)
6253 int ret;
6255 switch(cmd) {
6256 case TARGET_F_DUPFD:
6257 case TARGET_F_GETFD:
6258 case TARGET_F_SETFD:
6259 case TARGET_F_GETFL:
6260 case TARGET_F_SETFL:
6261 case TARGET_F_OFD_GETLK:
6262 case TARGET_F_OFD_SETLK:
6263 case TARGET_F_OFD_SETLKW:
6264 ret = cmd;
6265 break;
6266 case TARGET_F_GETLK:
6267 ret = F_GETLK64;
6268 break;
6269 case TARGET_F_SETLK:
6270 ret = F_SETLK64;
6271 break;
6272 case TARGET_F_SETLKW:
6273 ret = F_SETLKW64;
6274 break;
6275 case TARGET_F_GETOWN:
6276 ret = F_GETOWN;
6277 break;
6278 case TARGET_F_SETOWN:
6279 ret = F_SETOWN;
6280 break;
6281 case TARGET_F_GETSIG:
6282 ret = F_GETSIG;
6283 break;
6284 case TARGET_F_SETSIG:
6285 ret = F_SETSIG;
6286 break;
6287 #if TARGET_ABI_BITS == 32
6288 case TARGET_F_GETLK64:
6289 ret = F_GETLK64;
6290 break;
6291 case TARGET_F_SETLK64:
6292 ret = F_SETLK64;
6293 break;
6294 case TARGET_F_SETLKW64:
6295 ret = F_SETLKW64;
6296 break;
6297 #endif
6298 case TARGET_F_SETLEASE:
6299 ret = F_SETLEASE;
6300 break;
6301 case TARGET_F_GETLEASE:
6302 ret = F_GETLEASE;
6303 break;
6304 #ifdef F_DUPFD_CLOEXEC
6305 case TARGET_F_DUPFD_CLOEXEC:
6306 ret = F_DUPFD_CLOEXEC;
6307 break;
6308 #endif
6309 case TARGET_F_NOTIFY:
6310 ret = F_NOTIFY;
6311 break;
6312 #ifdef F_GETOWN_EX
6313 case TARGET_F_GETOWN_EX:
6314 ret = F_GETOWN_EX;
6315 break;
6316 #endif
6317 #ifdef F_SETOWN_EX
6318 case TARGET_F_SETOWN_EX:
6319 ret = F_SETOWN_EX;
6320 break;
6321 #endif
6322 #ifdef F_SETPIPE_SZ
6323 case TARGET_F_SETPIPE_SZ:
6324 ret = F_SETPIPE_SZ;
6325 break;
6326 case TARGET_F_GETPIPE_SZ:
6327 ret = F_GETPIPE_SZ;
6328 break;
6329 #endif
6330 default:
6331 ret = -TARGET_EINVAL;
6332 break;
6335 #if defined(__powerpc64__)
6336 /* On PPC64, the glibc headers define the F_*LK* values as 12, 13 and 14,
6337 * which the kernel does not support. The glibc fcntl call actually adjusts
6338 * them to 5, 6 and 7 before making the syscall(). Since we make the
6339 * syscall directly, adjust to what is supported by the kernel.
6341 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6342 ret -= F_GETLK64 - 5;
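/* e.g. with the glibc values F_GETLK64=12, F_SETLK64=13, F_SETLKW64=14,
 * this maps them to the kernel's 5, 6 and 7 respectively. */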
6344 #endif
6346 return ret;
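/* FLOCK_TRANSTBL is an X-macro: each user below redefines TRANSTBL_CONVERT
 * so that the same list of lock types expands into either the
 * target-to-host or the host-to-target direction of the switch. */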
6349 #define FLOCK_TRANSTBL \
6350 switch (type) { \
6351 TRANSTBL_CONVERT(F_RDLCK); \
6352 TRANSTBL_CONVERT(F_WRLCK); \
6353 TRANSTBL_CONVERT(F_UNLCK); \
6354 TRANSTBL_CONVERT(F_EXLCK); \
6355 TRANSTBL_CONVERT(F_SHLCK); \
6358 static int target_to_host_flock(int type)
6360 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6361 FLOCK_TRANSTBL
6362 #undef TRANSTBL_CONVERT
6363 return -TARGET_EINVAL;
6366 static int host_to_target_flock(int type)
6368 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6369 FLOCK_TRANSTBL
6370 #undef TRANSTBL_CONVERT
6371 /* if we don't know how to convert the value coming
6372 * from the host, we copy it to the target field as-is
6374 return type;
6377 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6378 abi_ulong target_flock_addr)
6380 struct target_flock *target_fl;
6381 int l_type;
6383 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6384 return -TARGET_EFAULT;
6387 __get_user(l_type, &target_fl->l_type);
6388 l_type = target_to_host_flock(l_type);
6389 if (l_type < 0) {
6390 return l_type;
6392 fl->l_type = l_type;
6393 __get_user(fl->l_whence, &target_fl->l_whence);
6394 __get_user(fl->l_start, &target_fl->l_start);
6395 __get_user(fl->l_len, &target_fl->l_len);
6396 __get_user(fl->l_pid, &target_fl->l_pid);
6397 unlock_user_struct(target_fl, target_flock_addr, 0);
6398 return 0;
6401 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6402 const struct flock64 *fl)
6404 struct target_flock *target_fl;
6405 short l_type;
6407 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6408 return -TARGET_EFAULT;
6411 l_type = host_to_target_flock(fl->l_type);
6412 __put_user(l_type, &target_fl->l_type);
6413 __put_user(fl->l_whence, &target_fl->l_whence);
6414 __put_user(fl->l_start, &target_fl->l_start);
6415 __put_user(fl->l_len, &target_fl->l_len);
6416 __put_user(fl->l_pid, &target_fl->l_pid);
6417 unlock_user_struct(target_fl, target_flock_addr, 1);
6418 return 0;
6421 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6422 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6424 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6425 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6426 abi_ulong target_flock_addr)
6428 struct target_oabi_flock64 *target_fl;
6429 int l_type;
6431 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6432 return -TARGET_EFAULT;
6435 __get_user(l_type, &target_fl->l_type);
6436 l_type = target_to_host_flock(l_type);
6437 if (l_type < 0) {
6438 return l_type;
6440 fl->l_type = l_type;
6441 __get_user(fl->l_whence, &target_fl->l_whence);
6442 __get_user(fl->l_start, &target_fl->l_start);
6443 __get_user(fl->l_len, &target_fl->l_len);
6444 __get_user(fl->l_pid, &target_fl->l_pid);
6445 unlock_user_struct(target_fl, target_flock_addr, 0);
6446 return 0;
6449 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6450 const struct flock64 *fl)
6452 struct target_oabi_flock64 *target_fl;
6453 short l_type;
6455 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6456 return -TARGET_EFAULT;
6459 l_type = host_to_target_flock(fl->l_type);
6460 __put_user(l_type, &target_fl->l_type);
6461 __put_user(fl->l_whence, &target_fl->l_whence);
6462 __put_user(fl->l_start, &target_fl->l_start);
6463 __put_user(fl->l_len, &target_fl->l_len);
6464 __put_user(fl->l_pid, &target_fl->l_pid);
6465 unlock_user_struct(target_fl, target_flock_addr, 1);
6466 return 0;
6468 #endif
6470 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6471 abi_ulong target_flock_addr)
6473 struct target_flock64 *target_fl;
6474 int l_type;
6476 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6477 return -TARGET_EFAULT;
6480 __get_user(l_type, &target_fl->l_type);
6481 l_type = target_to_host_flock(l_type);
6482 if (l_type < 0) {
6483 return l_type;
6485 fl->l_type = l_type;
6486 __get_user(fl->l_whence, &target_fl->l_whence);
6487 __get_user(fl->l_start, &target_fl->l_start);
6488 __get_user(fl->l_len, &target_fl->l_len);
6489 __get_user(fl->l_pid, &target_fl->l_pid);
6490 unlock_user_struct(target_fl, target_flock_addr, 0);
6491 return 0;
6494 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6495 const struct flock64 *fl)
6497 struct target_flock64 *target_fl;
6498 short l_type;
6500 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6501 return -TARGET_EFAULT;
6504 l_type = host_to_target_flock(fl->l_type);
6505 __put_user(l_type, &target_fl->l_type);
6506 __put_user(fl->l_whence, &target_fl->l_whence);
6507 __put_user(fl->l_start, &target_fl->l_start);
6508 __put_user(fl->l_len, &target_fl->l_len);
6509 __put_user(fl->l_pid, &target_fl->l_pid);
6510 unlock_user_struct(target_fl, target_flock_addr, 1);
6511 return 0;
6514 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6516 struct flock64 fl64;
6517 #ifdef F_GETOWN_EX
6518 struct f_owner_ex fox;
6519 struct target_f_owner_ex *target_fox;
6520 #endif
6521 abi_long ret;
6522 int host_cmd = target_to_host_fcntl_cmd(cmd);
6524 if (host_cmd == -TARGET_EINVAL)
6525 return host_cmd;
6527 switch(cmd) {
6528 case TARGET_F_GETLK:
6529 ret = copy_from_user_flock(&fl64, arg);
6530 if (ret) {
6531 return ret;
6533 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6534 if (ret == 0) {
6535 ret = copy_to_user_flock(arg, &fl64);
6537 break;
6539 case TARGET_F_SETLK:
6540 case TARGET_F_SETLKW:
6541 ret = copy_from_user_flock(&fl64, arg);
6542 if (ret) {
6543 return ret;
6545 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6546 break;
6548 case TARGET_F_GETLK64:
6549 case TARGET_F_OFD_GETLK:
6550 ret = copy_from_user_flock64(&fl64, arg);
6551 if (ret) {
6552 return ret;
6554 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6555 if (ret == 0) {
6556 ret = copy_to_user_flock64(arg, &fl64);
6558 break;
6559 case TARGET_F_SETLK64:
6560 case TARGET_F_SETLKW64:
6561 case TARGET_F_OFD_SETLK:
6562 case TARGET_F_OFD_SETLKW:
6563 ret = copy_from_user_flock64(&fl64, arg);
6564 if (ret) {
6565 return ret;
6567 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6568 break;
6570 case TARGET_F_GETFL:
6571 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6572 if (ret >= 0) {
6573 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6575 break;
6577 case TARGET_F_SETFL:
6578 ret = get_errno(safe_fcntl(fd, host_cmd,
6579 target_to_host_bitmask(arg,
6580 fcntl_flags_tbl)));
6581 break;
6583 #ifdef F_GETOWN_EX
6584 case TARGET_F_GETOWN_EX:
6585 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6586 if (ret >= 0) {
6587 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6588 return -TARGET_EFAULT;
6589 target_fox->type = tswap32(fox.type);
6590 target_fox->pid = tswap32(fox.pid);
6591 unlock_user_struct(target_fox, arg, 1);
6593 break;
6594 #endif
6596 #ifdef F_SETOWN_EX
6597 case TARGET_F_SETOWN_EX:
6598 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6599 return -TARGET_EFAULT;
6600 fox.type = tswap32(target_fox->type);
6601 fox.pid = tswap32(target_fox->pid);
6602 unlock_user_struct(target_fox, arg, 0);
6603 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6604 break;
6605 #endif
6607 case TARGET_F_SETOWN:
6608 case TARGET_F_GETOWN:
6609 case TARGET_F_SETSIG:
6610 case TARGET_F_GETSIG:
6611 case TARGET_F_SETLEASE:
6612 case TARGET_F_GETLEASE:
6613 case TARGET_F_SETPIPE_SZ:
6614 case TARGET_F_GETPIPE_SZ:
6615 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6616 break;
6618 default:
6619 ret = get_errno(safe_fcntl(fd, cmd, arg));
6620 break;
6622 return ret;
6625 #ifdef USE_UID16
6627 static inline int high2lowuid(int uid)
6629 if (uid > 65535)
6630 return 65534;
6631 else
6632 return uid;
6635 static inline int high2lowgid(int gid)
6637 if (gid > 65535)
6638 return 65534;
6639 else
6640 return gid;
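/* Note: 65534 matches the kernel's default overflowuid/overflowgid,
 * reported when a 32-bit ID cannot be represented in 16 bits. */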
6643 static inline int low2highuid(int uid)
6645 if ((int16_t)uid == -1)
6646 return -1;
6647 else
6648 return uid;
6651 static inline int low2highgid(int gid)
6653 if ((int16_t)gid == -1)
6654 return -1;
6655 else
6656 return gid;
6658 static inline int tswapid(int id)
6660 return tswap16(id);
6663 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6665 #else /* !USE_UID16 */
6666 static inline int high2lowuid(int uid)
6668 return uid;
6670 static inline int high2lowgid(int gid)
6672 return gid;
6674 static inline int low2highuid(int uid)
6676 return uid;
6678 static inline int low2highgid(int gid)
6680 return gid;
6682 static inline int tswapid(int id)
6684 return tswap32(id);
6687 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6689 #endif /* USE_UID16 */
6691 /* We must do direct syscalls for setting UID/GID, because we want to
6692 * implement the Linux system call semantics of "change only for this thread",
6693 * not the libc/POSIX semantics of "change for all threads in process".
6694 * (See http://ewontfix.com/17/ for more details.)
6695 * We use the 32-bit version of the syscalls if present; if it is not
6696 * then either the host architecture supports 32-bit UIDs natively with
6697 * the standard syscall, or the 16-bit UID is the best we can do.
6699 #ifdef __NR_setuid32
6700 #define __NR_sys_setuid __NR_setuid32
6701 #else
6702 #define __NR_sys_setuid __NR_setuid
6703 #endif
6704 #ifdef __NR_setgid32
6705 #define __NR_sys_setgid __NR_setgid32
6706 #else
6707 #define __NR_sys_setgid __NR_setgid
6708 #endif
6709 #ifdef __NR_setresuid32
6710 #define __NR_sys_setresuid __NR_setresuid32
6711 #else
6712 #define __NR_sys_setresuid __NR_setresuid
6713 #endif
6714 #ifdef __NR_setresgid32
6715 #define __NR_sys_setresgid __NR_setresgid32
6716 #else
6717 #define __NR_sys_setresgid __NR_setresgid
6718 #endif
6720 _syscall1(int, sys_setuid, uid_t, uid)
6721 _syscall1(int, sys_setgid, gid_t, gid)
6722 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6723 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6725 void syscall_init(void)
6727 IOCTLEntry *ie;
6728 const argtype *arg_type;
6729 int size;
6730 int i;
6732 thunk_init(STRUCT_MAX);
6734 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6735 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6736 #include "syscall_types.h"
6737 #undef STRUCT
6738 #undef STRUCT_SPECIAL
6740 /* Build the target_to_host_errno_table[] from
6741 * host_to_target_errno_table[]. */
6742 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6743 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6746 /* we patch the ioctl size if necessary. We rely on the fact that
6747 no ioctl has all the bits set to '1' in the size field */
6748 ie = ioctl_entries;
6749 while (ie->target_cmd != 0) {
6750 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6751 TARGET_IOC_SIZEMASK) {
6752 arg_type = ie->arg_type;
6753 if (arg_type[0] != TYPE_PTR) {
6754 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6755 ie->target_cmd);
6756 exit(1);
6758 arg_type++;
6759 size = thunk_type_size(arg_type, 0);
6760 ie->target_cmd = (ie->target_cmd &
6761 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6762 (size << TARGET_IOC_SIZESHIFT);
6765 /* automatic consistency check if same arch */
6766 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6767 (defined(__x86_64__) && defined(TARGET_X86_64))
6768 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6769 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6770 ie->name, ie->target_cmd, ie->host_cmd);
6772 #endif
6773 ie++;
6777 #ifdef TARGET_NR_truncate64
6778 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6779 abi_long arg2,
6780 abi_long arg3,
6781 abi_long arg4)
6783 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6784 arg2 = arg3;
6785 arg3 = arg4;
6787 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6789 #endif
6791 #ifdef TARGET_NR_ftruncate64
6792 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6793 abi_long arg2,
6794 abi_long arg3,
6795 abi_long arg4)
6797 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6798 arg2 = arg3;
6799 arg3 = arg4;
6801 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6803 #endif
6805 #if defined(TARGET_NR_timer_settime) || \
6806 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6807 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6808 abi_ulong target_addr)
6810 if (target_to_host_timespec(&host_its->it_interval, target_addr +
6811 offsetof(struct target_itimerspec,
6812 it_interval)) ||
6813 target_to_host_timespec(&host_its->it_value, target_addr +
6814 offsetof(struct target_itimerspec,
6815 it_value))) {
6816 return -TARGET_EFAULT;
6819 return 0;
6821 #endif
6823 #if defined(TARGET_NR_timer_settime64) || \
6824 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6825 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6826 abi_ulong target_addr)
6828 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6829 offsetof(struct target__kernel_itimerspec,
6830 it_interval)) ||
6831 target_to_host_timespec64(&host_its->it_value, target_addr +
6832 offsetof(struct target__kernel_itimerspec,
6833 it_value))) {
6834 return -TARGET_EFAULT;
6837 return 0;
6839 #endif
6841 #if ((defined(TARGET_NR_timerfd_gettime) || \
6842 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6843 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6844 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6845 struct itimerspec *host_its)
6847 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6848 it_interval),
6849 &host_its->it_interval) ||
6850 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6851 it_value),
6852 &host_its->it_value)) {
6853 return -TARGET_EFAULT;
6855 return 0;
6857 #endif
6859 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6860 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6861 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6862 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6863 struct itimerspec *host_its)
6865 if (host_to_target_timespec64(target_addr +
6866 offsetof(struct target__kernel_itimerspec,
6867 it_interval),
6868 &host_its->it_interval) ||
6869 host_to_target_timespec64(target_addr +
6870 offsetof(struct target__kernel_itimerspec,
6871 it_value),
6872 &host_its->it_value)) {
6873 return -TARGET_EFAULT;
6875 return 0;
6877 #endif
6879 #if defined(TARGET_NR_adjtimex) || \
6880 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6881 static inline abi_long target_to_host_timex(struct timex *host_tx,
6882 abi_long target_addr)
6884 struct target_timex *target_tx;
6886 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6887 return -TARGET_EFAULT;
6890 __get_user(host_tx->modes, &target_tx->modes);
6891 __get_user(host_tx->offset, &target_tx->offset);
6892 __get_user(host_tx->freq, &target_tx->freq);
6893 __get_user(host_tx->maxerror, &target_tx->maxerror);
6894 __get_user(host_tx->esterror, &target_tx->esterror);
6895 __get_user(host_tx->status, &target_tx->status);
6896 __get_user(host_tx->constant, &target_tx->constant);
6897 __get_user(host_tx->precision, &target_tx->precision);
6898 __get_user(host_tx->tolerance, &target_tx->tolerance);
6899 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6900 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6901 __get_user(host_tx->tick, &target_tx->tick);
6902 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6903 __get_user(host_tx->jitter, &target_tx->jitter);
6904 __get_user(host_tx->shift, &target_tx->shift);
6905 __get_user(host_tx->stabil, &target_tx->stabil);
6906 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6907 __get_user(host_tx->calcnt, &target_tx->calcnt);
6908 __get_user(host_tx->errcnt, &target_tx->errcnt);
6909 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6910 __get_user(host_tx->tai, &target_tx->tai);
6912 unlock_user_struct(target_tx, target_addr, 0);
6913 return 0;
6916 static inline abi_long host_to_target_timex(abi_long target_addr,
6917 struct timex *host_tx)
6919 struct target_timex *target_tx;
6921 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6922 return -TARGET_EFAULT;
6925 __put_user(host_tx->modes, &target_tx->modes);
6926 __put_user(host_tx->offset, &target_tx->offset);
6927 __put_user(host_tx->freq, &target_tx->freq);
6928 __put_user(host_tx->maxerror, &target_tx->maxerror);
6929 __put_user(host_tx->esterror, &target_tx->esterror);
6930 __put_user(host_tx->status, &target_tx->status);
6931 __put_user(host_tx->constant, &target_tx->constant);
6932 __put_user(host_tx->precision, &target_tx->precision);
6933 __put_user(host_tx->tolerance, &target_tx->tolerance);
6934 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6935 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6936 __put_user(host_tx->tick, &target_tx->tick);
6937 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6938 __put_user(host_tx->jitter, &target_tx->jitter);
6939 __put_user(host_tx->shift, &target_tx->shift);
6940 __put_user(host_tx->stabil, &target_tx->stabil);
6941 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6942 __put_user(host_tx->calcnt, &target_tx->calcnt);
6943 __put_user(host_tx->errcnt, &target_tx->errcnt);
6944 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6945 __put_user(host_tx->tai, &target_tx->tai);
6947 unlock_user_struct(target_tx, target_addr, 1);
6948 return 0;
6950 #endif
6952 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6953 abi_ulong target_addr)
6955 struct target_sigevent *target_sevp;
6957 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6958 return -TARGET_EFAULT;
6961 /* This union is awkward on 64 bit systems because it has a 32 bit
6962 * integer and a pointer in it; we follow the conversion approach
6963 * used for handling sigval types in signal.c so the guest should get
6964 * the correct value back even if we did a 64 bit byteswap and it's
6965 * using the 32 bit integer.
6967 host_sevp->sigev_value.sival_ptr =
6968 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6969 host_sevp->sigev_signo =
6970 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6971 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6972 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6974 unlock_user_struct(target_sevp, target_addr, 1);
6975 return 0;
6978 #if defined(TARGET_NR_mlockall)
6979 static inline int target_to_host_mlockall_arg(int arg)
6981 int result = 0;
6983 if (arg & TARGET_MCL_CURRENT) {
6984 result |= MCL_CURRENT;
6986 if (arg & TARGET_MCL_FUTURE) {
6987 result |= MCL_FUTURE;
6989 #ifdef MCL_ONFAULT
6990 if (arg & TARGET_MCL_ONFAULT) {
6991 result |= MCL_ONFAULT;
6993 #endif
6995 return result;
6997 #endif
6999 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7000 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7001 defined(TARGET_NR_newfstatat))
7002 static inline abi_long host_to_target_stat64(void *cpu_env,
7003 abi_ulong target_addr,
7004 struct stat *host_st)
7006 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7007 if (((CPUARMState *)cpu_env)->eabi) {
7008 struct target_eabi_stat64 *target_st;
7010 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7011 return -TARGET_EFAULT;
7012 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7013 __put_user(host_st->st_dev, &target_st->st_dev);
7014 __put_user(host_st->st_ino, &target_st->st_ino);
7015 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7016 __put_user(host_st->st_ino, &target_st->__st_ino);
7017 #endif
7018 __put_user(host_st->st_mode, &target_st->st_mode);
7019 __put_user(host_st->st_nlink, &target_st->st_nlink);
7020 __put_user(host_st->st_uid, &target_st->st_uid);
7021 __put_user(host_st->st_gid, &target_st->st_gid);
7022 __put_user(host_st->st_rdev, &target_st->st_rdev);
7023 __put_user(host_st->st_size, &target_st->st_size);
7024 __put_user(host_st->st_blksize, &target_st->st_blksize);
7025 __put_user(host_st->st_blocks, &target_st->st_blocks);
7026 __put_user(host_st->st_atime, &target_st->target_st_atime);
7027 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7028 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7029 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7030 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7031 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7032 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7033 #endif
7034 unlock_user_struct(target_st, target_addr, 1);
7035 } else
7036 #endif
7038 #if defined(TARGET_HAS_STRUCT_STAT64)
7039 struct target_stat64 *target_st;
7040 #else
7041 struct target_stat *target_st;
7042 #endif
7044 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7045 return -TARGET_EFAULT;
7046 memset(target_st, 0, sizeof(*target_st));
7047 __put_user(host_st->st_dev, &target_st->st_dev);
7048 __put_user(host_st->st_ino, &target_st->st_ino);
7049 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7050 __put_user(host_st->st_ino, &target_st->__st_ino);
7051 #endif
7052 __put_user(host_st->st_mode, &target_st->st_mode);
7053 __put_user(host_st->st_nlink, &target_st->st_nlink);
7054 __put_user(host_st->st_uid, &target_st->st_uid);
7055 __put_user(host_st->st_gid, &target_st->st_gid);
7056 __put_user(host_st->st_rdev, &target_st->st_rdev);
7057 /* XXX: better use of kernel struct */
7058 __put_user(host_st->st_size, &target_st->st_size);
7059 __put_user(host_st->st_blksize, &target_st->st_blksize);
7060 __put_user(host_st->st_blocks, &target_st->st_blocks);
7061 __put_user(host_st->st_atime, &target_st->target_st_atime);
7062 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7063 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7064 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7065 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7066 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7067 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7068 #endif
7069 unlock_user_struct(target_st, target_addr, 1);
7072 return 0;
7074 #endif
7076 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7077 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7078 abi_ulong target_addr)
7080 struct target_statx *target_stx;
7082 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7083 return -TARGET_EFAULT;
7085 memset(target_stx, 0, sizeof(*target_stx));
7087 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7088 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7089 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7090 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7091 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7092 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7093 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7094 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7095 __put_user(host_stx->stx_size, &target_stx->stx_size);
7096 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7097 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7098 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7099 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7100 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7101 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7102 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7103 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7104 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7105 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7106 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7107 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7108 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7109 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7111 unlock_user_struct(target_stx, target_addr, 1);
7113 return 0;
7115 #endif
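/* Dispatch to whichever host futex syscall matches the host's time_t
 * width: 64-bit hosts only have __NR_futex; 32-bit hosts use
 * __NR_futex_time64 when the host struct timespec carries a 64-bit
 * tv_sec, falling back to the classic __NR_futex otherwise. */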
7117 static int do_sys_futex(int *uaddr, int op, int val,
7118 const struct timespec *timeout, int *uaddr2,
7119 int val3)
7121 #if HOST_LONG_BITS == 64
7122 #if defined(__NR_futex)
7123 /* the host always has a 64-bit time_t and doesn't define a _time64 variant */
7124 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7126 #endif
7127 #else /* HOST_LONG_BITS == 64 */
7128 #if defined(__NR_futex_time64)
7129 if (sizeof(timeout->tv_sec) == 8) {
7130 /* _time64 function on 32bit arch */
7131 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7133 #endif
7134 #if defined(__NR_futex)
7135 /* old function on 32bit arch */
7136 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7137 #endif
7138 #endif /* HOST_LONG_BITS == 64 */
7139 g_assert_not_reached();
7142 static int do_safe_futex(int *uaddr, int op, int val,
7143 const struct timespec *timeout, int *uaddr2,
7144 int val3)
7146 #if HOST_LONG_BITS == 64
7147 #if defined(__NR_futex)
7148 /* the host always has a 64-bit time_t and doesn't define a _time64 variant */
7149 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7150 #endif
7151 #else /* HOST_LONG_BITS == 64 */
7152 #if defined(__NR_futex_time64)
7153 if (sizeof(timeout->tv_sec) == 8) {
7154 /* _time64 function on 32bit arch */
7155 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7156 val3));
7158 #endif
7159 #if defined(__NR_futex)
7160 /* old function on 32bit arch */
7161 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7162 #endif
7163 #endif /* HOST_LONG_BITS == 64 */
7164 return -TARGET_ENOSYS;
7167 /* ??? Using host futex calls even when target atomic operations
7168 are not really atomic probably breaks things. However, implementing
7169 futexes locally would make futexes shared between multiple processes
7170 tricky. In any case they're probably useless, because guest atomic
7171 operations won't work either. */
7172 #if defined(TARGET_NR_futex)
7173 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7174 target_ulong uaddr2, int val3)
7176 struct timespec ts, *pts;
7177 int base_op;
7179 /* ??? We assume FUTEX_* constants are the same on both host
7180 and target. */
7181 #ifdef FUTEX_CMD_MASK
7182 base_op = op & FUTEX_CMD_MASK;
7183 #else
7184 base_op = op;
7185 #endif
7186 switch (base_op) {
7187 case FUTEX_WAIT:
7188 case FUTEX_WAIT_BITSET:
7189 if (timeout) {
7190 pts = &ts;
7191 target_to_host_timespec(pts, timeout);
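/* Note: the return value of the conversion is not checked, so a
 * faulting guest timeout pointer is silently ignored here. */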
7192 } else {
7193 pts = NULL;
7195 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7196 case FUTEX_WAKE:
7197 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7198 case FUTEX_FD:
7199 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7200 case FUTEX_REQUEUE:
7201 case FUTEX_CMP_REQUEUE:
7202 case FUTEX_WAKE_OP:
7203 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7204 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7205 But the prototype takes a `struct timespec *'; insert casts
7206 to satisfy the compiler. We do not need to tswap TIMEOUT
7207 since it's not compared to guest memory. */
7208 pts = (struct timespec *)(uintptr_t) timeout;
7209 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7210 (base_op == FUTEX_CMP_REQUEUE
7211 ? tswap32(val3)
7212 : val3));
7213 default:
7214 return -TARGET_ENOSYS;
7217 #endif
7219 #if defined(TARGET_NR_futex_time64)
7220 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7221 target_ulong uaddr2, int val3)
7223 struct timespec ts, *pts;
7224 int base_op;
7226 /* ??? We assume FUTEX_* constants are the same on both host
7227 and target. */
7228 #ifdef FUTEX_CMD_MASK
7229 base_op = op & FUTEX_CMD_MASK;
7230 #else
7231 base_op = op;
7232 #endif
7233 switch (base_op) {
7234 case FUTEX_WAIT:
7235 case FUTEX_WAIT_BITSET:
7236 if (timeout) {
7237 pts = &ts;
7238 target_to_host_timespec64(pts, timeout);
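/* Note: as in do_futex(), the conversion's return value is not
 * checked, so a faulting guest timeout pointer is silently ignored. */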
7239 } else {
7240 pts = NULL;
7242 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7243 case FUTEX_WAKE:
7244 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7245 case FUTEX_FD:
7246 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7247 case FUTEX_REQUEUE:
7248 case FUTEX_CMP_REQUEUE:
7249 case FUTEX_WAKE_OP:
7250 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7251 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7252 But the prototype takes a `struct timespec *'; insert casts
7253 to satisfy the compiler. We do not need to tswap TIMEOUT
7254 since it's not compared to guest memory. */
7255 pts = (struct timespec *)(uintptr_t) timeout;
7256 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7257 (base_op == FUTEX_CMP_REQUEUE
7258 ? tswap32(val3)
7259 : val3));
7260 default:
7261 return -TARGET_ENOSYS;
7264 #endif
7266 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7267 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7268 abi_long handle, abi_long mount_id,
7269 abi_long flags)
7271 struct file_handle *target_fh;
7272 struct file_handle *fh;
7273 int mid = 0;
7274 abi_long ret;
7275 char *name;
7276 unsigned int size, total_size;
7278 if (get_user_s32(size, handle)) {
7279 return -TARGET_EFAULT;
7282 name = lock_user_string(pathname);
7283 if (!name) {
7284 return -TARGET_EFAULT;
7287 total_size = sizeof(struct file_handle) + size;
7288 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7289 if (!target_fh) {
7290 unlock_user(name, pathname, 0);
7291 return -TARGET_EFAULT;
7294 fh = g_malloc0(total_size);
7295 fh->handle_bytes = size;
7297 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7298 unlock_user(name, pathname, 0);
7300 /* man name_to_handle_at(2):
7301 * Other than the use of the handle_bytes field, the caller should treat
7302 * the file_handle structure as an opaque data type
7305 memcpy(target_fh, fh, total_size);
7306 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7307 target_fh->handle_type = tswap32(fh->handle_type);
7308 g_free(fh);
7309 unlock_user(target_fh, handle, total_size);
7311 if (put_user_s32(mid, mount_id)) {
7312 return -TARGET_EFAULT;
7315 return ret;
7318 #endif
7320 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7321 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7322 abi_long flags)
7324 struct file_handle *target_fh;
7325 struct file_handle *fh;
7326 unsigned int size, total_size;
7327 abi_long ret;
7329 if (get_user_s32(size, handle)) {
7330 return -TARGET_EFAULT;
7333 total_size = sizeof(struct file_handle) + size;
7334 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7335 if (!target_fh) {
7336 return -TARGET_EFAULT;
7339 fh = g_memdup(target_fh, total_size);
7340 fh->handle_bytes = size;
7341 fh->handle_type = tswap32(target_fh->handle_type);
7343 ret = get_errno(open_by_handle_at(mount_fd, fh,
7344 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7346 g_free(fh);
7348 unlock_user(target_fh, handle, total_size);
7350 return ret;
7352 #endif
7354 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7356 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7358 int host_flags;
7359 target_sigset_t *target_mask;
7360 sigset_t host_mask;
7361 abi_long ret;
7363 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7364 return -TARGET_EINVAL;
7366 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7367 return -TARGET_EFAULT;
7370 target_to_host_sigset(&host_mask, target_mask);
7372 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7374 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7375 if (ret >= 0) {
7376 fd_trans_register(ret, &target_signalfd_trans);
7379 unlock_user_struct(target_mask, mask, 0);
7381 return ret;
7383 #endif
7385 /* Map host to target signal numbers for the wait family of syscalls.
7386 Assume all other status bits are the same. */
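/* e.g. for a child terminated by a host signal, only the low 7 bits
 * (the signal number) are remapped; the core-dump flag and any exit
 * status bits pass through unchanged. */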
7387 int host_to_target_waitstatus(int status)
7389 if (WIFSIGNALED(status)) {
7390 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7392 if (WIFSTOPPED(status)) {
7393 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7394 | (status & 0xff);
7396 return status;
7399 static int open_self_cmdline(void *cpu_env, int fd)
7401 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7402 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7403 int i;
7405 for (i = 0; i < bprm->argc; i++) {
7406 size_t len = strlen(bprm->argv[i]) + 1;
7408 if (write(fd, bprm->argv[i], len) != len) {
7409 return -1;
7413 return 0;
7416 static int open_self_maps(void *cpu_env, int fd)
7418 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7419 TaskState *ts = cpu->opaque;
7420 GSList *map_info = read_self_maps();
7421 GSList *s;
7422 int count;
7424 for (s = map_info; s; s = g_slist_next(s)) {
7425 MapInfo *e = (MapInfo *) s->data;
7427 if (h2g_valid(e->start)) {
7428 unsigned long min = e->start;
7429 unsigned long max = e->end;
7430 int flags = page_get_flags(h2g(min));
7431 const char *path;
7433 max = h2g_valid(max - 1) ?
7434 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7436 if (page_check_range(h2g(min), max - min, flags) == -1) {
7437 continue;
7440 if (h2g(min) == ts->info->stack_limit) {
7441 path = "[stack]";
7442 } else {
7443 path = e->path;
7446 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7447 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7448 h2g(min), h2g(max - 1) + 1,
7449 e->is_read ? 'r' : '-',
7450 e->is_write ? 'w' : '-',
7451 e->is_exec ? 'x' : '-',
7452 e->is_priv ? 'p' : '-',
7453 (uint64_t) e->offset, e->dev, e->inode);
7454 if (path) {
7455 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7456 } else {
7457 dprintf(fd, "\n");
7462 free_self_maps(map_info);
7464 #ifdef TARGET_VSYSCALL_PAGE
7466 * We only support execution from the vsyscall page.
7467 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7469 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7470 " --xp 00000000 00:00 0",
7471 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7472 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
7473 #endif
7475 return 0;
7478 static int open_self_stat(void *cpu_env, int fd)
7480 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7481 TaskState *ts = cpu->opaque;
7482 g_autoptr(GString) buf = g_string_new(NULL);
7483 int i;
7485 for (i = 0; i < 44; i++) {
7486 if (i == 0) {
7487 /* pid */
7488 g_string_printf(buf, FMT_pid " ", getpid());
7489 } else if (i == 1) {
7490 /* app name */
7491 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7492 bin = bin ? bin + 1 : ts->bprm->argv[0];
7493 g_string_printf(buf, "(%.15s) ", bin);
7494 } else if (i == 27) {
7495 /* stack bottom */
7496 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7497 } else {
7498 /* the remaining fields are not emulated; report them as 0 */
7499 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7502 if (write(fd, buf->str, buf->len) != buf->len) {
7503 return -1;
7507 return 0;
7510 static int open_self_auxv(void *cpu_env, int fd)
7512 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7513 TaskState *ts = cpu->opaque;
7514 abi_ulong auxv = ts->info->saved_auxv;
7515 abi_ulong len = ts->info->auxv_len;
7516 char *ptr;
7519 * The auxiliary vector is stored on the target process stack;
7520 * read in the whole auxv vector and copy it to the file.
7522 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7523 if (ptr != NULL) {
7524 while (len > 0) {
7525 ssize_t r;
7526 r = write(fd, ptr, len);
7527 if (r <= 0) {
7528 break;
7530 len -= r;
7531 ptr += r;
7533 lseek(fd, 0, SEEK_SET);
7534 unlock_user(ptr, auxv, len);
7537 return 0;
7540 static int is_proc_myself(const char *filename, const char *entry)
7542 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7543 filename += strlen("/proc/");
7544 if (!strncmp(filename, "self/", strlen("self/"))) {
7545 filename += strlen("self/");
7546 } else if (*filename >= '1' && *filename <= '9') {
7547 char myself[80];
7548 snprintf(myself, sizeof(myself), "%d/", getpid());
7549 if (!strncmp(filename, myself, strlen(myself))) {
7550 filename += strlen(myself);
7551 } else {
7552 return 0;
7554 } else {
7555 return 0;
7557 if (!strcmp(filename, entry)) {
7558 return 1;
7561 return 0;
7564 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7565 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7566 static int is_proc(const char *filename, const char *entry)
7568 return strcmp(filename, entry) == 0;
7570 #endif
7572 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7573 static int open_net_route(void *cpu_env, int fd)
7575 FILE *fp;
7576 char *line = NULL;
7577 size_t len = 0;
7578 ssize_t read;
7580 fp = fopen("/proc/net/route", "r");
7581 if (fp == NULL) {
7582 return -1;
7585 /* read header */
7587 read = getline(&line, &len, fp);
7588 dprintf(fd, "%s", line);
7590 /* read routes */
7592 while ((read = getline(&line, &len, fp)) != -1) {
7593 char iface[16];
7594 uint32_t dest, gw, mask;
7595 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7596 int fields;
7598 fields = sscanf(line,
7599 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7600 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7601 &mask, &mtu, &window, &irtt);
7602 if (fields != 11) {
7603 continue;
7605 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7606 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7607 metric, tswap32(mask), mtu, window, irtt);
7610 free(line);
7611 fclose(fp);
7613 return 0;
7615 #endif
7617 #if defined(TARGET_SPARC)
7618 static int open_cpuinfo(void *cpu_env, int fd)
7620 dprintf(fd, "type\t\t: sun4u\n");
7621 return 0;
7623 #endif
7625 #if defined(TARGET_HPPA)
7626 static int open_cpuinfo(void *cpu_env, int fd)
7628 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7629 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7630 dprintf(fd, "capabilities\t: os32\n");
7631 dprintf(fd, "model\t\t: 9000/778/B160L\n");
7632 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7633 return 0;
7635 #endif
7637 #if defined(TARGET_M68K)
7638 static int open_hardware(void *cpu_env, int fd)
7640 dprintf(fd, "Model:\t\tqemu-m68k\n");
7641 return 0;
7643 #endif
7645 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7647 struct fake_open {
7648 const char *filename;
7649 int (*fill)(void *cpu_env, int fd);
7650 int (*cmp)(const char *s1, const char *s2);
7652 const struct fake_open *fake_open;
7653 static const struct fake_open fakes[] = {
7654 { "maps", open_self_maps, is_proc_myself },
7655 { "stat", open_self_stat, is_proc_myself },
7656 { "auxv", open_self_auxv, is_proc_myself },
7657 { "cmdline", open_self_cmdline, is_proc_myself },
7658 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7659 { "/proc/net/route", open_net_route, is_proc },
7660 #endif
7661 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7662 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7663 #endif
7664 #if defined(TARGET_M68K)
7665 { "/proc/hardware", open_hardware, is_proc },
7666 #endif
7667 { NULL, NULL, NULL }
7670 if (is_proc_myself(pathname, "exe")) {
7671 int execfd = qemu_getauxval(AT_EXECFD);
7672 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7675 for (fake_open = fakes; fake_open->filename; fake_open++) {
7676 if (fake_open->cmp(pathname, fake_open->filename)) {
7677 break;
7681 if (fake_open->filename) {
7682 const char *tmpdir;
7683 char filename[PATH_MAX];
7684 int fd, r;
7686 /* create a temporary file to hold the faked /proc contents */
7687 tmpdir = getenv("TMPDIR");
7688 if (!tmpdir)
7689 tmpdir = "/tmp";
7690 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7691 fd = mkstemp(filename);
7692 if (fd < 0) {
7693 return fd;
7695 unlink(filename);
7697 if ((r = fake_open->fill(cpu_env, fd))) {
7698 int e = errno;
7699 close(fd);
7700 errno = e;
7701 return r;
7703 lseek(fd, 0, SEEK_SET);
7705 return fd;
7708 return safe_openat(dirfd, path(pathname), flags, mode);
7711 #define TIMER_MAGIC 0x0caf0000
7712 #define TIMER_MAGIC_MASK 0xffff0000
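/* Timer IDs handed out to the guest are encoded as (TIMER_MAGIC | index),
 * so get_timer_id() can reject stray guest values before indexing
 * g_posix_timers[]. */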
7714 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7715 static target_timer_t get_timer_id(abi_long arg)
7717 target_timer_t timerid = arg;
7719 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7720 return -TARGET_EINVAL;
7723 timerid &= 0xffff;
7725 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7726 return -TARGET_EINVAL;
7729 return timerid;
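/* The two helpers below repack CPU affinity masks bit by bit between the
 * guest's abi_ulong-sized words and the host's unsigned long words, so
 * they work for any combination of 32-bit and 64-bit guest and host. */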
7732 static int target_to_host_cpu_mask(unsigned long *host_mask,
7733 size_t host_size,
7734 abi_ulong target_addr,
7735 size_t target_size)
7737 unsigned target_bits = sizeof(abi_ulong) * 8;
7738 unsigned host_bits = sizeof(*host_mask) * 8;
7739 abi_ulong *target_mask;
7740 unsigned i, j;
7742 assert(host_size >= target_size);
7744 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7745 if (!target_mask) {
7746 return -TARGET_EFAULT;
7748 memset(host_mask, 0, host_size);
7750 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7751 unsigned bit = i * target_bits;
7752 abi_ulong val;
7754 __get_user(val, &target_mask[i]);
7755 for (j = 0; j < target_bits; j++, bit++) {
7756 if (val & (1UL << j)) {
7757 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7762 unlock_user(target_mask, target_addr, 0);
7763 return 0;
7766 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7767 size_t host_size,
7768 abi_ulong target_addr,
7769 size_t target_size)
7771 unsigned target_bits = sizeof(abi_ulong) * 8;
7772 unsigned host_bits = sizeof(*host_mask) * 8;
7773 abi_ulong *target_mask;
7774 unsigned i, j;
7776 assert(host_size >= target_size);
7778 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7779 if (!target_mask) {
7780 return -TARGET_EFAULT;
7783 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7784 unsigned bit = i * target_bits;
7785 abi_ulong val = 0;
7787 for (j = 0; j < target_bits; j++, bit++) {
7788 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7789 val |= 1UL << j;
7792 __put_user(val, &target_mask[i]);
7795 unlock_user(target_mask, target_addr, target_size);
7796 return 0;
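/* Worked example for the two converters above: with a 32-bit guest on a
 * 64-bit host, target_bits = 32 and host_bits = 64, so bit j of target
 * word i becomes global bit n = i * 32 + j, stored as host_mask[n / 64]
 * bit (n % 64). E.g. bit 5 of target word 1 is global bit 37, i.e. bit
 * 37 of host word 0; target words 0 and 1 together always land in
 * host_mask[0].
 */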
7799 /* This is an internal helper for do_syscall so that it is easier
7800 * to have a single return point, so that actions, such as logging
7801 * of syscall results, can be performed.
7802 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7804 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7805 abi_long arg2, abi_long arg3, abi_long arg4,
7806 abi_long arg5, abi_long arg6, abi_long arg7,
7807 abi_long arg8)
7809 CPUState *cpu = env_cpu(cpu_env);
7810 abi_long ret;
7811 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7812 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7813 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7814 || defined(TARGET_NR_statx)
7815 struct stat st;
7816 #endif
7817 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7818 || defined(TARGET_NR_fstatfs)
7819 struct statfs stfs;
7820 #endif
7821 void *p;
7823 switch(num) {
7824 case TARGET_NR_exit:
7825 /* In old applications this may be used to implement _exit(2).
7826 However in threaded applications it is used for thread termination,
7827 and _exit_group is used for application termination.
7828 Do thread termination if we have more than one thread. */
7830 if (block_signals()) {
7831 return -TARGET_ERESTARTSYS;
7834 pthread_mutex_lock(&clone_lock);
7836 if (CPU_NEXT(first_cpu)) {
7837 TaskState *ts = cpu->opaque;
7839 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7840 object_unref(OBJECT(cpu));
7842 * At this point the CPU should be unrealized and removed
7843 * from cpu lists. We can clean up the rest of the thread
7844 * data without the lock held.
7847 pthread_mutex_unlock(&clone_lock);
7849 if (ts->child_tidptr) {
7850 put_user_u32(0, ts->child_tidptr);
7851 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7852 NULL, NULL, 0);
7854 thread_cpu = NULL;
7855 g_free(ts);
7856 rcu_unregister_thread();
7857 pthread_exit(NULL);
7860 pthread_mutex_unlock(&clone_lock);
7861 preexit_cleanup(cpu_env, arg1);
7862 _exit(arg1);
7863 return 0; /* avoid warning */
7864 case TARGET_NR_read:
7865 if (arg2 == 0 && arg3 == 0) {
7866 return get_errno(safe_read(arg1, 0, 0));
7867 } else {
7868 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7869 return -TARGET_EFAULT;
7870 ret = get_errno(safe_read(arg1, p, arg3));
7871 if (ret >= 0 &&
7872 fd_trans_host_to_target_data(arg1)) {
7873 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7875 unlock_user(p, arg2, ret);
7877 return ret;
7878 case TARGET_NR_write:
7879 if (arg2 == 0 && arg3 == 0) {
7880 return get_errno(safe_write(arg1, 0, 0));
7882 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7883 return -TARGET_EFAULT;
7884 if (fd_trans_target_to_host_data(arg1)) {
7885 void *copy = g_malloc(arg3);
7886 memcpy(copy, p, arg3);
7887 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7888 if (ret >= 0) {
7889 ret = get_errno(safe_write(arg1, copy, ret));
7891 g_free(copy);
7892 } else {
7893 ret = get_errno(safe_write(arg1, p, arg3));
7895 unlock_user(p, arg2, 0);
7896 return ret;
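/* Note on the fd_trans hooks used by read/write above: certain fds
 * register translator callbacks when they are created (based on the
 * fd_trans_register() calls elsewhere in this file); for example a read
 * from a signalfd is passed through a host-to-target hook so that the
 * struct signalfd_siginfo it returns is converted to guest byte order
 * before being copied out.
 */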
7898 #ifdef TARGET_NR_open
7899 case TARGET_NR_open:
7900 if (!(p = lock_user_string(arg1)))
7901 return -TARGET_EFAULT;
7902 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7903 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7904 arg3));
7905 fd_trans_unregister(ret);
7906 unlock_user(p, arg1, 0);
7907 return ret;
7908 #endif
7909 case TARGET_NR_openat:
7910 if (!(p = lock_user_string(arg2)))
7911 return -TARGET_EFAULT;
7912 ret = get_errno(do_openat(cpu_env, arg1, p,
7913 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7914 arg4));
7915 fd_trans_unregister(ret);
7916 unlock_user(p, arg2, 0);
7917 return ret;
7918 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7919 case TARGET_NR_name_to_handle_at:
7920 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7921 return ret;
7922 #endif
7923 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7924 case TARGET_NR_open_by_handle_at:
7925 ret = do_open_by_handle_at(arg1, arg2, arg3);
7926 fd_trans_unregister(ret);
7927 return ret;
7928 #endif
7929 case TARGET_NR_close:
7930 fd_trans_unregister(arg1);
7931 return get_errno(close(arg1));
7933 case TARGET_NR_brk:
7934 return do_brk(arg1);
7935 #ifdef TARGET_NR_fork
7936 case TARGET_NR_fork:
7937 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7938 #endif
7939 #ifdef TARGET_NR_waitpid
7940 case TARGET_NR_waitpid:
7942 int status;
7943 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7944 if (!is_error(ret) && arg2 && ret
7945 && put_user_s32(host_to_target_waitstatus(status), arg2))
7946 return -TARGET_EFAULT;
7948 return ret;
7949 #endif
7950 #ifdef TARGET_NR_waitid
7951 case TARGET_NR_waitid:
7953 siginfo_t info;
7954 info.si_pid = 0;
7955 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7956 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7957 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7958 return -TARGET_EFAULT;
7959 host_to_target_siginfo(p, &info);
7960 unlock_user(p, arg3, sizeof(target_siginfo_t));
7963 return ret;
7964 #endif
7965 #ifdef TARGET_NR_creat /* not on alpha */
7966 case TARGET_NR_creat:
7967 if (!(p = lock_user_string(arg1)))
7968 return -TARGET_EFAULT;
7969 ret = get_errno(creat(p, arg2));
7970 fd_trans_unregister(ret);
7971 unlock_user(p, arg1, 0);
7972 return ret;
7973 #endif
7974 #ifdef TARGET_NR_link
7975 case TARGET_NR_link:
7977 void * p2;
7978 p = lock_user_string(arg1);
7979 p2 = lock_user_string(arg2);
7980 if (!p || !p2)
7981 ret = -TARGET_EFAULT;
7982 else
7983 ret = get_errno(link(p, p2));
7984 unlock_user(p2, arg2, 0);
7985 unlock_user(p, arg1, 0);
7987 return ret;
7988 #endif
7989 #if defined(TARGET_NR_linkat)
7990 case TARGET_NR_linkat:
7992 void * p2 = NULL;
7993 if (!arg2 || !arg4)
7994 return -TARGET_EFAULT;
7995 p = lock_user_string(arg2);
7996 p2 = lock_user_string(arg4);
7997 if (!p || !p2)
7998 ret = -TARGET_EFAULT;
7999 else
8000 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8001 unlock_user(p, arg2, 0);
8002 unlock_user(p2, arg4, 0);
8004 return ret;
8005 #endif
8006 #ifdef TARGET_NR_unlink
8007 case TARGET_NR_unlink:
8008 if (!(p = lock_user_string(arg1)))
8009 return -TARGET_EFAULT;
8010 ret = get_errno(unlink(p));
8011 unlock_user(p, arg1, 0);
8012 return ret;
8013 #endif
8014 #if defined(TARGET_NR_unlinkat)
8015 case TARGET_NR_unlinkat:
8016 if (!(p = lock_user_string(arg2)))
8017 return -TARGET_EFAULT;
8018 ret = get_errno(unlinkat(arg1, p, arg3));
8019 unlock_user(p, arg2, 0);
8020 return ret;
8021 #endif
8022 case TARGET_NR_execve:
8024 char **argp, **envp;
8025 int argc, envc;
8026 abi_ulong gp;
8027 abi_ulong guest_argp;
8028 abi_ulong guest_envp;
8029 abi_ulong addr;
8030 char **q;
8031 int total_size = 0;
8033 argc = 0;
8034 guest_argp = arg2;
8035 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8036 if (get_user_ual(addr, gp))
8037 return -TARGET_EFAULT;
8038 if (!addr)
8039 break;
8040 argc++;
8042 envc = 0;
8043 guest_envp = arg3;
8044 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8045 if (get_user_ual(addr, gp))
8046 return -TARGET_EFAULT;
8047 if (!addr)
8048 break;
8049 envc++;
8052 argp = g_new0(char *, argc + 1);
8053 envp = g_new0(char *, envc + 1);
8055 for (gp = guest_argp, q = argp; gp;
8056 gp += sizeof(abi_ulong), q++) {
8057 if (get_user_ual(addr, gp))
8058 goto execve_efault;
8059 if (!addr)
8060 break;
8061 if (!(*q = lock_user_string(addr)))
8062 goto execve_efault;
8063 total_size += strlen(*q) + 1;
8065 *q = NULL;
8067 for (gp = guest_envp, q = envp; gp;
8068 gp += sizeof(abi_ulong), q++) {
8069 if (get_user_ual(addr, gp))
8070 goto execve_efault;
8071 if (!addr)
8072 break;
8073 if (!(*q = lock_user_string(addr)))
8074 goto execve_efault;
8075 total_size += strlen(*q) + 1;
8077 *q = NULL;
8079 if (!(p = lock_user_string(arg1)))
8080 goto execve_efault;
8081 /* Although execve() is not an interruptible syscall it is
8082 * a special case where we must use the safe_syscall wrapper:
8083 * if we allow a signal to happen before we make the host
8084 * syscall then we will 'lose' it, because at the point of
8085 * execve the process leaves QEMU's control. So we use the
8086 * safe syscall wrapper to ensure that we either take the
8087 * signal as a guest signal, or else it does not happen
8088 * before the execve completes and makes it the other
8089 * program's problem.
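* Concretely, the race being closed: (1) a guest signal arrives and is
* queued by QEMU's signal handler; (2) the host execve() succeeds and
* replaces the process image, silently dropping the queued signal.
* The safe_* wrappers re-check for pending guest signals immediately
* before entering the host kernel, so step (2) can only happen when no
* signal is pending.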
8091 ret = get_errno(safe_execve(p, argp, envp));
8092 unlock_user(p, arg1, 0);
8094 goto execve_end;
8096 execve_efault:
8097 ret = -TARGET_EFAULT;
8099 execve_end:
8100 for (gp = guest_argp, q = argp; *q;
8101 gp += sizeof(abi_ulong), q++) {
8102 if (get_user_ual(addr, gp)
8103 || !addr)
8104 break;
8105 unlock_user(*q, addr, 0);
8107 for (gp = guest_envp, q = envp; *q;
8108 gp += sizeof(abi_ulong), q++) {
8109 if (get_user_ual(addr, gp)
8110 || !addr)
8111 break;
8112 unlock_user(*q, addr, 0);
8115 g_free(argp);
8116 g_free(envp);
8118 return ret;
8119 case TARGET_NR_chdir:
8120 if (!(p = lock_user_string(arg1)))
8121 return -TARGET_EFAULT;
8122 ret = get_errno(chdir(p));
8123 unlock_user(p, arg1, 0);
8124 return ret;
8125 #ifdef TARGET_NR_time
8126 case TARGET_NR_time:
8128 time_t host_time;
8129 ret = get_errno(time(&host_time));
8130 if (!is_error(ret)
8131 && arg1
8132 && put_user_sal(host_time, arg1))
8133 return -TARGET_EFAULT;
8135 return ret;
8136 #endif
8137 #ifdef TARGET_NR_mknod
8138 case TARGET_NR_mknod:
8139 if (!(p = lock_user_string(arg1)))
8140 return -TARGET_EFAULT;
8141 ret = get_errno(mknod(p, arg2, arg3));
8142 unlock_user(p, arg1, 0);
8143 return ret;
8144 #endif
8145 #if defined(TARGET_NR_mknodat)
8146 case TARGET_NR_mknodat:
8147 if (!(p = lock_user_string(arg2)))
8148 return -TARGET_EFAULT;
8149 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8150 unlock_user(p, arg2, 0);
8151 return ret;
8152 #endif
8153 #ifdef TARGET_NR_chmod
8154 case TARGET_NR_chmod:
8155 if (!(p = lock_user_string(arg1)))
8156 return -TARGET_EFAULT;
8157 ret = get_errno(chmod(p, arg2));
8158 unlock_user(p, arg1, 0);
8159 return ret;
8160 #endif
8161 #ifdef TARGET_NR_lseek
8162 case TARGET_NR_lseek:
8163 return get_errno(lseek(arg1, arg2, arg3));
8164 #endif
8165 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8166 /* Alpha specific */
8167 case TARGET_NR_getxpid:
8168 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8169 return get_errno(getpid());
8170 #endif
8171 #ifdef TARGET_NR_getpid
8172 case TARGET_NR_getpid:
8173 return get_errno(getpid());
8174 #endif
8175 case TARGET_NR_mount:
8177 /* need to look at the data field */
8178 void *p2, *p3;
8180 if (arg1) {
8181 p = lock_user_string(arg1);
8182 if (!p) {
8183 return -TARGET_EFAULT;
8185 } else {
8186 p = NULL;
8189 p2 = lock_user_string(arg2);
8190 if (!p2) {
8191 if (arg1) {
8192 unlock_user(p, arg1, 0);
8194 return -TARGET_EFAULT;
8197 if (arg3) {
8198 p3 = lock_user_string(arg3);
8199 if (!p3) {
8200 if (arg1) {
8201 unlock_user(p, arg1, 0);
8203 unlock_user(p2, arg2, 0);
8204 return -TARGET_EFAULT;
8206 } else {
8207 p3 = NULL;
8210 /* FIXME - arg5 should be locked, but it isn't clear how to
8211 * do that since it's not guaranteed to be a NULL-terminated
8212 * string.
8214 if (!arg5) {
8215 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8216 } else {
8217 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8219 ret = get_errno(ret);
8221 if (arg1) {
8222 unlock_user(p, arg1, 0);
8224 unlock_user(p2, arg2, 0);
8225 if (arg3) {
8226 unlock_user(p3, arg3, 0);
8229 return ret;
8230 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8231 #if defined(TARGET_NR_umount)
8232 case TARGET_NR_umount:
8233 #endif
8234 #if defined(TARGET_NR_oldumount)
8235 case TARGET_NR_oldumount:
8236 #endif
8237 if (!(p = lock_user_string(arg1)))
8238 return -TARGET_EFAULT;
8239 ret = get_errno(umount(p));
8240 unlock_user(p, arg1, 0);
8241 return ret;
8242 #endif
8243 #ifdef TARGET_NR_stime /* not on alpha */
8244 case TARGET_NR_stime:
8246 struct timespec ts;
8247 ts.tv_nsec = 0;
8248 if (get_user_sal(ts.tv_sec, arg1)) {
8249 return -TARGET_EFAULT;
8251 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8253 #endif
8254 #ifdef TARGET_NR_alarm /* not on alpha */
8255 case TARGET_NR_alarm:
8256 return alarm(arg1);
8257 #endif
8258 #ifdef TARGET_NR_pause /* not on alpha */
8259 case TARGET_NR_pause:
8260 if (!block_signals()) {
8261 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8263 return -TARGET_EINTR;
8264 #endif
8265 #ifdef TARGET_NR_utime
8266 case TARGET_NR_utime:
8268 struct utimbuf tbuf, *host_tbuf;
8269 struct target_utimbuf *target_tbuf;
8270 if (arg2) {
8271 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8272 return -TARGET_EFAULT;
8273 tbuf.actime = tswapal(target_tbuf->actime);
8274 tbuf.modtime = tswapal(target_tbuf->modtime);
8275 unlock_user_struct(target_tbuf, arg2, 0);
8276 host_tbuf = &tbuf;
8277 } else {
8278 host_tbuf = NULL;
8280 if (!(p = lock_user_string(arg1)))
8281 return -TARGET_EFAULT;
8282 ret = get_errno(utime(p, host_tbuf));
8283 unlock_user(p, arg1, 0);
8285 return ret;
8286 #endif
8287 #ifdef TARGET_NR_utimes
8288 case TARGET_NR_utimes:
8290 struct timeval *tvp, tv[2];
8291 if (arg2) {
8292 if (copy_from_user_timeval(&tv[0], arg2)
8293 || copy_from_user_timeval(&tv[1],
8294 arg2 + sizeof(struct target_timeval)))
8295 return -TARGET_EFAULT;
8296 tvp = tv;
8297 } else {
8298 tvp = NULL;
8300 if (!(p = lock_user_string(arg1)))
8301 return -TARGET_EFAULT;
8302 ret = get_errno(utimes(p, tvp));
8303 unlock_user(p, arg1, 0);
8305 return ret;
8306 #endif
8307 #if defined(TARGET_NR_futimesat)
8308 case TARGET_NR_futimesat:
8310 struct timeval *tvp, tv[2];
8311 if (arg3) {
8312 if (copy_from_user_timeval(&tv[0], arg3)
8313 || copy_from_user_timeval(&tv[1],
8314 arg3 + sizeof(struct target_timeval)))
8315 return -TARGET_EFAULT;
8316 tvp = tv;
8317 } else {
8318 tvp = NULL;
8320 if (!(p = lock_user_string(arg2))) {
8321 return -TARGET_EFAULT;
8323 ret = get_errno(futimesat(arg1, path(p), tvp));
8324 unlock_user(p, arg2, 0);
8326 return ret;
8327 #endif
8328 #ifdef TARGET_NR_access
8329 case TARGET_NR_access:
8330 if (!(p = lock_user_string(arg1))) {
8331 return -TARGET_EFAULT;
8333 ret = get_errno(access(path(p), arg2));
8334 unlock_user(p, arg1, 0);
8335 return ret;
8336 #endif
8337 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8338 case TARGET_NR_faccessat:
8339 if (!(p = lock_user_string(arg2))) {
8340 return -TARGET_EFAULT;
8342 ret = get_errno(faccessat(arg1, p, arg3, 0));
8343 unlock_user(p, arg2, 0);
8344 return ret;
8345 #endif
8346 #ifdef TARGET_NR_nice /* not on alpha */
8347 case TARGET_NR_nice:
8348 return get_errno(nice(arg1));
8349 #endif
8350 case TARGET_NR_sync:
8351 sync();
8352 return 0;
8353 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8354 case TARGET_NR_syncfs:
8355 return get_errno(syncfs(arg1));
8356 #endif
8357 case TARGET_NR_kill:
8358 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8359 #ifdef TARGET_NR_rename
8360 case TARGET_NR_rename:
8362 void *p2;
8363 p = lock_user_string(arg1);
8364 p2 = lock_user_string(arg2);
8365 if (!p || !p2)
8366 ret = -TARGET_EFAULT;
8367 else
8368 ret = get_errno(rename(p, p2));
8369 unlock_user(p2, arg2, 0);
8370 unlock_user(p, arg1, 0);
8372 return ret;
8373 #endif
8374 #if defined(TARGET_NR_renameat)
8375 case TARGET_NR_renameat:
8377 void *p2;
8378 p = lock_user_string(arg2);
8379 p2 = lock_user_string(arg4);
8380 if (!p || !p2)
8381 ret = -TARGET_EFAULT;
8382 else
8383 ret = get_errno(renameat(arg1, p, arg3, p2));
8384 unlock_user(p2, arg4, 0);
8385 unlock_user(p, arg2, 0);
8387 return ret;
8388 #endif
8389 #if defined(TARGET_NR_renameat2)
8390 case TARGET_NR_renameat2:
8392 void *p2;
8393 p = lock_user_string(arg2);
8394 p2 = lock_user_string(arg4);
8395 if (!p || !p2) {
8396 ret = -TARGET_EFAULT;
8397 } else {
8398 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8400 unlock_user(p2, arg4, 0);
8401 unlock_user(p, arg2, 0);
8403 return ret;
8404 #endif
8405 #ifdef TARGET_NR_mkdir
8406 case TARGET_NR_mkdir:
8407 if (!(p = lock_user_string(arg1)))
8408 return -TARGET_EFAULT;
8409 ret = get_errno(mkdir(p, arg2));
8410 unlock_user(p, arg1, 0);
8411 return ret;
8412 #endif
8413 #if defined(TARGET_NR_mkdirat)
8414 case TARGET_NR_mkdirat:
8415 if (!(p = lock_user_string(arg2)))
8416 return -TARGET_EFAULT;
8417 ret = get_errno(mkdirat(arg1, p, arg3));
8418 unlock_user(p, arg2, 0);
8419 return ret;
8420 #endif
8421 #ifdef TARGET_NR_rmdir
8422 case TARGET_NR_rmdir:
8423 if (!(p = lock_user_string(arg1)))
8424 return -TARGET_EFAULT;
8425 ret = get_errno(rmdir(p));
8426 unlock_user(p, arg1, 0);
8427 return ret;
8428 #endif
8429 case TARGET_NR_dup:
8430 ret = get_errno(dup(arg1));
8431 if (ret >= 0) {
8432 fd_trans_dup(arg1, ret);
8434 return ret;
8435 #ifdef TARGET_NR_pipe
8436 case TARGET_NR_pipe:
8437 return do_pipe(cpu_env, arg1, 0, 0);
8438 #endif
8439 #ifdef TARGET_NR_pipe2
8440 case TARGET_NR_pipe2:
8441 return do_pipe(cpu_env, arg1,
8442 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8443 #endif
8444 case TARGET_NR_times:
8446 struct target_tms *tmsp;
8447 struct tms tms;
8448 ret = get_errno(times(&tms));
8449 if (arg1) {
8450 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8451 if (!tmsp)
8452 return -TARGET_EFAULT;
8453 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8454 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8455 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8456 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8458 if (!is_error(ret))
8459 ret = host_to_target_clock_t(ret);
8461 return ret;
8462 case TARGET_NR_acct:
8463 if (arg1 == 0) {
8464 ret = get_errno(acct(NULL));
8465 } else {
8466 if (!(p = lock_user_string(arg1))) {
8467 return -TARGET_EFAULT;
8469 ret = get_errno(acct(path(p)));
8470 unlock_user(p, arg1, 0);
8472 return ret;
8473 #ifdef TARGET_NR_umount2
8474 case TARGET_NR_umount2:
8475 if (!(p = lock_user_string(arg1)))
8476 return -TARGET_EFAULT;
8477 ret = get_errno(umount2(p, arg2));
8478 unlock_user(p, arg1, 0);
8479 return ret;
8480 #endif
8481 case TARGET_NR_ioctl:
8482 return do_ioctl(arg1, arg2, arg3);
8483 #ifdef TARGET_NR_fcntl
8484 case TARGET_NR_fcntl:
8485 return do_fcntl(arg1, arg2, arg3);
8486 #endif
8487 case TARGET_NR_setpgid:
8488 return get_errno(setpgid(arg1, arg2));
8489 case TARGET_NR_umask:
8490 return get_errno(umask(arg1));
8491 case TARGET_NR_chroot:
8492 if (!(p = lock_user_string(arg1)))
8493 return -TARGET_EFAULT;
8494 ret = get_errno(chroot(p));
8495 unlock_user(p, arg1, 0);
8496 return ret;
8497 #ifdef TARGET_NR_dup2
8498 case TARGET_NR_dup2:
8499 ret = get_errno(dup2(arg1, arg2));
8500 if (ret >= 0) {
8501 fd_trans_dup(arg1, arg2);
8503 return ret;
8504 #endif
8505 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8506 case TARGET_NR_dup3:
8508 int host_flags;
8510 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8511 return -EINVAL;
8513 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8514 ret = get_errno(dup3(arg1, arg2, host_flags));
8515 if (ret >= 0) {
8516 fd_trans_dup(arg1, arg2);
8518 return ret;
8520 #endif
8521 #ifdef TARGET_NR_getppid /* not on alpha */
8522 case TARGET_NR_getppid:
8523 return get_errno(getppid());
8524 #endif
8525 #ifdef TARGET_NR_getpgrp
8526 case TARGET_NR_getpgrp:
8527 return get_errno(getpgrp());
8528 #endif
8529 case TARGET_NR_setsid:
8530 return get_errno(setsid());
8531 #ifdef TARGET_NR_sigaction
8532 case TARGET_NR_sigaction:
8534 #if defined(TARGET_ALPHA)
8535 struct target_sigaction act, oact, *pact = 0;
8536 struct target_old_sigaction *old_act;
8537 if (arg2) {
8538 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8539 return -TARGET_EFAULT;
8540 act._sa_handler = old_act->_sa_handler;
8541 target_siginitset(&act.sa_mask, old_act->sa_mask);
8542 act.sa_flags = old_act->sa_flags;
8543 act.sa_restorer = 0;
8544 unlock_user_struct(old_act, arg2, 0);
8545 pact = &act;
8547 ret = get_errno(do_sigaction(arg1, pact, &oact));
8548 if (!is_error(ret) && arg3) {
8549 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8550 return -TARGET_EFAULT;
8551 old_act->_sa_handler = oact._sa_handler;
8552 old_act->sa_mask = oact.sa_mask.sig[0];
8553 old_act->sa_flags = oact.sa_flags;
8554 unlock_user_struct(old_act, arg3, 1);
8556 #elif defined(TARGET_MIPS)
8557 struct target_sigaction act, oact, *pact, *old_act;
8559 if (arg2) {
8560 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8561 return -TARGET_EFAULT;
8562 act._sa_handler = old_act->_sa_handler;
8563 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8564 act.sa_flags = old_act->sa_flags;
8565 unlock_user_struct(old_act, arg2, 0);
8566 pact = &act;
8567 } else {
8568 pact = NULL;
8571 ret = get_errno(do_sigaction(arg1, pact, &oact));
8573 if (!is_error(ret) && arg3) {
8574 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8575 return -TARGET_EFAULT;
8576 old_act->_sa_handler = oact._sa_handler;
8577 old_act->sa_flags = oact.sa_flags;
8578 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8579 old_act->sa_mask.sig[1] = 0;
8580 old_act->sa_mask.sig[2] = 0;
8581 old_act->sa_mask.sig[3] = 0;
8582 unlock_user_struct(old_act, arg3, 1);
8584 #else
8585 struct target_old_sigaction *old_act;
8586 struct target_sigaction act, oact, *pact;
8587 if (arg2) {
8588 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8589 return -TARGET_EFAULT;
8590 act._sa_handler = old_act->_sa_handler;
8591 target_siginitset(&act.sa_mask, old_act->sa_mask);
8592 act.sa_flags = old_act->sa_flags;
8593 act.sa_restorer = old_act->sa_restorer;
8594 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8595 act.ka_restorer = 0;
8596 #endif
8597 unlock_user_struct(old_act, arg2, 0);
8598 pact = &act;
8599 } else {
8600 pact = NULL;
8602 ret = get_errno(do_sigaction(arg1, pact, &oact));
8603 if (!is_error(ret) && arg3) {
8604 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8605 return -TARGET_EFAULT;
8606 old_act->_sa_handler = oact._sa_handler;
8607 old_act->sa_mask = oact.sa_mask.sig[0];
8608 old_act->sa_flags = oact.sa_flags;
8609 old_act->sa_restorer = oact.sa_restorer;
8610 unlock_user_struct(old_act, arg3, 1);
8612 #endif
8614 return ret;
8615 #endif
8616 case TARGET_NR_rt_sigaction:
8618 #if defined(TARGET_ALPHA)
8619 /* For Alpha and SPARC this is a 5 argument syscall, with
8620 * a 'restorer' parameter which must be copied into the
8621 * sa_restorer field of the sigaction struct.
8622 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8623 * and arg5 is the sigsetsize.
8624 * Alpha also has a separate rt_sigaction struct that it uses
8625 * here; SPARC uses the usual sigaction struct.
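* Summary of the resulting guest ABIs (derived from the code below):
*     generic: rt_sigaction(sig, act, oact, sigsetsize)
*     Alpha:   rt_sigaction(sig, act, oact, sigsetsize, restorer)
*     SPARC:   rt_sigaction(sig, act, oact, restorer, sigsetsize)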
8627 struct target_rt_sigaction *rt_act;
8628 struct target_sigaction act, oact, *pact = 0;
8630 if (arg4 != sizeof(target_sigset_t)) {
8631 return -TARGET_EINVAL;
8633 if (arg2) {
8634 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8635 return -TARGET_EFAULT;
8636 act._sa_handler = rt_act->_sa_handler;
8637 act.sa_mask = rt_act->sa_mask;
8638 act.sa_flags = rt_act->sa_flags;
8639 act.sa_restorer = arg5;
8640 unlock_user_struct(rt_act, arg2, 0);
8641 pact = &act;
8643 ret = get_errno(do_sigaction(arg1, pact, &oact));
8644 if (!is_error(ret) && arg3) {
8645 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8646 return -TARGET_EFAULT;
8647 rt_act->_sa_handler = oact._sa_handler;
8648 rt_act->sa_mask = oact.sa_mask;
8649 rt_act->sa_flags = oact.sa_flags;
8650 unlock_user_struct(rt_act, arg3, 1);
8652 #else
8653 #ifdef TARGET_SPARC
8654 target_ulong restorer = arg4;
8655 target_ulong sigsetsize = arg5;
8656 #else
8657 target_ulong sigsetsize = arg4;
8658 #endif
8659 struct target_sigaction *act;
8660 struct target_sigaction *oact;
8662 if (sigsetsize != sizeof(target_sigset_t)) {
8663 return -TARGET_EINVAL;
8665 if (arg2) {
8666 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8667 return -TARGET_EFAULT;
8669 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8670 act->ka_restorer = restorer;
8671 #endif
8672 } else {
8673 act = NULL;
8675 if (arg3) {
8676 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8677 ret = -TARGET_EFAULT;
8678 goto rt_sigaction_fail;
8680 } else
8681 oact = NULL;
8682 ret = get_errno(do_sigaction(arg1, act, oact));
8683 rt_sigaction_fail:
8684 if (act)
8685 unlock_user_struct(act, arg2, 0);
8686 if (oact)
8687 unlock_user_struct(oact, arg3, 1);
8688 #endif
8690 return ret;
8691 #ifdef TARGET_NR_sgetmask /* not on alpha */
8692 case TARGET_NR_sgetmask:
8694 sigset_t cur_set;
8695 abi_ulong target_set;
8696 ret = do_sigprocmask(0, NULL, &cur_set);
8697 if (!ret) {
8698 host_to_target_old_sigset(&target_set, &cur_set);
8699 ret = target_set;
8702 return ret;
8703 #endif
8704 #ifdef TARGET_NR_ssetmask /* not on alpha */
8705 case TARGET_NR_ssetmask:
8707 sigset_t set, oset;
8708 abi_ulong target_set = arg1;
8709 target_to_host_old_sigset(&set, &target_set);
8710 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8711 if (!ret) {
8712 host_to_target_old_sigset(&target_set, &oset);
8713 ret = target_set;
8716 return ret;
8717 #endif
8718 #ifdef TARGET_NR_sigprocmask
8719 case TARGET_NR_sigprocmask:
8721 #if defined(TARGET_ALPHA)
8722 sigset_t set, oldset;
8723 abi_ulong mask;
8724 int how;
8726 switch (arg1) {
8727 case TARGET_SIG_BLOCK:
8728 how = SIG_BLOCK;
8729 break;
8730 case TARGET_SIG_UNBLOCK:
8731 how = SIG_UNBLOCK;
8732 break;
8733 case TARGET_SIG_SETMASK:
8734 how = SIG_SETMASK;
8735 break;
8736 default:
8737 return -TARGET_EINVAL;
8739 mask = arg2;
8740 target_to_host_old_sigset(&set, &mask);
8742 ret = do_sigprocmask(how, &set, &oldset);
8743 if (!is_error(ret)) {
8744 host_to_target_old_sigset(&mask, &oldset);
8745 ret = mask;
8746 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8748 #else
8749 sigset_t set, oldset, *set_ptr;
8750 int how;
8752 if (arg2) {
8753 switch (arg1) {
8754 case TARGET_SIG_BLOCK:
8755 how = SIG_BLOCK;
8756 break;
8757 case TARGET_SIG_UNBLOCK:
8758 how = SIG_UNBLOCK;
8759 break;
8760 case TARGET_SIG_SETMASK:
8761 how = SIG_SETMASK;
8762 break;
8763 default:
8764 return -TARGET_EINVAL;
8766 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8767 return -TARGET_EFAULT;
8768 target_to_host_old_sigset(&set, p);
8769 unlock_user(p, arg2, 0);
8770 set_ptr = &set;
8771 } else {
8772 how = 0;
8773 set_ptr = NULL;
8775 ret = do_sigprocmask(how, set_ptr, &oldset);
8776 if (!is_error(ret) && arg3) {
8777 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8778 return -TARGET_EFAULT;
8779 host_to_target_old_sigset(p, &oldset);
8780 unlock_user(p, arg3, sizeof(target_sigset_t));
8782 #endif
8784 return ret;
8785 #endif
8786 case TARGET_NR_rt_sigprocmask:
8788 int how = arg1;
8789 sigset_t set, oldset, *set_ptr;
8791 if (arg4 != sizeof(target_sigset_t)) {
8792 return -TARGET_EINVAL;
8795 if (arg2) {
8796 switch(how) {
8797 case TARGET_SIG_BLOCK:
8798 how = SIG_BLOCK;
8799 break;
8800 case TARGET_SIG_UNBLOCK:
8801 how = SIG_UNBLOCK;
8802 break;
8803 case TARGET_SIG_SETMASK:
8804 how = SIG_SETMASK;
8805 break;
8806 default:
8807 return -TARGET_EINVAL;
8809 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8810 return -TARGET_EFAULT;
8811 target_to_host_sigset(&set, p);
8812 unlock_user(p, arg2, 0);
8813 set_ptr = &set;
8814 } else {
8815 how = 0;
8816 set_ptr = NULL;
8818 ret = do_sigprocmask(how, set_ptr, &oldset);
8819 if (!is_error(ret) && arg3) {
8820 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8821 return -TARGET_EFAULT;
8822 host_to_target_sigset(p, &oldset);
8823 unlock_user(p, arg3, sizeof(target_sigset_t));
8826 return ret;
8827 #ifdef TARGET_NR_sigpending
8828 case TARGET_NR_sigpending:
8830 sigset_t set;
8831 ret = get_errno(sigpending(&set));
8832 if (!is_error(ret)) {
8833 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8834 return -TARGET_EFAULT;
8835 host_to_target_old_sigset(p, &set);
8836 unlock_user(p, arg1, sizeof(target_sigset_t));
8839 return ret;
8840 #endif
8841 case TARGET_NR_rt_sigpending:
8843 sigset_t set;
8845 /* Yes, this check is >, not != like most. We follow the kernel's
8846 * logic, which does it this way because it implements
8847 * NR_sigpending through the same code path, and in that case
8848 * the old_sigset_t is smaller in size.
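* Concretely: a guest passing the smaller sizeof(old_sigset_t) is
* accepted, while anything larger than sizeof(target_sigset_t) is
* rejected with EINVAL.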
8850 if (arg2 > sizeof(target_sigset_t)) {
8851 return -TARGET_EINVAL;
8854 ret = get_errno(sigpending(&set));
8855 if (!is_error(ret)) {
8856 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8857 return -TARGET_EFAULT;
8858 host_to_target_sigset(p, &set);
8859 unlock_user(p, arg1, sizeof(target_sigset_t));
8862 return ret;
8863 #ifdef TARGET_NR_sigsuspend
8864 case TARGET_NR_sigsuspend:
8866 TaskState *ts = cpu->opaque;
8867 #if defined(TARGET_ALPHA)
8868 abi_ulong mask = arg1;
8869 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8870 #else
8871 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8872 return -TARGET_EFAULT;
8873 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8874 unlock_user(p, arg1, 0);
8875 #endif
8876 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8877 SIGSET_T_SIZE));
8878 if (ret != -TARGET_ERESTARTSYS) {
8879 ts->in_sigsuspend = 1;
8882 return ret;
8883 #endif
8884 case TARGET_NR_rt_sigsuspend:
8886 TaskState *ts = cpu->opaque;
8888 if (arg2 != sizeof(target_sigset_t)) {
8889 return -TARGET_EINVAL;
8891 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8892 return -TARGET_EFAULT;
8893 target_to_host_sigset(&ts->sigsuspend_mask, p);
8894 unlock_user(p, arg1, 0);
8895 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8896 SIGSET_T_SIZE));
8897 if (ret != -TARGET_ERESTARTSYS) {
8898 ts->in_sigsuspend = 1;
8901 return ret;
8902 #ifdef TARGET_NR_rt_sigtimedwait
8903 case TARGET_NR_rt_sigtimedwait:
8905 sigset_t set;
8906 struct timespec uts, *puts;
8907 siginfo_t uinfo;
8909 if (arg4 != sizeof(target_sigset_t)) {
8910 return -TARGET_EINVAL;
8913 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8914 return -TARGET_EFAULT;
8915 target_to_host_sigset(&set, p);
8916 unlock_user(p, arg1, 0);
8917 if (arg3) {
8918 puts = &uts;
8919 if (target_to_host_timespec(puts, arg3)) {
8920 return -TARGET_EFAULT;
8922 } else {
8923 puts = NULL;
8925 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8926 SIGSET_T_SIZE));
8927 if (!is_error(ret)) {
8928 if (arg2) {
8929 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8931 if (!p) {
8932 return -TARGET_EFAULT;
8934 host_to_target_siginfo(p, &uinfo);
8935 unlock_user(p, arg2, sizeof(target_siginfo_t));
8937 ret = host_to_target_signal(ret);
8940 return ret;
8941 #endif
8942 case TARGET_NR_rt_sigqueueinfo:
8944 siginfo_t uinfo;
8946 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8947 if (!p) {
8948 return -TARGET_EFAULT;
8950 target_to_host_siginfo(&uinfo, p);
8951 unlock_user(p, arg3, 0);
8952 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8954 return ret;
8955 case TARGET_NR_rt_tgsigqueueinfo:
8957 siginfo_t uinfo;
8959 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8960 if (!p) {
8961 return -TARGET_EFAULT;
8963 target_to_host_siginfo(&uinfo, p);
8964 unlock_user(p, arg4, 0);
8965 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8967 return ret;
8968 #ifdef TARGET_NR_sigreturn
8969 case TARGET_NR_sigreturn:
8970 if (block_signals()) {
8971 return -TARGET_ERESTARTSYS;
8973 return do_sigreturn(cpu_env);
8974 #endif
8975 case TARGET_NR_rt_sigreturn:
8976 if (block_signals()) {
8977 return -TARGET_ERESTARTSYS;
8979 return do_rt_sigreturn(cpu_env);
8980 case TARGET_NR_sethostname:
8981 if (!(p = lock_user_string(arg1)))
8982 return -TARGET_EFAULT;
8983 ret = get_errno(sethostname(p, arg2));
8984 unlock_user(p, arg1, 0);
8985 return ret;
8986 #ifdef TARGET_NR_setrlimit
8987 case TARGET_NR_setrlimit:
8989 int resource = target_to_host_resource(arg1);
8990 struct target_rlimit *target_rlim;
8991 struct rlimit rlim;
8992 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8993 return -TARGET_EFAULT;
8994 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8995 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8996 unlock_user_struct(target_rlim, arg2, 0);
8998 * If we just passed through resource limit settings for memory then
8999 * they would also apply to QEMU's own allocations, and QEMU will
9000 * crash or hang or die if its allocations fail. Ideally we would
9001 * track the guest allocations in QEMU and apply the limits ourselves.
9002 * For now, just tell the guest the call succeeded but don't actually
9003 * limit anything.
9005 if (resource != RLIMIT_AS &&
9006 resource != RLIMIT_DATA &&
9007 resource != RLIMIT_STACK) {
9008 return get_errno(setrlimit(resource, &rlim));
9009 } else {
9010 return 0;
9013 #endif
9014 #ifdef TARGET_NR_getrlimit
9015 case TARGET_NR_getrlimit:
9017 int resource = target_to_host_resource(arg1);
9018 struct target_rlimit *target_rlim;
9019 struct rlimit rlim;
9021 ret = get_errno(getrlimit(resource, &rlim));
9022 if (!is_error(ret)) {
9023 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9024 return -TARGET_EFAULT;
9025 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9026 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9027 unlock_user_struct(target_rlim, arg2, 1);
9030 return ret;
9031 #endif
9032 case TARGET_NR_getrusage:
9034 struct rusage rusage;
9035 ret = get_errno(getrusage(arg1, &rusage));
9036 if (!is_error(ret)) {
9037 ret = host_to_target_rusage(arg2, &rusage);
9040 return ret;
9041 #if defined(TARGET_NR_gettimeofday)
9042 case TARGET_NR_gettimeofday:
9044 struct timeval tv;
9045 struct timezone tz;
9047 ret = get_errno(gettimeofday(&tv, &tz));
9048 if (!is_error(ret)) {
9049 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9050 return -TARGET_EFAULT;
9052 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9053 return -TARGET_EFAULT;
9057 return ret;
9058 #endif
9059 #if defined(TARGET_NR_settimeofday)
9060 case TARGET_NR_settimeofday:
9062 struct timeval tv, *ptv = NULL;
9063 struct timezone tz, *ptz = NULL;
9065 if (arg1) {
9066 if (copy_from_user_timeval(&tv, arg1)) {
9067 return -TARGET_EFAULT;
9069 ptv = &tv;
9072 if (arg2) {
9073 if (copy_from_user_timezone(&tz, arg2)) {
9074 return -TARGET_EFAULT;
9076 ptz = &tz;
9079 return get_errno(settimeofday(ptv, ptz));
9081 #endif
9082 #if defined(TARGET_NR_select)
9083 case TARGET_NR_select:
9084 #if defined(TARGET_WANT_NI_OLD_SELECT)
9085 /* some architectures used to have old_select here
9086 * but now return ENOSYS for it.
9088 ret = -TARGET_ENOSYS;
9089 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9090 ret = do_old_select(arg1);
9091 #else
9092 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9093 #endif
9094 return ret;
9095 #endif
9096 #ifdef TARGET_NR_pselect6
9097 case TARGET_NR_pselect6:
9099 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9100 fd_set rfds, wfds, efds;
9101 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9102 struct timespec ts, *ts_ptr;
9105 * The 6th arg is actually two args smashed together,
9106 * so we cannot use the C library.
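* The guest memory at arg6 holds two abi_ulongs, mirroring the kernel's
* struct { sigset_t __user *ss; size_t ss_len; }:
*     arg6[0] = guest pointer to the sigset (may be NULL)
*     arg6[1] = size of that sigset in bytes
* which is exactly what the arg7[0]/arg7[1] extraction below unpacks.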
9108 sigset_t set;
9109 struct {
9110 sigset_t *set;
9111 size_t size;
9112 } sig, *sig_ptr;
9114 abi_ulong arg_sigset, arg_sigsize, *arg7;
9115 target_sigset_t *target_sigset;
9117 n = arg1;
9118 rfd_addr = arg2;
9119 wfd_addr = arg3;
9120 efd_addr = arg4;
9121 ts_addr = arg5;
9123 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9124 if (ret) {
9125 return ret;
9127 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9128 if (ret) {
9129 return ret;
9131 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9132 if (ret) {
9133 return ret;
9137 * This takes a timespec, and not a timeval, so we cannot
9138 * use the do_select() helper ...
9140 if (ts_addr) {
9141 if (target_to_host_timespec(&ts, ts_addr)) {
9142 return -TARGET_EFAULT;
9144 ts_ptr = &ts;
9145 } else {
9146 ts_ptr = NULL;
9149 /* Extract the two packed args for the sigset */
9150 if (arg6) {
9151 sig_ptr = &sig;
9152 sig.size = SIGSET_T_SIZE;
9154 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9155 if (!arg7) {
9156 return -TARGET_EFAULT;
9158 arg_sigset = tswapal(arg7[0]);
9159 arg_sigsize = tswapal(arg7[1]);
9160 unlock_user(arg7, arg6, 0);
9162 if (arg_sigset) {
9163 sig.set = &set;
9164 if (arg_sigsize != sizeof(*target_sigset)) {
9165 /* Like the kernel, we enforce correct size sigsets */
9166 return -TARGET_EINVAL;
9168 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9169 sizeof(*target_sigset), 1);
9170 if (!target_sigset) {
9171 return -TARGET_EFAULT;
9173 target_to_host_sigset(&set, target_sigset);
9174 unlock_user(target_sigset, arg_sigset, 0);
9175 } else {
9176 sig.set = NULL;
9178 } else {
9179 sig_ptr = NULL;
9182 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9183 ts_ptr, sig_ptr));
9185 if (!is_error(ret)) {
9186 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9187 return -TARGET_EFAULT;
9188 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9189 return -TARGET_EFAULT;
9190 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9191 return -TARGET_EFAULT;
9193 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9194 return -TARGET_EFAULT;
9197 return ret;
9198 #endif
9199 #ifdef TARGET_NR_symlink
9200 case TARGET_NR_symlink:
9202 void *p2;
9203 p = lock_user_string(arg1);
9204 p2 = lock_user_string(arg2);
9205 if (!p || !p2)
9206 ret = -TARGET_EFAULT;
9207 else
9208 ret = get_errno(symlink(p, p2));
9209 unlock_user(p2, arg2, 0);
9210 unlock_user(p, arg1, 0);
9212 return ret;
9213 #endif
9214 #if defined(TARGET_NR_symlinkat)
9215 case TARGET_NR_symlinkat:
9217 void *p2;
9218 p = lock_user_string(arg1);
9219 p2 = lock_user_string(arg3);
9220 if (!p || !p2)
9221 ret = -TARGET_EFAULT;
9222 else
9223 ret = get_errno(symlinkat(p, arg2, p2));
9224 unlock_user(p2, arg3, 0);
9225 unlock_user(p, arg1, 0);
9227 return ret;
9228 #endif
9229 #ifdef TARGET_NR_readlink
9230 case TARGET_NR_readlink:
9232 void *p2;
9233 p = lock_user_string(arg1);
9234 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9235 if (!p || !p2) {
9236 ret = -TARGET_EFAULT;
9237 } else if (!arg3) {
9238 /* Short circuit this for the magic exe check. */
9239 ret = -TARGET_EINVAL;
9240 } else if (is_proc_myself((const char *)p, "exe")) {
9241 char real[PATH_MAX], *temp;
9242 temp = realpath(exec_path, real);
9243 /* Return value is # of bytes that we wrote to the buffer. */
9244 if (temp == NULL) {
9245 ret = get_errno(-1);
9246 } else {
9247 /* Don't worry about sign mismatch as earlier mapping
9248 * logic would have thrown a bad address error. */
9249 ret = MIN(strlen(real), arg3);
9250 /* We cannot NUL terminate the string. */
9251 memcpy(p2, real, ret);
9253 } else {
9254 ret = get_errno(readlink(path(p), p2, arg3));
9256 unlock_user(p2, arg2, ret);
9257 unlock_user(p, arg1, 0);
9259 return ret;
9260 #endif
9261 #if defined(TARGET_NR_readlinkat)
9262 case TARGET_NR_readlinkat:
9264 void *p2;
9265 p = lock_user_string(arg2);
9266 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9267 if (!p || !p2) {
9268 ret = -TARGET_EFAULT;
9269 } else if (is_proc_myself((const char *)p, "exe")) {
9270 char real[PATH_MAX], *temp;
9271 temp = realpath(exec_path, real);
9272 ret = temp == NULL ? get_errno(-1) : strlen(real);
9273 snprintf((char *)p2, arg4, "%s", real);
9274 } else {
9275 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9277 unlock_user(p2, arg3, ret);
9278 unlock_user(p, arg2, 0);
9280 return ret;
9281 #endif
9282 #ifdef TARGET_NR_swapon
9283 case TARGET_NR_swapon:
9284 if (!(p = lock_user_string(arg1)))
9285 return -TARGET_EFAULT;
9286 ret = get_errno(swapon(p, arg2));
9287 unlock_user(p, arg1, 0);
9288 return ret;
9289 #endif
9290 case TARGET_NR_reboot:
9291 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9292 /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2;
it must be ignored in all other cases */
9293 p = lock_user_string(arg4);
9294 if (!p) {
9295 return -TARGET_EFAULT;
9297 ret = get_errno(reboot(arg1, arg2, arg3, p));
9298 unlock_user(p, arg4, 0);
9299 } else {
9300 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9302 return ret;
9303 #ifdef TARGET_NR_mmap
9304 case TARGET_NR_mmap:
9305 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9306 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9307 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9308 || defined(TARGET_S390X)
9310 abi_ulong *v;
9311 abi_ulong v1, v2, v3, v4, v5, v6;
9312 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9313 return -TARGET_EFAULT;
9314 v1 = tswapal(v[0]);
9315 v2 = tswapal(v[1]);
9316 v3 = tswapal(v[2]);
9317 v4 = tswapal(v[3]);
9318 v5 = tswapal(v[4]);
9319 v6 = tswapal(v[5]);
9320 unlock_user(v, arg1, 0);
9321 ret = get_errno(target_mmap(v1, v2, v3,
9322 target_to_host_bitmask(v4, mmap_flags_tbl),
9323 v5, v6));
9325 #else
9326 ret = get_errno(target_mmap(arg1, arg2, arg3,
9327 target_to_host_bitmask(arg4, mmap_flags_tbl),
9328 arg5,
9329 arg6));
9330 #endif
9331 return ret;
9332 #endif
9333 #ifdef TARGET_NR_mmap2
9334 case TARGET_NR_mmap2:
9335 #ifndef MMAP_SHIFT
9336 #define MMAP_SHIFT 12
9337 #endif
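/* mmap2 passes the file offset in units of 1 << MMAP_SHIFT (4096-byte
 * pages by default), so e.g. a guest offset argument of 3 means byte
 * offset 3 << 12 = 0x3000; this lets 32-bit guests address file offsets
 * beyond 4 GiB.
 */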
9338 ret = target_mmap(arg1, arg2, arg3,
9339 target_to_host_bitmask(arg4, mmap_flags_tbl),
9340 arg5, arg6 << MMAP_SHIFT);
9341 return get_errno(ret);
9342 #endif
9343 case TARGET_NR_munmap:
9344 return get_errno(target_munmap(arg1, arg2));
9345 case TARGET_NR_mprotect:
9347 TaskState *ts = cpu->opaque;
9348 /* Special hack to detect libc making the stack executable. */
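/* PROT_GROWSDOWN would normally make the kernel extend the change down
 * to the start of the stack VMA. QEMU tracks the guest stack bounds
 * itself, so it emulates that here: a request for [arg1, arg1 + arg2)
 * is widened to [stack_limit, arg1 + arg2) and the flag is dropped
 * before calling target_mprotect().
 */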
9349 if ((arg3 & PROT_GROWSDOWN)
9350 && arg1 >= ts->info->stack_limit
9351 && arg1 <= ts->info->start_stack) {
9352 arg3 &= ~PROT_GROWSDOWN;
9353 arg2 = arg2 + arg1 - ts->info->stack_limit;
9354 arg1 = ts->info->stack_limit;
9357 return get_errno(target_mprotect(arg1, arg2, arg3));
9358 #ifdef TARGET_NR_mremap
9359 case TARGET_NR_mremap:
9360 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9361 #endif
9362 /* ??? msync/mlock/munlock are broken for softmmu. */
9363 #ifdef TARGET_NR_msync
9364 case TARGET_NR_msync:
9365 return get_errno(msync(g2h(arg1), arg2, arg3));
9366 #endif
9367 #ifdef TARGET_NR_mlock
9368 case TARGET_NR_mlock:
9369 return get_errno(mlock(g2h(arg1), arg2));
9370 #endif
9371 #ifdef TARGET_NR_munlock
9372 case TARGET_NR_munlock:
9373 return get_errno(munlock(g2h(arg1), arg2));
9374 #endif
9375 #ifdef TARGET_NR_mlockall
9376 case TARGET_NR_mlockall:
9377 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9378 #endif
9379 #ifdef TARGET_NR_munlockall
9380 case TARGET_NR_munlockall:
9381 return get_errno(munlockall());
9382 #endif
9383 #ifdef TARGET_NR_truncate
9384 case TARGET_NR_truncate:
9385 if (!(p = lock_user_string(arg1)))
9386 return -TARGET_EFAULT;
9387 ret = get_errno(truncate(p, arg2));
9388 unlock_user(p, arg1, 0);
9389 return ret;
9390 #endif
9391 #ifdef TARGET_NR_ftruncate
9392 case TARGET_NR_ftruncate:
9393 return get_errno(ftruncate(arg1, arg2));
9394 #endif
9395 case TARGET_NR_fchmod:
9396 return get_errno(fchmod(arg1, arg2));
9397 #if defined(TARGET_NR_fchmodat)
9398 case TARGET_NR_fchmodat:
9399 if (!(p = lock_user_string(arg2)))
9400 return -TARGET_EFAULT;
9401 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9402 unlock_user(p, arg2, 0);
9403 return ret;
9404 #endif
9405 case TARGET_NR_getpriority:
9406 /* Note that negative values are valid for getpriority, so we must
9407 differentiate based on errno settings. */
9408 errno = 0;
9409 ret = getpriority(arg1, arg2);
9410 if (ret == -1 && errno != 0) {
9411 return -host_to_target_errno(errno);
9413 #ifdef TARGET_ALPHA
9414 /* Return value is the unbiased priority. Signal no error. */
9415 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9416 #else
9417 /* Return value is a biased priority to avoid negative numbers. */
9418 ret = 20 - ret;
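/* e.g. a host nice value of -5 comes back to the guest as 25,
 * and a value of 19 as 1. */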
9419 #endif
9420 return ret;
9421 case TARGET_NR_setpriority:
9422 return get_errno(setpriority(arg1, arg2, arg3));
9423 #ifdef TARGET_NR_statfs
9424 case TARGET_NR_statfs:
9425 if (!(p = lock_user_string(arg1))) {
9426 return -TARGET_EFAULT;
9428 ret = get_errno(statfs(path(p), &stfs));
9429 unlock_user(p, arg1, 0);
9430 convert_statfs:
9431 if (!is_error(ret)) {
9432 struct target_statfs *target_stfs;
9434 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9435 return -TARGET_EFAULT;
9436 __put_user(stfs.f_type, &target_stfs->f_type);
9437 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9438 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9439 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9440 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9441 __put_user(stfs.f_files, &target_stfs->f_files);
9442 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9443 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9444 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9445 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9446 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9447 #ifdef _STATFS_F_FLAGS
9448 __put_user(stfs.f_flags, &target_stfs->f_flags);
9449 #else
9450 __put_user(0, &target_stfs->f_flags);
9451 #endif
9452 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9453 unlock_user_struct(target_stfs, arg2, 1);
9455 return ret;
9456 #endif
9457 #ifdef TARGET_NR_fstatfs
9458 case TARGET_NR_fstatfs:
9459 ret = get_errno(fstatfs(arg1, &stfs));
9460 goto convert_statfs;
9461 #endif
9462 #ifdef TARGET_NR_statfs64
9463 case TARGET_NR_statfs64:
9464 if (!(p = lock_user_string(arg1))) {
9465 return -TARGET_EFAULT;
9467 ret = get_errno(statfs(path(p), &stfs));
9468 unlock_user(p, arg1, 0);
9469 convert_statfs64:
9470 if (!is_error(ret)) {
9471 struct target_statfs64 *target_stfs;
9473 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9474 return -TARGET_EFAULT;
9475 __put_user(stfs.f_type, &target_stfs->f_type);
9476 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9477 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9478 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9479 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9480 __put_user(stfs.f_files, &target_stfs->f_files);
9481 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9482 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9483 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9484 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9485 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9486 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9487 unlock_user_struct(target_stfs, arg3, 1);
9489 return ret;
9490 case TARGET_NR_fstatfs64:
9491 ret = get_errno(fstatfs(arg1, &stfs));
9492 goto convert_statfs64;
9493 #endif
9494 #ifdef TARGET_NR_socketcall
9495 case TARGET_NR_socketcall:
9496 return do_socketcall(arg1, arg2);
9497 #endif
9498 #ifdef TARGET_NR_accept
9499 case TARGET_NR_accept:
9500 return do_accept4(arg1, arg2, arg3, 0);
9501 #endif
9502 #ifdef TARGET_NR_accept4
9503 case TARGET_NR_accept4:
9504 return do_accept4(arg1, arg2, arg3, arg4);
9505 #endif
9506 #ifdef TARGET_NR_bind
9507 case TARGET_NR_bind:
9508 return do_bind(arg1, arg2, arg3);
9509 #endif
9510 #ifdef TARGET_NR_connect
9511 case TARGET_NR_connect:
9512 return do_connect(arg1, arg2, arg3);
9513 #endif
9514 #ifdef TARGET_NR_getpeername
9515 case TARGET_NR_getpeername:
9516 return do_getpeername(arg1, arg2, arg3);
9517 #endif
9518 #ifdef TARGET_NR_getsockname
9519 case TARGET_NR_getsockname:
9520 return do_getsockname(arg1, arg2, arg3);
9521 #endif
9522 #ifdef TARGET_NR_getsockopt
9523 case TARGET_NR_getsockopt:
9524 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9525 #endif
9526 #ifdef TARGET_NR_listen
9527 case TARGET_NR_listen:
9528 return get_errno(listen(arg1, arg2));
9529 #endif
9530 #ifdef TARGET_NR_recv
9531 case TARGET_NR_recv:
9532 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9533 #endif
9534 #ifdef TARGET_NR_recvfrom
9535 case TARGET_NR_recvfrom:
9536 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9537 #endif
9538 #ifdef TARGET_NR_recvmsg
9539 case TARGET_NR_recvmsg:
9540 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9541 #endif
9542 #ifdef TARGET_NR_send
9543 case TARGET_NR_send:
9544 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9545 #endif
9546 #ifdef TARGET_NR_sendmsg
9547 case TARGET_NR_sendmsg:
9548 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9549 #endif
9550 #ifdef TARGET_NR_sendmmsg
9551 case TARGET_NR_sendmmsg:
9552 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9553 #endif
9554 #ifdef TARGET_NR_recvmmsg
9555 case TARGET_NR_recvmmsg:
9556 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9557 #endif
9558 #ifdef TARGET_NR_sendto
9559 case TARGET_NR_sendto:
9560 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9561 #endif
9562 #ifdef TARGET_NR_shutdown
9563 case TARGET_NR_shutdown:
9564 return get_errno(shutdown(arg1, arg2));
9565 #endif
9566 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9567 case TARGET_NR_getrandom:
9568 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9569 if (!p) {
9570 return -TARGET_EFAULT;
9572 ret = get_errno(getrandom(p, arg2, arg3));
9573 unlock_user(p, arg1, ret);
9574 return ret;
9575 #endif
9576 #ifdef TARGET_NR_socket
9577 case TARGET_NR_socket:
9578 return do_socket(arg1, arg2, arg3);
9579 #endif
9580 #ifdef TARGET_NR_socketpair
9581 case TARGET_NR_socketpair:
9582 return do_socketpair(arg1, arg2, arg3, arg4);
9583 #endif
9584 #ifdef TARGET_NR_setsockopt
9585 case TARGET_NR_setsockopt:
9586 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9587 #endif
9588 #if defined(TARGET_NR_syslog)
9589 case TARGET_NR_syslog:
9591 int len = arg2;
9593 switch (arg1) {
9594 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9595 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9596 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9597 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9598 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9599 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9600 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9601 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9602 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9603 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9604 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9605 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9607 if (len < 0) {
9608 return -TARGET_EINVAL;
9610 if (len == 0) {
9611 return 0;
9613 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9614 if (!p) {
9615 return -TARGET_EFAULT;
9617 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9618 unlock_user(p, arg2, arg3);
9620 return ret;
9621 default:
9622 return -TARGET_EINVAL;
9625 break;
9626 #endif
9627 case TARGET_NR_setitimer:
9629 struct itimerval value, ovalue, *pvalue;
9631 if (arg2) {
9632 pvalue = &value;
9633 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9634 || copy_from_user_timeval(&pvalue->it_value,
9635 arg2 + sizeof(struct target_timeval)))
9636 return -TARGET_EFAULT;
9637 } else {
9638 pvalue = NULL;
9640 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9641 if (!is_error(ret) && arg3) {
9642 if (copy_to_user_timeval(arg3,
9643 &ovalue.it_interval)
9644 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9645 &ovalue.it_value))
9646 return -TARGET_EFAULT;
9649 return ret;
9650 case TARGET_NR_getitimer:
9652 struct itimerval value;
9654 ret = get_errno(getitimer(arg1, &value));
9655 if (!is_error(ret) && arg2) {
9656 if (copy_to_user_timeval(arg2,
9657 &value.it_interval)
9658 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9659 &value.it_value))
9660 return -TARGET_EFAULT;
9663 return ret;
9664 #ifdef TARGET_NR_stat
9665 case TARGET_NR_stat:
9666 if (!(p = lock_user_string(arg1))) {
9667 return -TARGET_EFAULT;
9669 ret = get_errno(stat(path(p), &st));
9670 unlock_user(p, arg1, 0);
9671 goto do_stat;
9672 #endif
9673 #ifdef TARGET_NR_lstat
9674 case TARGET_NR_lstat:
9675 if (!(p = lock_user_string(arg1))) {
9676 return -TARGET_EFAULT;
9678 ret = get_errno(lstat(path(p), &st));
9679 unlock_user(p, arg1, 0);
9680 goto do_stat;
9681 #endif
9682 #ifdef TARGET_NR_fstat
9683 case TARGET_NR_fstat:
9685 ret = get_errno(fstat(arg1, &st));
9686 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9687 do_stat:
9688 #endif
9689 if (!is_error(ret)) {
9690 struct target_stat *target_st;
9692 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9693 return -TARGET_EFAULT;
9694 memset(target_st, 0, sizeof(*target_st));
9695 __put_user(st.st_dev, &target_st->st_dev);
9696 __put_user(st.st_ino, &target_st->st_ino);
9697 __put_user(st.st_mode, &target_st->st_mode);
9698 __put_user(st.st_uid, &target_st->st_uid);
9699 __put_user(st.st_gid, &target_st->st_gid);
9700 __put_user(st.st_nlink, &target_st->st_nlink);
9701 __put_user(st.st_rdev, &target_st->st_rdev);
9702 __put_user(st.st_size, &target_st->st_size);
9703 __put_user(st.st_blksize, &target_st->st_blksize);
9704 __put_user(st.st_blocks, &target_st->st_blocks);
9705 __put_user(st.st_atime, &target_st->target_st_atime);
9706 __put_user(st.st_mtime, &target_st->target_st_mtime);
9707 __put_user(st.st_ctime, &target_st->target_st_ctime);
9708 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9709 defined(TARGET_STAT_HAVE_NSEC)
9710 __put_user(st.st_atim.tv_nsec,
9711 &target_st->target_st_atime_nsec);
9712 __put_user(st.st_mtim.tv_nsec,
9713 &target_st->target_st_mtime_nsec);
9714 __put_user(st.st_ctim.tv_nsec,
9715 &target_st->target_st_ctime_nsec);
9716 #endif
9717 unlock_user_struct(target_st, arg2, 1);
9720 return ret;
9721 #endif
9722 case TARGET_NR_vhangup:
9723 return get_errno(vhangup());
9724 #ifdef TARGET_NR_syscall
9725 case TARGET_NR_syscall:
9726 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9727 arg6, arg7, arg8, 0);
9728 #endif
9729 #if defined(TARGET_NR_wait4)
9730 case TARGET_NR_wait4:
9732 int status;
9733 abi_long status_ptr = arg2;
9734 struct rusage rusage, *rusage_ptr;
9735 abi_ulong target_rusage = arg4;
9736 abi_long rusage_err;
9737 if (target_rusage)
9738 rusage_ptr = &rusage;
9739 else
9740 rusage_ptr = NULL;
9741 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9742 if (!is_error(ret)) {
9743 if (status_ptr && ret) {
9744 status = host_to_target_waitstatus(status);
9745 if (put_user_s32(status, status_ptr))
9746 return -TARGET_EFAULT;
9748 if (target_rusage) {
9749 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9750 if (rusage_err) {
9751 ret = rusage_err;
9756 return ret;
9757 #endif
9758 #ifdef TARGET_NR_swapoff
9759 case TARGET_NR_swapoff:
9760 if (!(p = lock_user_string(arg1)))
9761 return -TARGET_EFAULT;
9762 ret = get_errno(swapoff(p));
9763 unlock_user(p, arg1, 0);
9764 return ret;
9765 #endif
9766 case TARGET_NR_sysinfo:
9768 struct target_sysinfo *target_value;
9769 struct sysinfo value;
9770 ret = get_errno(sysinfo(&value));
9771 if (!is_error(ret) && arg1)
9773 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9774 return -TARGET_EFAULT;
9775 __put_user(value.uptime, &target_value->uptime);
9776 __put_user(value.loads[0], &target_value->loads[0]);
9777 __put_user(value.loads[1], &target_value->loads[1]);
9778 __put_user(value.loads[2], &target_value->loads[2]);
9779 __put_user(value.totalram, &target_value->totalram);
9780 __put_user(value.freeram, &target_value->freeram);
9781 __put_user(value.sharedram, &target_value->sharedram);
9782 __put_user(value.bufferram, &target_value->bufferram);
9783 __put_user(value.totalswap, &target_value->totalswap);
9784 __put_user(value.freeswap, &target_value->freeswap);
9785 __put_user(value.procs, &target_value->procs);
9786 __put_user(value.totalhigh, &target_value->totalhigh);
9787 __put_user(value.freehigh, &target_value->freehigh);
9788 __put_user(value.mem_unit, &target_value->mem_unit);
9789 unlock_user_struct(target_value, arg1, 1);
9790 }
9791 }
9792 return ret;
9793 #ifdef TARGET_NR_ipc
9794 case TARGET_NR_ipc:
9795 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9796 #endif
9797 #ifdef TARGET_NR_semget
9798 case TARGET_NR_semget:
9799 return get_errno(semget(arg1, arg2, arg3));
9800 #endif
9801 #ifdef TARGET_NR_semop
9802 case TARGET_NR_semop:
9803 return do_semtimedop(arg1, arg2, arg3, 0);
9804 #endif
9805 #ifdef TARGET_NR_semtimedop
9806 case TARGET_NR_semtimedop:
9807 return do_semtimedop(arg1, arg2, arg3, arg4);
9808 #endif
9809 #ifdef TARGET_NR_semctl
9810 case TARGET_NR_semctl:
9811 return do_semctl(arg1, arg2, arg3, arg4);
9812 #endif
9813 #ifdef TARGET_NR_msgctl
9814 case TARGET_NR_msgctl:
9815 return do_msgctl(arg1, arg2, arg3);
9816 #endif
9817 #ifdef TARGET_NR_msgget
9818 case TARGET_NR_msgget:
9819 return get_errno(msgget(arg1, arg2));
9820 #endif
9821 #ifdef TARGET_NR_msgrcv
9822 case TARGET_NR_msgrcv:
9823 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9824 #endif
9825 #ifdef TARGET_NR_msgsnd
9826 case TARGET_NR_msgsnd:
9827 return do_msgsnd(arg1, arg2, arg3, arg4);
9828 #endif
9829 #ifdef TARGET_NR_shmget
9830 case TARGET_NR_shmget:
9831 return get_errno(shmget(arg1, arg2, arg3));
9832 #endif
9833 #ifdef TARGET_NR_shmctl
9834 case TARGET_NR_shmctl:
9835 return do_shmctl(arg1, arg2, arg3);
9836 #endif
9837 #ifdef TARGET_NR_shmat
9838 case TARGET_NR_shmat:
9839 return do_shmat(cpu_env, arg1, arg2, arg3);
9840 #endif
9841 #ifdef TARGET_NR_shmdt
9842 case TARGET_NR_shmdt:
9843 return do_shmdt(arg1);
9844 #endif
9845 case TARGET_NR_fsync:
9846 return get_errno(fsync(arg1));
9847 case TARGET_NR_clone:
9848 /* Linux manages to have three different orderings for its
9849 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9850 * match the kernel's CONFIG_CLONE_* settings.
9851 * Microblaze is further special in that it uses a sixth
9852 * implicit argument to clone for the TLS pointer.
9853 */
9854 #if defined(TARGET_MICROBLAZE)
9855 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9856 #elif defined(TARGET_CLONE_BACKWARDS)
9857 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9858 #elif defined(TARGET_CLONE_BACKWARDS2)
9859 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9860 #else
9861 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9862 #endif
9863 return ret;
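/* For reference, the guest argument orders being untangled above, as
 * inferred from the do_fork() calls (do_fork() itself takes
 * (env, flags, newsp, parent_tidptr, tls, child_tidptr)):
 *
 *     default:          clone(flags, newsp, parent_tidptr, child_tidptr, tls)
 *     CLONE_BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
 *     CLONE_BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
 *
 * and Microblaze passes the TLS pointer as an implicit sixth argument.
 */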
9864 #ifdef __NR_exit_group
9865 /* new thread calls */
9866 case TARGET_NR_exit_group:
9867 preexit_cleanup(cpu_env, arg1);
9868 return get_errno(exit_group(arg1));
9869 #endif
9870 case TARGET_NR_setdomainname:
9871 if (!(p = lock_user_string(arg1)))
9872 return -TARGET_EFAULT;
9873 ret = get_errno(setdomainname(p, arg2));
9874 unlock_user(p, arg1, 0);
9875 return ret;
9876 case TARGET_NR_uname:
9877 /* no need to transcode because we use the linux syscall */
9878 {
9879 struct new_utsname * buf;
9881 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9882 return -TARGET_EFAULT;
9883 ret = get_errno(sys_uname(buf));
9884 if (!is_error(ret)) {
9885 /* Overwrite the native machine name with whatever is being
9886 emulated. */
9887 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9888 sizeof(buf->machine));
9889 /* Allow the user to override the reported release. */
9890 if (qemu_uname_release && *qemu_uname_release) {
9891 g_strlcpy(buf->release, qemu_uname_release,
9892 sizeof(buf->release));
9893 }
9894 }
9895 unlock_user_struct(buf, arg1, 1);
9896 }
9897 return ret;
9898 #ifdef TARGET_I386
9899 case TARGET_NR_modify_ldt:
9900 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9901 #if !defined(TARGET_X86_64)
9902 case TARGET_NR_vm86:
9903 return do_vm86(cpu_env, arg1, arg2);
9904 #endif
9905 #endif
9906 #if defined(TARGET_NR_adjtimex)
9907 case TARGET_NR_adjtimex:
9909 struct timex host_buf;
9911 if (target_to_host_timex(&host_buf, arg1) != 0) {
9912 return -TARGET_EFAULT;
9914 ret = get_errno(adjtimex(&host_buf));
9915 if (!is_error(ret)) {
9916 if (host_to_target_timex(arg1, &host_buf) != 0) {
9917 return -TARGET_EFAULT;
9921 return ret;
9922 #endif
9923 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9924 case TARGET_NR_clock_adjtime:
9926 struct timex htx, *phtx = &htx;
9928 if (target_to_host_timex(phtx, arg2) != 0) {
9929 return -TARGET_EFAULT;
9931 ret = get_errno(clock_adjtime(arg1, phtx));
9932 if (!is_error(ret) && phtx) {
9933 if (host_to_target_timex(arg2, phtx) != 0) {
9934 return -TARGET_EFAULT;
9938 return ret;
9939 #endif
9940 case TARGET_NR_getpgid:
9941 return get_errno(getpgid(arg1));
9942 case TARGET_NR_fchdir:
9943 return get_errno(fchdir(arg1));
9944 case TARGET_NR_personality:
9945 return get_errno(personality(arg1));
9946 #ifdef TARGET_NR__llseek /* Not on alpha */
9947 case TARGET_NR__llseek:
9948 {
9949 int64_t res;
9950 #if !defined(__NR_llseek)
9951 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9952 if (res == -1) {
9953 ret = get_errno(res);
9954 } else {
9955 ret = 0;
9956 }
9957 #else
9958 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9959 #endif
9960 if ((ret == 0) && put_user_s64(res, arg4)) {
9961 return -TARGET_EFAULT;
9962 }
9963 }
9964 return ret;
9965 #endif
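/* The expression ((uint64_t)arg2 << 32) | arg3 above reassembles the
 * 64-bit offset that the guest split into (offset_high, offset_low).
 * For example, seeking to 0x100000400 arrives as arg2 = 0x1 and
 * arg3 = 0x400:
 *
 *     uint64_t off = ((uint64_t)0x1 << 32) | 0x400;   // 0x100000400
 */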
9966 #ifdef TARGET_NR_getdents
9967 case TARGET_NR_getdents:
9968 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9969 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9970 {
9971 struct target_dirent *target_dirp;
9972 struct linux_dirent *dirp;
9973 abi_long count = arg3;
9975 dirp = g_try_malloc(count);
9976 if (!dirp) {
9977 return -TARGET_ENOMEM;
9980 ret = get_errno(sys_getdents(arg1, dirp, count));
9981 if (!is_error(ret)) {
9982 struct linux_dirent *de;
9983 struct target_dirent *tde;
9984 int len = ret;
9985 int reclen, treclen;
9986 int count1, tnamelen;
9988 count1 = 0;
9989 de = dirp;
9990 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9991 return -TARGET_EFAULT;
9992 tde = target_dirp;
9993 while (len > 0) {
9994 reclen = de->d_reclen;
9995 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9996 assert(tnamelen >= 0);
9997 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9998 assert(count1 + treclen <= count);
9999 tde->d_reclen = tswap16(treclen);
10000 tde->d_ino = tswapal(de->d_ino);
10001 tde->d_off = tswapal(de->d_off);
10002 memcpy(tde->d_name, de->d_name, tnamelen);
10003 de = (struct linux_dirent *)((char *)de + reclen);
10004 len -= reclen;
10005 tde = (struct target_dirent *)((char *)tde + treclen);
10006 count1 += treclen;
10007 }
10008 ret = count1;
10009 unlock_user(target_dirp, arg2, ret);
10010 }
10011 g_free(dirp);
10012 }
10013 #else
10014 {
10015 struct linux_dirent *dirp;
10016 abi_long count = arg3;
10018 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10019 return -TARGET_EFAULT;
10020 ret = get_errno(sys_getdents(arg1, dirp, count));
10021 if (!is_error(ret)) {
10022 struct linux_dirent *de;
10023 int len = ret;
10024 int reclen;
10025 de = dirp;
10026 while (len > 0) {
10027 reclen = de->d_reclen;
10028 if (reclen > len)
10029 break;
10030 de->d_reclen = tswap16(reclen);
10031 tswapls(&de->d_ino);
10032 tswapls(&de->d_off);
10033 de = (struct linux_dirent *)((char *)de + reclen);
10034 len -= reclen;
10035 }
10036 }
10037 unlock_user(dirp, arg2, ret);
10038 }
10039 #endif
10040 #else
10041 /* Implement getdents in terms of getdents64 */
10042 {
10043 struct linux_dirent64 *dirp;
10044 abi_long count = arg3;
10046 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10047 if (!dirp) {
10048 return -TARGET_EFAULT;
10050 ret = get_errno(sys_getdents64(arg1, dirp, count));
10051 if (!is_error(ret)) {
10052 /* Convert the dirent64 structs to target dirent. We do this
10053 * in-place, since we can guarantee that a target_dirent is no
10054 * larger than a dirent64; however this means we have to be
10055 * careful to read everything before writing in the new format.
10056 */
10057 struct linux_dirent64 *de;
10058 struct target_dirent *tde;
10059 int len = ret;
10060 int tlen = 0;
10062 de = dirp;
10063 tde = (struct target_dirent *)dirp;
10064 while (len > 0) {
10065 int namelen, treclen;
10066 int reclen = de->d_reclen;
10067 uint64_t ino = de->d_ino;
10068 int64_t off = de->d_off;
10069 uint8_t type = de->d_type;
10071 namelen = strlen(de->d_name);
10072 treclen = offsetof(struct target_dirent, d_name)
10073 + namelen + 2;
10074 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10076 memmove(tde->d_name, de->d_name, namelen + 1);
10077 tde->d_ino = tswapal(ino);
10078 tde->d_off = tswapal(off);
10079 tde->d_reclen = tswap16(treclen);
10080 /* The target_dirent type is in what was formerly a padding
10081 * byte at the end of the structure:
10082 */
10083 *(((char *)tde) + treclen - 1) = type;
10085 de = (struct linux_dirent64 *)((char *)de + reclen);
10086 tde = (struct target_dirent *)((char *)tde + treclen);
10087 len -= reclen;
10088 tlen += treclen;
10089 }
10090 ret = tlen;
10091 }
10092 unlock_user(dirp, arg2, ret);
10093 }
10094 #endif
10095 return ret;
10096 #endif /* TARGET_NR_getdents */
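/* Worked example for the record-size arithmetic in the getdents64-based
 * path above, assuming a hypothetical 32-bit target where
 * offsetof(struct target_dirent, d_name) == 10: a 5-byte name gives
 *
 *     treclen = 10 + 5 + 2;                           // 17: name, NUL, type
 *     treclen = QEMU_ALIGN_UP(17, sizeof(abi_long));  // 20 on 32-bit
 *
 * The "+ 2" covers the name's trailing NUL plus the d_type byte that
 * linux_dirent-style records keep in the final byte of the record.
 */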
10097 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10098 case TARGET_NR_getdents64:
10099 {
10100 struct linux_dirent64 *dirp;
10101 abi_long count = arg3;
10102 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10103 return -TARGET_EFAULT;
10104 ret = get_errno(sys_getdents64(arg1, dirp, count));
10105 if (!is_error(ret)) {
10106 struct linux_dirent64 *de;
10107 int len = ret;
10108 int reclen;
10109 de = dirp;
10110 while (len > 0) {
10111 reclen = de->d_reclen;
10112 if (reclen > len)
10113 break;
10114 de->d_reclen = tswap16(reclen);
10115 tswap64s((uint64_t *)&de->d_ino);
10116 tswap64s((uint64_t *)&de->d_off);
10117 de = (struct linux_dirent64 *)((char *)de + reclen);
10118 len -= reclen;
10119 }
10120 }
10121 unlock_user(dirp, arg2, ret);
10122 }
10123 return ret;
10124 #endif /* TARGET_NR_getdents64 */
10125 #if defined(TARGET_NR__newselect)
10126 case TARGET_NR__newselect:
10127 return do_select(arg1, arg2, arg3, arg4, arg5);
10128 #endif
10129 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10130 # ifdef TARGET_NR_poll
10131 case TARGET_NR_poll:
10132 # endif
10133 # ifdef TARGET_NR_ppoll
10134 case TARGET_NR_ppoll:
10135 # endif
10137 struct target_pollfd *target_pfd;
10138 unsigned int nfds = arg2;
10139 struct pollfd *pfd;
10140 unsigned int i;
10142 pfd = NULL;
10143 target_pfd = NULL;
10144 if (nfds) {
10145 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10146 return -TARGET_EINVAL;
10149 target_pfd = lock_user(VERIFY_WRITE, arg1,
10150 sizeof(struct target_pollfd) * nfds, 1);
10151 if (!target_pfd) {
10152 return -TARGET_EFAULT;
10155 pfd = alloca(sizeof(struct pollfd) * nfds);
10156 for (i = 0; i < nfds; i++) {
10157 pfd[i].fd = tswap32(target_pfd[i].fd);
10158 pfd[i].events = tswap16(target_pfd[i].events);
10162 switch (num) {
10163 # ifdef TARGET_NR_ppoll
10164 case TARGET_NR_ppoll:
10166 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10167 target_sigset_t *target_set;
10168 sigset_t _set, *set = &_set;
10170 if (arg3) {
10171 if (target_to_host_timespec(timeout_ts, arg3)) {
10172 unlock_user(target_pfd, arg1, 0);
10173 return -TARGET_EFAULT;
10175 } else {
10176 timeout_ts = NULL;
10179 if (arg4) {
10180 if (arg5 != sizeof(target_sigset_t)) {
10181 unlock_user(target_pfd, arg1, 0);
10182 return -TARGET_EINVAL;
10185 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10186 if (!target_set) {
10187 unlock_user(target_pfd, arg1, 0);
10188 return -TARGET_EFAULT;
10190 target_to_host_sigset(set, target_set);
10191 } else {
10192 set = NULL;
10195 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10196 set, SIGSET_T_SIZE));
10198 if (!is_error(ret) && arg3) {
10199 host_to_target_timespec(arg3, timeout_ts);
10201 if (arg4) {
10202 unlock_user(target_set, arg4, 0);
10204 break;
10206 # endif
10207 # ifdef TARGET_NR_poll
10208 case TARGET_NR_poll:
10210 struct timespec ts, *pts;
10212 if (arg3 >= 0) {
10213 /* Convert ms to secs, ns */
10214 ts.tv_sec = arg3 / 1000;
10215 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10216 pts = &ts;
10217 } else {
10218 /* -ve poll() timeout means "infinite" */
10219 pts = NULL;
10221 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10222 break;
10224 # endif
10225 default:
10226 g_assert_not_reached();
10229 if (!is_error(ret)) {
10230 for(i = 0; i < nfds; i++) {
10231 target_pfd[i].revents = tswap16(pfd[i].revents);
10232 }
10233 }
10234 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10235 }
10236 return ret;
10237 #endif
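/* Note the asymmetric copying in the poll/ppoll block above: only .fd and
 * .events are converted into the host pollfd array on the way in, and
 * only .revents is converted back on the way out, mirroring which fields
 * the kernel reads and which it writes.
 */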
10238 case TARGET_NR_flock:
10239 /* NOTE: the flock constant seems to be the same for every
10240 Linux platform */
10241 return get_errno(safe_flock(arg1, arg2));
10242 case TARGET_NR_readv:
10244 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10245 if (vec != NULL) {
10246 ret = get_errno(safe_readv(arg1, vec, arg3));
10247 unlock_iovec(vec, arg2, arg3, 1);
10248 } else {
10249 ret = -host_to_target_errno(errno);
10252 return ret;
10253 case TARGET_NR_writev:
10255 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10256 if (vec != NULL) {
10257 ret = get_errno(safe_writev(arg1, vec, arg3));
10258 unlock_iovec(vec, arg2, arg3, 0);
10259 } else {
10260 ret = -host_to_target_errno(errno);
10263 return ret;
10264 #if defined(TARGET_NR_preadv)
10265 case TARGET_NR_preadv:
10267 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10268 if (vec != NULL) {
10269 unsigned long low, high;
10271 target_to_host_low_high(arg4, arg5, &low, &high);
10272 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10273 unlock_iovec(vec, arg2, arg3, 1);
10274 } else {
10275 ret = -host_to_target_errno(errno);
10278 return ret;
10279 #endif
10280 #if defined(TARGET_NR_pwritev)
10281 case TARGET_NR_pwritev:
10283 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10284 if (vec != NULL) {
10285 unsigned long low, high;
10287 target_to_host_low_high(arg4, arg5, &low, &high);
10288 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10289 unlock_iovec(vec, arg2, arg3, 0);
10290 } else {
10291 ret = -host_to_target_errno(errno);
10294 return ret;
10295 #endif
10296 case TARGET_NR_getsid:
10297 return get_errno(getsid(arg1));
10298 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10299 case TARGET_NR_fdatasync:
10300 return get_errno(fdatasync(arg1));
10301 #endif
10302 #ifdef TARGET_NR__sysctl
10303 case TARGET_NR__sysctl:
10304 /* We don't implement this, but ENOTDIR is always a safe
10305 return value. */
10306 return -TARGET_ENOTDIR;
10307 #endif
10308 case TARGET_NR_sched_getaffinity:
10309 {
10310 unsigned int mask_size;
10311 unsigned long *mask;
10313 /*
10314 * sched_getaffinity needs multiples of ulong, so need to take
10315 * care of mismatches between target ulong and host ulong sizes.
10316 */
10317 if (arg2 & (sizeof(abi_ulong) - 1)) {
10318 return -TARGET_EINVAL;
10319 }
10320 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10322 mask = alloca(mask_size);
10323 memset(mask, 0, mask_size);
10324 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10326 if (!is_error(ret)) {
10327 if (ret > arg2) {
10328 /* More data returned than the caller's buffer will fit.
10329 * This only happens if sizeof(abi_long) < sizeof(long)
10330 * and the caller passed us a buffer holding an odd number
10331 * of abi_longs. If the host kernel is actually using the
10332 * extra 4 bytes then fail EINVAL; otherwise we can just
10333 * ignore them and only copy the interesting part.
10334 */
10335 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10336 if (numcpus > arg2 * 8) {
10337 return -TARGET_EINVAL;
10338 }
10339 ret = arg2;
10340 }
10342 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10343 return -TARGET_EFAULT;
10344 }
10345 }
10346 }
10347 return ret;
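/* The rounding above keeps the host buffer a whole number of host longs.
 * For example, on a 64-bit host a guest length of arg2 == 12 bytes
 * becomes
 *
 *     mask_size = (12 + 7) & ~7;   // 16 bytes, i.e. two host longs
 *
 * so the kernel never writes a partial long; any excess over arg2 is then
 * trimmed before copying the mask back out.
 */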
10348 case TARGET_NR_sched_setaffinity:
10349 {
10350 unsigned int mask_size;
10351 unsigned long *mask;
10353 /*
10354 * sched_setaffinity needs multiples of ulong, so need to take
10355 * care of mismatches between target ulong and host ulong sizes.
10356 */
10357 if (arg2 & (sizeof(abi_ulong) - 1)) {
10358 return -TARGET_EINVAL;
10359 }
10360 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10361 mask = alloca(mask_size);
10363 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10364 if (ret) {
10365 return ret;
10366 }
10368 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10369 }
10370 case TARGET_NR_getcpu:
10371 {
10372 unsigned cpu, node;
10373 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10374 arg2 ? &node : NULL,
10375 NULL));
10376 if (is_error(ret)) {
10377 return ret;
10379 if (arg1 && put_user_u32(cpu, arg1)) {
10380 return -TARGET_EFAULT;
10382 if (arg2 && put_user_u32(node, arg2)) {
10383 return -TARGET_EFAULT;
10384 }
10385 }
10386 return ret;
10387 case TARGET_NR_sched_setparam:
10389 struct sched_param *target_schp;
10390 struct sched_param schp;
10392 if (arg2 == 0) {
10393 return -TARGET_EINVAL;
10395 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10396 return -TARGET_EFAULT;
10397 schp.sched_priority = tswap32(target_schp->sched_priority);
10398 unlock_user_struct(target_schp, arg2, 0);
10399 return get_errno(sched_setparam(arg1, &schp));
10401 case TARGET_NR_sched_getparam:
10403 struct sched_param *target_schp;
10404 struct sched_param schp;
10406 if (arg2 == 0) {
10407 return -TARGET_EINVAL;
10409 ret = get_errno(sched_getparam(arg1, &schp));
10410 if (!is_error(ret)) {
10411 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10412 return -TARGET_EFAULT;
10413 target_schp->sched_priority = tswap32(schp.sched_priority);
10414 unlock_user_struct(target_schp, arg2, 1);
10417 return ret;
10418 case TARGET_NR_sched_setscheduler:
10420 struct sched_param *target_schp;
10421 struct sched_param schp;
10422 if (arg3 == 0) {
10423 return -TARGET_EINVAL;
10425 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10426 return -TARGET_EFAULT;
10427 schp.sched_priority = tswap32(target_schp->sched_priority);
10428 unlock_user_struct(target_schp, arg3, 0);
10429 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10431 case TARGET_NR_sched_getscheduler:
10432 return get_errno(sched_getscheduler(arg1));
10433 case TARGET_NR_sched_yield:
10434 return get_errno(sched_yield());
10435 case TARGET_NR_sched_get_priority_max:
10436 return get_errno(sched_get_priority_max(arg1));
10437 case TARGET_NR_sched_get_priority_min:
10438 return get_errno(sched_get_priority_min(arg1));
10439 #ifdef TARGET_NR_sched_rr_get_interval
10440 case TARGET_NR_sched_rr_get_interval:
10442 struct timespec ts;
10443 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10444 if (!is_error(ret)) {
10445 ret = host_to_target_timespec(arg2, &ts);
10448 return ret;
10449 #endif
10450 #if defined(TARGET_NR_nanosleep)
10451 case TARGET_NR_nanosleep:
10453 struct timespec req, rem;
10454 target_to_host_timespec(&req, arg1);
10455 ret = get_errno(safe_nanosleep(&req, &rem));
10456 if (is_error(ret) && arg2) {
10457 host_to_target_timespec(arg2, &rem);
10460 return ret;
10461 #endif
10462 case TARGET_NR_prctl:
10463 switch (arg1) {
10464 case PR_GET_PDEATHSIG:
10466 int deathsig;
10467 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10468 if (!is_error(ret) && arg2
10469 && put_user_ual(deathsig, arg2)) {
10470 return -TARGET_EFAULT;
10472 return ret;
10474 #ifdef PR_GET_NAME
10475 case PR_GET_NAME:
10477 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10478 if (!name) {
10479 return -TARGET_EFAULT;
10481 ret = get_errno(prctl(arg1, (unsigned long)name,
10482 arg3, arg4, arg5));
10483 unlock_user(name, arg2, 16);
10484 return ret;
10486 case PR_SET_NAME:
10488 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10489 if (!name) {
10490 return -TARGET_EFAULT;
10492 ret = get_errno(prctl(arg1, (unsigned long)name,
10493 arg3, arg4, arg5));
10494 unlock_user(name, arg2, 0);
10495 return ret;
10497 #endif
10498 #ifdef TARGET_MIPS
10499 case TARGET_PR_GET_FP_MODE:
10500 {
10501 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10502 ret = 0;
10503 if (env->CP0_Status & (1 << CP0St_FR)) {
10504 ret |= TARGET_PR_FP_MODE_FR;
10505 }
10506 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10507 ret |= TARGET_PR_FP_MODE_FRE;
10508 }
10509 return ret;
10510 }
10511 case TARGET_PR_SET_FP_MODE:
10512 {
10513 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10514 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10515 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10516 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10517 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10519 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10520 TARGET_PR_FP_MODE_FRE;
10522 /* If nothing to change, return right away, successfully. */
10523 if (old_fr == new_fr && old_fre == new_fre) {
10524 return 0;
10526 /* Check the value is valid */
10527 if (arg2 & ~known_bits) {
10528 return -TARGET_EOPNOTSUPP;
10530 /* Setting FRE without FR is not supported. */
10531 if (new_fre && !new_fr) {
10532 return -TARGET_EOPNOTSUPP;
10534 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10535 /* FR1 is not supported */
10536 return -TARGET_EOPNOTSUPP;
10538 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10539 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10540 /* cannot set FR=0 */
10541 return -TARGET_EOPNOTSUPP;
10543 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10544 /* Cannot set FRE=1 */
10545 return -TARGET_EOPNOTSUPP;
10546 }
10548 int i;
10549 fpr_t *fpr = env->active_fpu.fpr;
10550 for (i = 0; i < 32 ; i += 2) {
10551 if (!old_fr && new_fr) {
10552 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10553 } else if (old_fr && !new_fr) {
10554 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10555 }
10556 }
10558 if (new_fr) {
10559 env->CP0_Status |= (1 << CP0St_FR);
10560 env->hflags |= MIPS_HFLAG_F64;
10561 } else {
10562 env->CP0_Status &= ~(1 << CP0St_FR);
10563 env->hflags &= ~MIPS_HFLAG_F64;
10564 }
10565 if (new_fre) {
10566 env->CP0_Config5 |= (1 << CP0C5_FRE);
10567 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10568 env->hflags |= MIPS_HFLAG_FRE;
10569 }
10570 } else {
10571 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10572 env->hflags &= ~MIPS_HFLAG_FRE;
10573 }
10575 return 0;
10576 }
10577 #endif /* MIPS */
10578 #ifdef TARGET_AARCH64
10579 case TARGET_PR_SVE_SET_VL:
10580 /*
10581 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10582 * PR_SVE_VL_INHERIT. Note the kernel definition
10583 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10584 * even though the current architectural maximum is VQ=16.
10585 */
10586 ret = -TARGET_EINVAL;
10587 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10588 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10589 CPUARMState *env = cpu_env;
10590 ARMCPU *cpu = env_archcpu(env);
10591 uint32_t vq, old_vq;
10593 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10594 vq = MAX(arg2 / 16, 1);
10595 vq = MIN(vq, cpu->sve_max_vq);
10597 if (vq < old_vq) {
10598 aarch64_sve_narrow_vq(env, vq);
10599 }
10600 env->vfp.zcr_el[1] = vq - 1;
10601 arm_rebuild_hflags(env);
10602 ret = vq * 16;
10603 }
10604 return ret;
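/* Worked example for the VL/VQ arithmetic above: a guest requesting
 * PR_SVE_SET_VL with arg2 == 64 (a 64-byte vector length) gets
 *
 *     vq = MAX(64 / 16, 1);        // 4 quadwords
 *     env->vfp.zcr_el[1] = 4 - 1;  // ZCR_EL1.LEN encodes VQ - 1
 *     ret = 4 * 16;                // the VL actually granted, in bytes
 *
 * assuming the CPU's sve_max_vq is at least 4.
 */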
10605 case TARGET_PR_SVE_GET_VL:
10606 ret = -TARGET_EINVAL;
10607 {
10608 ARMCPU *cpu = env_archcpu(cpu_env);
10609 if (cpu_isar_feature(aa64_sve, cpu)) {
10610 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10611 }
10612 }
10613 return ret;
10614 case TARGET_PR_PAC_RESET_KEYS:
10615 {
10616 CPUARMState *env = cpu_env;
10617 ARMCPU *cpu = env_archcpu(env);
10619 if (arg3 || arg4 || arg5) {
10620 return -TARGET_EINVAL;
10621 }
10622 if (cpu_isar_feature(aa64_pauth, cpu)) {
10623 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10624 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10625 TARGET_PR_PAC_APGAKEY);
10626 int ret = 0;
10627 Error *err = NULL;
10629 if (arg2 == 0) {
10630 arg2 = all;
10631 } else if (arg2 & ~all) {
10632 return -TARGET_EINVAL;
10633 }
10634 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10635 ret |= qemu_guest_getrandom(&env->keys.apia,
10636 sizeof(ARMPACKey), &err);
10637 }
10638 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10639 ret |= qemu_guest_getrandom(&env->keys.apib,
10640 sizeof(ARMPACKey), &err);
10641 }
10642 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10643 ret |= qemu_guest_getrandom(&env->keys.apda,
10644 sizeof(ARMPACKey), &err);
10645 }
10646 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10647 ret |= qemu_guest_getrandom(&env->keys.apdb,
10648 sizeof(ARMPACKey), &err);
10649 }
10650 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10651 ret |= qemu_guest_getrandom(&env->keys.apga,
10652 sizeof(ARMPACKey), &err);
10653 }
10654 if (ret != 0) {
10655 /*
10656 * Some unknown failure in the crypto. The best
10657 * we can do is log it and fail the syscall.
10658 * The real syscall cannot fail this way.
10659 */
10660 qemu_log_mask(LOG_UNIMP,
10661 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10662 error_get_pretty(err));
10663 error_free(err);
10664 return -TARGET_EIO;
10665 }
10666 return 0;
10667 }
10668 }
10669 return -TARGET_EINVAL;
10670 #endif /* AARCH64 */
10671 case PR_GET_SECCOMP:
10672 case PR_SET_SECCOMP:
10673 /* Disable seccomp to prevent the target disabling syscalls we
10674 * need. */
10675 return -TARGET_EINVAL;
10676 default:
10677 /* Most prctl options have no pointer arguments */
10678 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10679 }
10680 break;
10681 #ifdef TARGET_NR_arch_prctl
10682 case TARGET_NR_arch_prctl:
10683 return do_arch_prctl(cpu_env, arg1, arg2);
10684 #endif
10685 #ifdef TARGET_NR_pread64
10686 case TARGET_NR_pread64:
10687 if (regpairs_aligned(cpu_env, num)) {
10688 arg4 = arg5;
10689 arg5 = arg6;
10691 if (arg2 == 0 && arg3 == 0) {
10692 /* Special-case NULL buffer and zero length, which should succeed */
10693 p = 0;
10694 } else {
10695 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10696 if (!p) {
10697 return -TARGET_EFAULT;
10700 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10701 unlock_user(p, arg2, ret);
10702 return ret;
10703 case TARGET_NR_pwrite64:
10704 if (regpairs_aligned(cpu_env, num)) {
10705 arg4 = arg5;
10706 arg5 = arg6;
10708 if (arg2 == 0 && arg3 == 0) {
10709 /* Special-case NULL buffer and zero length, which should succeed */
10710 p = 0;
10711 } else {
10712 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10713 if (!p) {
10714 return -TARGET_EFAULT;
10717 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10718 unlock_user(p, arg2, 0);
10719 return ret;
10720 #endif
10721 case TARGET_NR_getcwd:
10722 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10723 return -TARGET_EFAULT;
10724 ret = get_errno(sys_getcwd1(p, arg2));
10725 unlock_user(p, arg1, ret);
10726 return ret;
10727 case TARGET_NR_capget:
10728 case TARGET_NR_capset:
10729 {
10730 struct target_user_cap_header *target_header;
10731 struct target_user_cap_data *target_data = NULL;
10732 struct __user_cap_header_struct header;
10733 struct __user_cap_data_struct data[2];
10734 struct __user_cap_data_struct *dataptr = NULL;
10735 int i, target_datalen;
10736 int data_items = 1;
10738 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10739 return -TARGET_EFAULT;
10740 }
10741 header.version = tswap32(target_header->version);
10742 header.pid = tswap32(target_header->pid);
10744 if (header.version != _LINUX_CAPABILITY_VERSION) {
10745 /* Versions 2 and up take a pointer to two user_data structs */
10746 data_items = 2;
10747 }
10749 target_datalen = sizeof(*target_data) * data_items;
10751 if (arg2) {
10752 if (num == TARGET_NR_capget) {
10753 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10754 } else {
10755 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10756 }
10757 if (!target_data) {
10758 unlock_user_struct(target_header, arg1, 0);
10759 return -TARGET_EFAULT;
10760 }
10762 if (num == TARGET_NR_capset) {
10763 for (i = 0; i < data_items; i++) {
10764 data[i].effective = tswap32(target_data[i].effective);
10765 data[i].permitted = tswap32(target_data[i].permitted);
10766 data[i].inheritable = tswap32(target_data[i].inheritable);
10767 }
10768 }
10770 dataptr = data;
10771 }
10773 if (num == TARGET_NR_capget) {
10774 ret = get_errno(capget(&header, dataptr));
10775 } else {
10776 ret = get_errno(capset(&header, dataptr));
10779 /* The kernel always updates version for both capget and capset */
10780 target_header->version = tswap32(header.version);
10781 unlock_user_struct(target_header, arg1, 1);
10783 if (arg2) {
10784 if (num == TARGET_NR_capget) {
10785 for (i = 0; i < data_items; i++) {
10786 target_data[i].effective = tswap32(data[i].effective);
10787 target_data[i].permitted = tswap32(data[i].permitted);
10788 target_data[i].inheritable = tswap32(data[i].inheritable);
10789 }
10790 unlock_user(target_data, arg2, target_datalen);
10791 } else {
10792 unlock_user(target_data, arg2, 0);
10793 }
10794 }
10795 return ret;
10796 }
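/* Capability data sizing, in short: the v1 interface
 * (_LINUX_CAPABILITY_VERSION) uses a single __user_cap_data_struct with
 * 32 capability bits, while later versions take an array of two so that
 * 64 bits fit. Hence the logic above:
 *
 *     header.version == _LINUX_CAPABILITY_VERSION  ->  data_items = 1
 *     any other version (e.g. v3)                  ->  data_items = 2
 */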
10797 case TARGET_NR_sigaltstack:
10798 return do_sigaltstack(arg1, arg2,
10799 get_sp_from_cpustate((CPUArchState *)cpu_env));
10801 #ifdef CONFIG_SENDFILE
10802 #ifdef TARGET_NR_sendfile
10803 case TARGET_NR_sendfile:
10805 off_t *offp = NULL;
10806 off_t off;
10807 if (arg3) {
10808 ret = get_user_sal(off, arg3);
10809 if (is_error(ret)) {
10810 return ret;
10812 offp = &off;
10814 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10815 if (!is_error(ret) && arg3) {
10816 abi_long ret2 = put_user_sal(off, arg3);
10817 if (is_error(ret2)) {
10818 ret = ret2;
10821 return ret;
10823 #endif
10824 #ifdef TARGET_NR_sendfile64
10825 case TARGET_NR_sendfile64:
10827 off_t *offp = NULL;
10828 off_t off;
10829 if (arg3) {
10830 ret = get_user_s64(off, arg3);
10831 if (is_error(ret)) {
10832 return ret;
10834 offp = &off;
10836 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10837 if (!is_error(ret) && arg3) {
10838 abi_long ret2 = put_user_s64(off, arg3);
10839 if (is_error(ret2)) {
10840 ret = ret2;
10843 return ret;
10845 #endif
10846 #endif
10847 #ifdef TARGET_NR_vfork
10848 case TARGET_NR_vfork:
10849 return get_errno(do_fork(cpu_env,
10850 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10851 0, 0, 0, 0));
10852 #endif
10853 #ifdef TARGET_NR_ugetrlimit
10854 case TARGET_NR_ugetrlimit:
10856 struct rlimit rlim;
10857 int resource = target_to_host_resource(arg1);
10858 ret = get_errno(getrlimit(resource, &rlim));
10859 if (!is_error(ret)) {
10860 struct target_rlimit *target_rlim;
10861 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10862 return -TARGET_EFAULT;
10863 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10864 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10865 unlock_user_struct(target_rlim, arg2, 1);
10867 return ret;
10869 #endif
10870 #ifdef TARGET_NR_truncate64
10871 case TARGET_NR_truncate64:
10872 if (!(p = lock_user_string(arg1)))
10873 return -TARGET_EFAULT;
10874 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10875 unlock_user(p, arg1, 0);
10876 return ret;
10877 #endif
10878 #ifdef TARGET_NR_ftruncate64
10879 case TARGET_NR_ftruncate64:
10880 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10881 #endif
10882 #ifdef TARGET_NR_stat64
10883 case TARGET_NR_stat64:
10884 if (!(p = lock_user_string(arg1))) {
10885 return -TARGET_EFAULT;
10887 ret = get_errno(stat(path(p), &st));
10888 unlock_user(p, arg1, 0);
10889 if (!is_error(ret))
10890 ret = host_to_target_stat64(cpu_env, arg2, &st);
10891 return ret;
10892 #endif
10893 #ifdef TARGET_NR_lstat64
10894 case TARGET_NR_lstat64:
10895 if (!(p = lock_user_string(arg1))) {
10896 return -TARGET_EFAULT;
10898 ret = get_errno(lstat(path(p), &st));
10899 unlock_user(p, arg1, 0);
10900 if (!is_error(ret))
10901 ret = host_to_target_stat64(cpu_env, arg2, &st);
10902 return ret;
10903 #endif
10904 #ifdef TARGET_NR_fstat64
10905 case TARGET_NR_fstat64:
10906 ret = get_errno(fstat(arg1, &st));
10907 if (!is_error(ret))
10908 ret = host_to_target_stat64(cpu_env, arg2, &st);
10909 return ret;
10910 #endif
10911 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10912 #ifdef TARGET_NR_fstatat64
10913 case TARGET_NR_fstatat64:
10914 #endif
10915 #ifdef TARGET_NR_newfstatat
10916 case TARGET_NR_newfstatat:
10917 #endif
10918 if (!(p = lock_user_string(arg2))) {
10919 return -TARGET_EFAULT;
10921 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10922 unlock_user(p, arg2, 0);
10923 if (!is_error(ret))
10924 ret = host_to_target_stat64(cpu_env, arg3, &st);
10925 return ret;
10926 #endif
10927 #if defined(TARGET_NR_statx)
10928 case TARGET_NR_statx:
10929 {
10930 struct target_statx *target_stx;
10931 int dirfd = arg1;
10932 int flags = arg3;
10934 p = lock_user_string(arg2);
10935 if (p == NULL) {
10936 return -TARGET_EFAULT;
10938 #if defined(__NR_statx)
10939 {
10940 /*
10941 * It is assumed that struct statx is architecture independent.
10942 */
10943 struct target_statx host_stx;
10944 int mask = arg4;
10946 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10947 if (!is_error(ret)) {
10948 if (host_to_target_statx(&host_stx, arg5) != 0) {
10949 unlock_user(p, arg2, 0);
10950 return -TARGET_EFAULT;
10951 }
10952 }
10954 if (ret != -TARGET_ENOSYS) {
10955 unlock_user(p, arg2, 0);
10956 return ret;
10957 }
10958 }
10959 #endif
10960 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10961 unlock_user(p, arg2, 0);
10963 if (!is_error(ret)) {
10964 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10965 return -TARGET_EFAULT;
10967 memset(target_stx, 0, sizeof(*target_stx));
10968 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10969 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10970 __put_user(st.st_ino, &target_stx->stx_ino);
10971 __put_user(st.st_mode, &target_stx->stx_mode);
10972 __put_user(st.st_uid, &target_stx->stx_uid);
10973 __put_user(st.st_gid, &target_stx->stx_gid);
10974 __put_user(st.st_nlink, &target_stx->stx_nlink);
10975 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10976 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10977 __put_user(st.st_size, &target_stx->stx_size);
10978 __put_user(st.st_blksize, &target_stx->stx_blksize);
10979 __put_user(st.st_blocks, &target_stx->stx_blocks);
10980 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10981 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10982 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10983 unlock_user_struct(target_stx, arg5, 1);
10984 }
10985 }
10986 return ret;
10987 #endif
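/* When the host kernel lacks statx(2), the fallback above degrades to
 * fstatat() and fills in only the fields that a struct stat can supply;
 * the remaining target_statx fields (birth time, attributes, the
 * sub-second timestamps) stay zeroed from the memset. That is broadly
 * acceptable because statx callers must consult the returned mask rather
 * than assume every requested field is valid.
 */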
10988 #ifdef TARGET_NR_lchown
10989 case TARGET_NR_lchown:
10990 if (!(p = lock_user_string(arg1)))
10991 return -TARGET_EFAULT;
10992 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10993 unlock_user(p, arg1, 0);
10994 return ret;
10995 #endif
10996 #ifdef TARGET_NR_getuid
10997 case TARGET_NR_getuid:
10998 return get_errno(high2lowuid(getuid()));
10999 #endif
11000 #ifdef TARGET_NR_getgid
11001 case TARGET_NR_getgid:
11002 return get_errno(high2lowgid(getgid()));
11003 #endif
11004 #ifdef TARGET_NR_geteuid
11005 case TARGET_NR_geteuid:
11006 return get_errno(high2lowuid(geteuid()));
11007 #endif
11008 #ifdef TARGET_NR_getegid
11009 case TARGET_NR_getegid:
11010 return get_errno(high2lowgid(getegid()));
11011 #endif
11012 case TARGET_NR_setreuid:
11013 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11014 case TARGET_NR_setregid:
11015 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11016 case TARGET_NR_getgroups:
11018 int gidsetsize = arg1;
11019 target_id *target_grouplist;
11020 gid_t *grouplist;
11021 int i;
11023 grouplist = alloca(gidsetsize * sizeof(gid_t));
11024 ret = get_errno(getgroups(gidsetsize, grouplist));
11025 if (gidsetsize == 0)
11026 return ret;
11027 if (!is_error(ret)) {
11028 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11029 if (!target_grouplist)
11030 return -TARGET_EFAULT;
11031 for(i = 0;i < ret; i++)
11032 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11033 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11036 return ret;
11037 case TARGET_NR_setgroups:
11039 int gidsetsize = arg1;
11040 target_id *target_grouplist;
11041 gid_t *grouplist = NULL;
11042 int i;
11043 if (gidsetsize) {
11044 grouplist = alloca(gidsetsize * sizeof(gid_t));
11045 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11046 if (!target_grouplist) {
11047 return -TARGET_EFAULT;
11049 for (i = 0; i < gidsetsize; i++) {
11050 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11052 unlock_user(target_grouplist, arg2, 0);
11054 return get_errno(setgroups(gidsetsize, grouplist));
11056 case TARGET_NR_fchown:
11057 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11058 #if defined(TARGET_NR_fchownat)
11059 case TARGET_NR_fchownat:
11060 if (!(p = lock_user_string(arg2)))
11061 return -TARGET_EFAULT;
11062 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11063 low2highgid(arg4), arg5));
11064 unlock_user(p, arg2, 0);
11065 return ret;
11066 #endif
11067 #ifdef TARGET_NR_setresuid
11068 case TARGET_NR_setresuid:
11069 return get_errno(sys_setresuid(low2highuid(arg1),
11070 low2highuid(arg2),
11071 low2highuid(arg3)));
11072 #endif
11073 #ifdef TARGET_NR_getresuid
11074 case TARGET_NR_getresuid:
11076 uid_t ruid, euid, suid;
11077 ret = get_errno(getresuid(&ruid, &euid, &suid));
11078 if (!is_error(ret)) {
11079 if (put_user_id(high2lowuid(ruid), arg1)
11080 || put_user_id(high2lowuid(euid), arg2)
11081 || put_user_id(high2lowuid(suid), arg3))
11082 return -TARGET_EFAULT;
11085 return ret;
11086 #endif
11087 #ifdef TARGET_NR_getresgid
11088 case TARGET_NR_setresgid:
11089 return get_errno(sys_setresgid(low2highgid(arg1),
11090 low2highgid(arg2),
11091 low2highgid(arg3)));
11092 #endif
11093 #ifdef TARGET_NR_getresgid
11094 case TARGET_NR_getresgid:
11096 gid_t rgid, egid, sgid;
11097 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11098 if (!is_error(ret)) {
11099 if (put_user_id(high2lowgid(rgid), arg1)
11100 || put_user_id(high2lowgid(egid), arg2)
11101 || put_user_id(high2lowgid(sgid), arg3))
11102 return -TARGET_EFAULT;
11105 return ret;
11106 #endif
11107 #ifdef TARGET_NR_chown
11108 case TARGET_NR_chown:
11109 if (!(p = lock_user_string(arg1)))
11110 return -TARGET_EFAULT;
11111 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11112 unlock_user(p, arg1, 0);
11113 return ret;
11114 #endif
11115 case TARGET_NR_setuid:
11116 return get_errno(sys_setuid(low2highuid(arg1)));
11117 case TARGET_NR_setgid:
11118 return get_errno(sys_setgid(low2highgid(arg1)));
11119 case TARGET_NR_setfsuid:
11120 return get_errno(setfsuid(arg1));
11121 case TARGET_NR_setfsgid:
11122 return get_errno(setfsgid(arg1));
11124 #ifdef TARGET_NR_lchown32
11125 case TARGET_NR_lchown32:
11126 if (!(p = lock_user_string(arg1)))
11127 return -TARGET_EFAULT;
11128 ret = get_errno(lchown(p, arg2, arg3));
11129 unlock_user(p, arg1, 0);
11130 return ret;
11131 #endif
11132 #ifdef TARGET_NR_getuid32
11133 case TARGET_NR_getuid32:
11134 return get_errno(getuid());
11135 #endif
11137 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11138 /* Alpha specific */
11139 case TARGET_NR_getxuid:
11141 uid_t euid;
11142 euid=geteuid();
11143 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11145 return get_errno(getuid());
11146 #endif
11147 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11148 /* Alpha specific */
11149 case TARGET_NR_getxgid:
11151 uid_t egid;
11152 egid=getegid();
11153 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11155 return get_errno(getgid());
11156 #endif
11157 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11158 /* Alpha specific */
11159 case TARGET_NR_osf_getsysinfo:
11160 ret = -TARGET_EOPNOTSUPP;
11161 switch (arg1) {
11162 case TARGET_GSI_IEEE_FP_CONTROL:
11164 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11165 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11167 swcr &= ~SWCR_STATUS_MASK;
11168 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11170 if (put_user_u64 (swcr, arg2))
11171 return -TARGET_EFAULT;
11172 ret = 0;
11174 break;
11176 /* case GSI_IEEE_STATE_AT_SIGNAL:
11177 -- Not implemented in linux kernel.
11178 case GSI_UACPROC:
11179 -- Retrieves current unaligned access state; not much used.
11180 case GSI_PROC_TYPE:
11181 -- Retrieves implver information; surely not used.
11182 case GSI_GET_HWRPB:
11183 -- Grabs a copy of the HWRPB; surely not used.
11184 */
11185 }
11186 return ret;
11187 #endif
11188 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11189 /* Alpha specific */
11190 case TARGET_NR_osf_setsysinfo:
11191 ret = -TARGET_EOPNOTSUPP;
11192 switch (arg1) {
11193 case TARGET_SSI_IEEE_FP_CONTROL:
11195 uint64_t swcr, fpcr;
11197 if (get_user_u64 (swcr, arg2)) {
11198 return -TARGET_EFAULT;
11199 }
11201 /*
11202 * The kernel calls swcr_update_status to update the
11203 * status bits from the fpcr at every point that it
11204 * could be queried. Therefore, we store the status
11205 * bits only in FPCR.
11206 */
11207 ((CPUAlphaState *)cpu_env)->swcr
11208 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11210 fpcr = cpu_alpha_load_fpcr(cpu_env);
11211 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11212 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11213 cpu_alpha_store_fpcr(cpu_env, fpcr);
11214 ret = 0;
11216 break;
11218 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11220 uint64_t exc, fpcr, fex;
11222 if (get_user_u64(exc, arg2)) {
11223 return -TARGET_EFAULT;
11225 exc &= SWCR_STATUS_MASK;
11226 fpcr = cpu_alpha_load_fpcr(cpu_env);
11228 /* Old exceptions are not signaled. */
11229 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11230 fex = exc & ~fex;
11231 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11232 fex &= ((CPUArchState *)cpu_env)->swcr;
11234 /* Update the hardware fpcr. */
11235 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11236 cpu_alpha_store_fpcr(cpu_env, fpcr);
11238 if (fex) {
11239 int si_code = TARGET_FPE_FLTUNK;
11240 target_siginfo_t info;
11242 if (fex & SWCR_TRAP_ENABLE_DNO) {
11243 si_code = TARGET_FPE_FLTUND;
11245 if (fex & SWCR_TRAP_ENABLE_INE) {
11246 si_code = TARGET_FPE_FLTRES;
11248 if (fex & SWCR_TRAP_ENABLE_UNF) {
11249 si_code = TARGET_FPE_FLTUND;
11251 if (fex & SWCR_TRAP_ENABLE_OVF) {
11252 si_code = TARGET_FPE_FLTOVF;
11254 if (fex & SWCR_TRAP_ENABLE_DZE) {
11255 si_code = TARGET_FPE_FLTDIV;
11257 if (fex & SWCR_TRAP_ENABLE_INV) {
11258 si_code = TARGET_FPE_FLTINV;
11261 info.si_signo = SIGFPE;
11262 info.si_errno = 0;
11263 info.si_code = si_code;
11264 info._sifields._sigfault._addr
11265 = ((CPUArchState *)cpu_env)->pc;
11266 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11267 QEMU_SI_FAULT, &info);
11269 ret = 0;
11271 break;
11273 /* case SSI_NVPAIRS:
11274 -- Used with SSIN_UACPROC to enable unaligned accesses.
11275 case SSI_IEEE_STATE_AT_SIGNAL:
11276 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11277 -- Not implemented in linux kernel
11278 */
11279 }
11280 return ret;
11281 #endif
11282 #ifdef TARGET_NR_osf_sigprocmask
11283 /* Alpha specific. */
11284 case TARGET_NR_osf_sigprocmask:
11286 abi_ulong mask;
11287 int how;
11288 sigset_t set, oldset;
11290 switch(arg1) {
11291 case TARGET_SIG_BLOCK:
11292 how = SIG_BLOCK;
11293 break;
11294 case TARGET_SIG_UNBLOCK:
11295 how = SIG_UNBLOCK;
11296 break;
11297 case TARGET_SIG_SETMASK:
11298 how = SIG_SETMASK;
11299 break;
11300 default:
11301 return -TARGET_EINVAL;
11303 mask = arg2;
11304 target_to_host_old_sigset(&set, &mask);
11305 ret = do_sigprocmask(how, &set, &oldset);
11306 if (!ret) {
11307 host_to_target_old_sigset(&mask, &oldset);
11308 ret = mask;
11311 return ret;
11312 #endif
11314 #ifdef TARGET_NR_getgid32
11315 case TARGET_NR_getgid32:
11316 return get_errno(getgid());
11317 #endif
11318 #ifdef TARGET_NR_geteuid32
11319 case TARGET_NR_geteuid32:
11320 return get_errno(geteuid());
11321 #endif
11322 #ifdef TARGET_NR_getegid32
11323 case TARGET_NR_getegid32:
11324 return get_errno(getegid());
11325 #endif
11326 #ifdef TARGET_NR_setreuid32
11327 case TARGET_NR_setreuid32:
11328 return get_errno(setreuid(arg1, arg2));
11329 #endif
11330 #ifdef TARGET_NR_setregid32
11331 case TARGET_NR_setregid32:
11332 return get_errno(setregid(arg1, arg2));
11333 #endif
11334 #ifdef TARGET_NR_getgroups32
11335 case TARGET_NR_getgroups32:
11337 int gidsetsize = arg1;
11338 uint32_t *target_grouplist;
11339 gid_t *grouplist;
11340 int i;
11342 grouplist = alloca(gidsetsize * sizeof(gid_t));
11343 ret = get_errno(getgroups(gidsetsize, grouplist));
11344 if (gidsetsize == 0)
11345 return ret;
11346 if (!is_error(ret)) {
11347 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11348 if (!target_grouplist) {
11349 return -TARGET_EFAULT;
11351 for(i = 0;i < ret; i++)
11352 target_grouplist[i] = tswap32(grouplist[i]);
11353 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11356 return ret;
11357 #endif
11358 #ifdef TARGET_NR_setgroups32
11359 case TARGET_NR_setgroups32:
11361 int gidsetsize = arg1;
11362 uint32_t *target_grouplist;
11363 gid_t *grouplist;
11364 int i;
11366 grouplist = alloca(gidsetsize * sizeof(gid_t));
11367 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11368 if (!target_grouplist) {
11369 return -TARGET_EFAULT;
11371 for(i = 0;i < gidsetsize; i++)
11372 grouplist[i] = tswap32(target_grouplist[i]);
11373 unlock_user(target_grouplist, arg2, 0);
11374 return get_errno(setgroups(gidsetsize, grouplist));
11376 #endif
11377 #ifdef TARGET_NR_fchown32
11378 case TARGET_NR_fchown32:
11379 return get_errno(fchown(arg1, arg2, arg3));
11380 #endif
11381 #ifdef TARGET_NR_setresuid32
11382 case TARGET_NR_setresuid32:
11383 return get_errno(sys_setresuid(arg1, arg2, arg3));
11384 #endif
11385 #ifdef TARGET_NR_getresuid32
11386 case TARGET_NR_getresuid32:
11388 uid_t ruid, euid, suid;
11389 ret = get_errno(getresuid(&ruid, &euid, &suid));
11390 if (!is_error(ret)) {
11391 if (put_user_u32(ruid, arg1)
11392 || put_user_u32(euid, arg2)
11393 || put_user_u32(suid, arg3))
11394 return -TARGET_EFAULT;
11397 return ret;
11398 #endif
11399 #ifdef TARGET_NR_setresgid32
11400 case TARGET_NR_setresgid32:
11401 return get_errno(sys_setresgid(arg1, arg2, arg3));
11402 #endif
11403 #ifdef TARGET_NR_getresgid32
11404 case TARGET_NR_getresgid32:
11406 gid_t rgid, egid, sgid;
11407 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11408 if (!is_error(ret)) {
11409 if (put_user_u32(rgid, arg1)
11410 || put_user_u32(egid, arg2)
11411 || put_user_u32(sgid, arg3))
11412 return -TARGET_EFAULT;
11415 return ret;
11416 #endif
11417 #ifdef TARGET_NR_chown32
11418 case TARGET_NR_chown32:
11419 if (!(p = lock_user_string(arg1)))
11420 return -TARGET_EFAULT;
11421 ret = get_errno(chown(p, arg2, arg3));
11422 unlock_user(p, arg1, 0);
11423 return ret;
11424 #endif
11425 #ifdef TARGET_NR_setuid32
11426 case TARGET_NR_setuid32:
11427 return get_errno(sys_setuid(arg1));
11428 #endif
11429 #ifdef TARGET_NR_setgid32
11430 case TARGET_NR_setgid32:
11431 return get_errno(sys_setgid(arg1));
11432 #endif
11433 #ifdef TARGET_NR_setfsuid32
11434 case TARGET_NR_setfsuid32:
11435 return get_errno(setfsuid(arg1));
11436 #endif
11437 #ifdef TARGET_NR_setfsgid32
11438 case TARGET_NR_setfsgid32:
11439 return get_errno(setfsgid(arg1));
11440 #endif
11441 #ifdef TARGET_NR_mincore
11442 case TARGET_NR_mincore:
11444 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11445 if (!a) {
11446 return -TARGET_ENOMEM;
11448 p = lock_user_string(arg3);
11449 if (!p) {
11450 ret = -TARGET_EFAULT;
11451 } else {
11452 ret = get_errno(mincore(a, arg2, p));
11453 unlock_user(p, arg3, ret);
11455 unlock_user(a, arg1, 0);
11457 return ret;
11458 #endif
11459 #ifdef TARGET_NR_arm_fadvise64_64
11460 case TARGET_NR_arm_fadvise64_64:
11461 /* arm_fadvise64_64 looks like fadvise64_64 but
11462 * with different argument order: fd, advice, offset, len
11463 * rather than the usual fd, offset, len, advice.
11464 * Note that offset and len are both 64-bit so appear as
11465 * pairs of 32-bit registers.
11466 */
11467 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11468 target_offset64(arg5, arg6), arg2);
11469 return -host_to_target_errno(ret);
11470 #endif
11472 #if TARGET_ABI_BITS == 32
11474 #ifdef TARGET_NR_fadvise64_64
11475 case TARGET_NR_fadvise64_64:
11476 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11477 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11478 ret = arg2;
11479 arg2 = arg3;
11480 arg3 = arg4;
11481 arg4 = arg5;
11482 arg5 = arg6;
11483 arg6 = ret;
11484 #else
11485 /* 6 args: fd, offset (high, low), len (high, low), advice */
11486 if (regpairs_aligned(cpu_env, num)) {
11487 /* offset is in (3,4), len in (5,6) and advice in 7 */
11488 arg2 = arg3;
11489 arg3 = arg4;
11490 arg4 = arg5;
11491 arg5 = arg6;
11492 arg6 = arg7;
11494 #endif
11495 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11496 target_offset64(arg4, arg5), arg6);
11497 return -host_to_target_errno(ret);
11498 #endif
11500 #ifdef TARGET_NR_fadvise64
11501 case TARGET_NR_fadvise64:
11502 /* 5 args: fd, offset (high, low), len, advice */
11503 if (regpairs_aligned(cpu_env, num)) {
11504 /* offset is in (3,4), len in 5 and advice in 6 */
11505 arg2 = arg3;
11506 arg3 = arg4;
11507 arg4 = arg5;
11508 arg5 = arg6;
11510 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11511 return -host_to_target_errno(ret);
11512 #endif
11514 #else /* not a 32-bit ABI */
11515 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11516 #ifdef TARGET_NR_fadvise64_64
11517 case TARGET_NR_fadvise64_64:
11518 #endif
11519 #ifdef TARGET_NR_fadvise64
11520 case TARGET_NR_fadvise64:
11521 #endif
11522 #ifdef TARGET_S390X
11523 switch (arg4) {
11524 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11525 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11526 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11527 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11528 default: break;
11530 #endif
11531 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11532 #endif
11533 #endif /* end of 64-bit ABI fadvise handling */
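/* regpairs_aligned() (used by the fadvise and pread64/pwrite64 handling)
 * covers 32-bit ABIs that pass 64-bit values in register pairs starting
 * at an even register: such ABIs insert one padding argument before the
 * pair, shifting every later argument up a slot. A hypothetical guest
 * call illustrating the shuffle handled above:
 *
 *     fadvise64(fd, pad, off_half, off_half, len, advice)
 *
 * where the two halves form the 64-bit offset (half order depends on the
 * ABI and endianness), so the code moves arg3..arg7 down into arg2..arg6
 * before the common path runs.
 */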
11535 #ifdef TARGET_NR_madvise
11536 case TARGET_NR_madvise:
11537 /* A straight passthrough may not be safe because qemu sometimes
11538 turns private file-backed mappings into anonymous mappings.
11539 This will break MADV_DONTNEED.
11540 This is a hint, so ignoring and returning success is ok. */
11541 return 0;
11542 #endif
11543 #ifdef TARGET_NR_fcntl64
11544 case TARGET_NR_fcntl64:
11546 int cmd;
11547 struct flock64 fl;
11548 from_flock64_fn *copyfrom = copy_from_user_flock64;
11549 to_flock64_fn *copyto = copy_to_user_flock64;
11551 #ifdef TARGET_ARM
11552 if (!((CPUARMState *)cpu_env)->eabi) {
11553 copyfrom = copy_from_user_oabi_flock64;
11554 copyto = copy_to_user_oabi_flock64;
11556 #endif
11558 cmd = target_to_host_fcntl_cmd(arg2);
11559 if (cmd == -TARGET_EINVAL) {
11560 return cmd;
11563 switch(arg2) {
11564 case TARGET_F_GETLK64:
11565 ret = copyfrom(&fl, arg3);
11566 if (ret) {
11567 break;
11569 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11570 if (ret == 0) {
11571 ret = copyto(arg3, &fl);
11573 break;
11575 case TARGET_F_SETLK64:
11576 case TARGET_F_SETLKW64:
11577 ret = copyfrom(&fl, arg3);
11578 if (ret) {
11579 break;
11581 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11582 break;
11583 default:
11584 ret = do_fcntl(arg1, arg2, arg3);
11585 break;
11587 return ret;
11589 #endif
11590 #ifdef TARGET_NR_cacheflush
11591 case TARGET_NR_cacheflush:
11592 /* self-modifying code is handled automatically, so nothing needed */
11593 return 0;
11594 #endif
11595 #ifdef TARGET_NR_getpagesize
11596 case TARGET_NR_getpagesize:
11597 return TARGET_PAGE_SIZE;
11598 #endif
11599 case TARGET_NR_gettid:
11600 return get_errno(sys_gettid());
11601 #ifdef TARGET_NR_readahead
11602 case TARGET_NR_readahead:
11603 #if TARGET_ABI_BITS == 32
11604 if (regpairs_aligned(cpu_env, num)) {
11605 arg2 = arg3;
11606 arg3 = arg4;
11607 arg4 = arg5;
11609 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11610 #else
11611 ret = get_errno(readahead(arg1, arg2, arg3));
11612 #endif
11613 return ret;
11614 #endif
11615 #ifdef CONFIG_ATTR
11616 #ifdef TARGET_NR_setxattr
11617 case TARGET_NR_listxattr:
11618 case TARGET_NR_llistxattr:
11620 void *p, *b = 0;
11621 if (arg2) {
11622 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11623 if (!b) {
11624 return -TARGET_EFAULT;
11627 p = lock_user_string(arg1);
11628 if (p) {
11629 if (num == TARGET_NR_listxattr) {
11630 ret = get_errno(listxattr(p, b, arg3));
11631 } else {
11632 ret = get_errno(llistxattr(p, b, arg3));
11634 } else {
11635 ret = -TARGET_EFAULT;
11637 unlock_user(p, arg1, 0);
11638 unlock_user(b, arg2, arg3);
11639 return ret;
11641 case TARGET_NR_flistxattr:
11643 void *b = 0;
11644 if (arg2) {
11645 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11646 if (!b) {
11647 return -TARGET_EFAULT;
11650 ret = get_errno(flistxattr(arg1, b, arg3));
11651 unlock_user(b, arg2, arg3);
11652 return ret;
11654 case TARGET_NR_setxattr:
11655 case TARGET_NR_lsetxattr:
11657 void *p, *n, *v = 0;
11658 if (arg3) {
11659 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11660 if (!v) {
11661 return -TARGET_EFAULT;
11664 p = lock_user_string(arg1);
11665 n = lock_user_string(arg2);
11666 if (p && n) {
11667 if (num == TARGET_NR_setxattr) {
11668 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11669 } else {
11670 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11672 } else {
11673 ret = -TARGET_EFAULT;
11675 unlock_user(p, arg1, 0);
11676 unlock_user(n, arg2, 0);
11677 unlock_user(v, arg3, 0);
11679 return ret;
11680 case TARGET_NR_fsetxattr:
11682 void *n, *v = 0;
11683 if (arg3) {
11684 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11685 if (!v) {
11686 return -TARGET_EFAULT;
11689 n = lock_user_string(arg2);
11690 if (n) {
11691 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11692 } else {
11693 ret = -TARGET_EFAULT;
11695 unlock_user(n, arg2, 0);
11696 unlock_user(v, arg3, 0);
11698 return ret;
11699 case TARGET_NR_getxattr:
11700 case TARGET_NR_lgetxattr:
11702 void *p, *n, *v = 0;
11703 if (arg3) {
11704 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11705 if (!v) {
11706 return -TARGET_EFAULT;
11709 p = lock_user_string(arg1);
11710 n = lock_user_string(arg2);
11711 if (p && n) {
11712 if (num == TARGET_NR_getxattr) {
11713 ret = get_errno(getxattr(p, n, v, arg4));
11714 } else {
11715 ret = get_errno(lgetxattr(p, n, v, arg4));
11717 } else {
11718 ret = -TARGET_EFAULT;
11720 unlock_user(p, arg1, 0);
11721 unlock_user(n, arg2, 0);
11722 unlock_user(v, arg3, arg4);
11724 return ret;
11725 case TARGET_NR_fgetxattr:
11727 void *n, *v = 0;
11728 if (arg3) {
11729 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11730 if (!v) {
11731 return -TARGET_EFAULT;
11734 n = lock_user_string(arg2);
11735 if (n) {
11736 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11737 } else {
11738 ret = -TARGET_EFAULT;
11740 unlock_user(n, arg2, 0);
11741 unlock_user(v, arg3, arg4);
11743 return ret;
11744 case TARGET_NR_removexattr:
11745 case TARGET_NR_lremovexattr:
11747 void *p, *n;
11748 p = lock_user_string(arg1);
11749 n = lock_user_string(arg2);
11750 if (p && n) {
11751 if (num == TARGET_NR_removexattr) {
11752 ret = get_errno(removexattr(p, n));
11753 } else {
11754 ret = get_errno(lremovexattr(p, n));
11756 } else {
11757 ret = -TARGET_EFAULT;
11759 unlock_user(p, arg1, 0);
11760 unlock_user(n, arg2, 0);
11762 return ret;
11763 case TARGET_NR_fremovexattr:
11765 void *n;
11766 n = lock_user_string(arg2);
11767 if (n) {
11768 ret = get_errno(fremovexattr(arg1, n));
11769 } else {
11770 ret = -TARGET_EFAULT;
11772 unlock_user(n, arg2, 0);
11774 return ret;
11775 #endif
11776 #endif /* CONFIG_ATTR */
11777 #ifdef TARGET_NR_set_thread_area
11778 case TARGET_NR_set_thread_area:
11779 #if defined(TARGET_MIPS)
11780 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11781 return 0;
11782 #elif defined(TARGET_CRIS)
11783 if (arg1 & 0xff)
11784 ret = -TARGET_EINVAL;
11785 else {
11786 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11787 ret = 0;
11789 return ret;
11790 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11791 return do_set_thread_area(cpu_env, arg1);
11792 #elif defined(TARGET_M68K)
11794 TaskState *ts = cpu->opaque;
11795 ts->tp_value = arg1;
11796 return 0;
11798 #else
11799 return -TARGET_ENOSYS;
11800 #endif
11801 #endif
11802 #ifdef TARGET_NR_get_thread_area
11803 case TARGET_NR_get_thread_area:
11804 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11805 return do_get_thread_area(cpu_env, arg1);
11806 #elif defined(TARGET_M68K)
11808 TaskState *ts = cpu->opaque;
11809 return ts->tp_value;
11811 #else
11812 return -TARGET_ENOSYS;
11813 #endif
11814 #endif
11815 #ifdef TARGET_NR_getdomainname
11816 case TARGET_NR_getdomainname:
11817 return -TARGET_ENOSYS;
11818 #endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* Report a fault if the result cannot be written back. */
            if (host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* Report a fault if the result cannot be written back. */
            if (host_to_target_timespec64(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
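    /*
     * utimensat(2) takes an array of two timespecs: times[0] is the access
     * time and times[1] the modification time. A NULL times pointer means
     * "set both to the current time", which is why arg3 == 0 is passed
     * through as tsp = NULL rather than being treated as a fault.
     */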
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
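    /*
     * For inotify (and eventfd further below), fd_trans_register() attaches
     * a translation handler to the new descriptor so that later reads can
     * convert the host-endian event structures into the guest's byte order
     * before they are copied back to guest memory.
     */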
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
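    /*
     * The timed mq operations below convert the guest timespec to a host
     * one before the call and copy it back afterwards; a conversion
     * failure in either direction is reported as -TARGET_EFAULT. A zero
     * arg5 selects an untimed (indefinitely blocking) call instead.
     */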
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* The receive buffer is written to, so lock it for writing. */
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /* break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
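    /*
     * splice() may update the in/out offsets through its pointer
     * arguments, so the implementation below reads any guest-supplied
     * offsets into local loff_t copies, performs the host call, and then
     * writes the possibly-updated values back to guest memory.
     */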
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
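    /*
     * On 32-bit guest ABIs a 64-bit file offset is split across two
     * consecutive syscall arguments; target_offset64() reassembles the
     * pair in the guest's register order. That is why fallocate and
     * sync_file_range need separate TARGET_ABI_BITS == 32 paths (with a
     * MIPS-specific argument layout for the latter).
     */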
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
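    /*
     * epoll_wait/epoll_pwait cannot let the kernel write host
     * struct epoll_event directly into guest memory, since the guest
     * layout may differ: maxevents is bounds-checked, the events land in
     * a temporary host array, and each returned entry is byteswapped into
     * the locked guest buffer afterwards.
     */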
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
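    /*
     * prlimit64 uses a fixed-layout 64-bit rlimit structure on all ABIs,
     * so only byte order needs translating here (tswap64). New limits for
     * RLIMIT_AS/DATA/STACK are deliberately not passed through: applying
     * them would constrain QEMU's own host process, which shares its
     * address space with the guest.
     */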
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
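    /*
     * POSIX timers are tracked in the g_posix_timers table. The guest is
     * handed TIMER_MAGIC | index as its timer_t, and get_timer_id()
     * validates the magic and recovers the index, so a stale or forged
     * guest timer id fails cleanly instead of dereferencing an arbitrary
     * host timer_t.
     */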
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
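    /*
     * Unlike the POSIX timer_* family above, the timerfd_* calls operate
     * on ordinary file descriptors, so no host-side id table is needed;
     * only the creation flags and the itimerspec contents require
     * translation.
     */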
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}