/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef CONFIG_KCOV
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef CONFIG_BTRFS
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"

#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
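
/*
 * Editor's illustration (assumption, not from the original source): the
 * flag set glibc's pthread_create() typically passes to clone() is
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *   CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS and otherwise only bits in
 * CLONE_OPTIONAL_THREAD_FLAGS, so it clears the CLONE_INVALID_THREAD_FLAGS
 * check above; a plain fork() passes none of the thread flags and is
 * checked against the fork mask instead.
 */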

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
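
/*
 * Editor's illustration (assumption: standard C preprocessor semantics,
 * not part of the original file): _syscall0(int, sys_gettid) expands to
 *
 *     static int sys_gettid (void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * and the __NR_sys_* aliases defined below redirect each wrapper to the
 * corresponding host syscall number.
 */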

#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
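
/*
 * Editor's note (illustrative; helper name as used elsewhere in
 * linux-user): each row is { target_mask, target_bits, host_mask,
 * host_bits }. A translator such as target_to_host_bitmask() ORs in
 * host_bits for every row whose target_bits are set under target_mask,
 * so e.g. a guest TARGET_O_NONBLOCK becomes the host's O_NONBLOCK even
 * when the two architectures assign it different numeric values.
 */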

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
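
/*
 * Editor's worked example (not from the original source): if a host
 * openat() fails with errno == EOPNOTSUPP, get_errno() sees ret == -1
 * and returns -host_to_target_errno(EOPNOTSUPP), i.e.
 * -TARGET_EOPNOTSUPP, so the guest observes the errno numbering of its
 * own architecture rather than the host's.
 */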

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
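
/*
 * Editor's note (summary, assuming the safe_syscall() semantics QEMU
 * documents elsewhere in linux-user): unlike a raw syscall(), a safe_*
 * wrapper is constructed so that a guest signal arriving before the host
 * syscall actually starts makes the wrapper fail with
 * errno == TARGET_ERESTARTSYS instead of blocking, letting the main loop
 * deliver the signal and then restart the guest syscall.
 */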

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
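
/*
 * Editor's worked example (illustrative): with TARGET_ABI_BITS == 32 and
 * n == 40, copy_from_user_fdset() locks nw == 2 guest words. Guest fd 35
 * is found at word i == 1, bit j == 3 (k == 35) and replayed into the
 * host fd_set with FD_SET(35, fds); copy_to_user_fdset() packs
 * FD_ISSET() results back into guest words the same way, so fd numbering
 * survives the differing word sizes of guest and host.
 */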

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
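
/*
 * Editor's worked example (illustrative): on an Alpha host, HOST_HZ is
 * 1024; reporting 2048 host ticks to a 100 Hz target yields
 * 2048 * 100 / 1024 == 200 target clock ticks. The int64_t cast keeps
 * the intermediate product from overflowing a 32-bit long.
 */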

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
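
/*
 * Editor's note (illustrative): target__kernel_timespec gives tv_nsec a
 * 64-bit slot even for 32-bit guests, where only the low half is
 * meaningful. The (long)(abi_long) round trip above truncates to the
 * guest's 32-bit value and sign-extends it, so padding garbage in the
 * upper half cannot reach the host syscall.
 */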

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
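
/*
 * Editor's worked example for the sun_path fixup above (illustrative):
 * a guest that calls connect() with sun_path "/tmp/sock" but passes
 * len == offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
 * has left the terminating NUL just outside the stated length; the
 * cp[len-1] && !cp[len] test detects exactly that case and extends len
 * by one byte so the host sees a properly terminated path.
 */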

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
1908 break;
1910 default:
1911 unimplemented:
1912 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1913 cmsg->cmsg_level, cmsg->cmsg_type);
1914 memcpy(target_data, data, MIN(len, tgt_len));
1915 if (tgt_len > len) {
1916 memset(target_data + len, 0, tgt_len - len);
1920 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1921 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1922 if (msg_controllen < tgt_space) {
1923 tgt_space = msg_controllen;
1925 msg_controllen -= tgt_space;
1926 space += tgt_space;
1927 cmsg = CMSG_NXTHDR(msgh, cmsg);
1928 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1929 target_cmsg_start);
1931 unlock_user(target_cmsg, target_cmsg_addr, space);
1932 the_end:
1933 target_msgh->msg_controllen = tswapal(space);
1934 return 0;
1935 }
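/*
 * Worked example (illustrative, assuming an LP64 host and a 32-bit guest):
 * why tgt_len above can differ from len. A host SO_TIMESTAMP payload is a
 * 16-byte struct timeval, while the guest's struct target_timeval is only
 * 8 bytes, so the payload is converted field by field rather than copied.
 */
#if 0
struct timeval host_tv;                        /* 2 x 8 bytes on LP64 hosts */
struct { int32_t tv_sec, tv_usec; } guest_tv;  /* stand-in for a 32-bit
                                                  target_timeval            */
QEMU_BUILD_BUG_ON(sizeof(host_tv) == sizeof(guest_tv)); /* sizes differ
                                                  under the assumption above */
#endif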
1937 /* do_setsockopt() must return target values and target errnos. */
1938 static abi_long do_setsockopt(int sockfd, int level, int optname,
1939 abi_ulong optval_addr, socklen_t optlen)
1941 abi_long ret;
1942 int val;
1943 struct ip_mreqn *ip_mreq;
1944 struct ip_mreq_source *ip_mreq_source;
1946 switch(level) {
1947 case SOL_TCP:
1948 /* TCP options all take an 'int' value. */
1949 if (optlen < sizeof(uint32_t))
1950 return -TARGET_EINVAL;
1952 if (get_user_u32(val, optval_addr))
1953 return -TARGET_EFAULT;
1954 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1955 break;
1956 case SOL_IP:
1957 switch(optname) {
1958 case IP_TOS:
1959 case IP_TTL:
1960 case IP_HDRINCL:
1961 case IP_ROUTER_ALERT:
1962 case IP_RECVOPTS:
1963 case IP_RETOPTS:
1964 case IP_PKTINFO:
1965 case IP_MTU_DISCOVER:
1966 case IP_RECVERR:
1967 case IP_RECVTTL:
1968 case IP_RECVTOS:
1969 #ifdef IP_FREEBIND
1970 case IP_FREEBIND:
1971 #endif
1972 case IP_MULTICAST_TTL:
1973 case IP_MULTICAST_LOOP:
1974 val = 0;
1975 if (optlen >= sizeof(uint32_t)) {
1976 if (get_user_u32(val, optval_addr))
1977 return -TARGET_EFAULT;
1978 } else if (optlen >= 1) {
1979 if (get_user_u8(val, optval_addr))
1980 return -TARGET_EFAULT;
1982 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1983 break;
1984 case IP_ADD_MEMBERSHIP:
1985 case IP_DROP_MEMBERSHIP:
1986 if (optlen < sizeof (struct target_ip_mreq) ||
1987 optlen > sizeof (struct target_ip_mreqn))
1988 return -TARGET_EINVAL;
1990 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1991 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1992 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1993 break;
1995 case IP_BLOCK_SOURCE:
1996 case IP_UNBLOCK_SOURCE:
1997 case IP_ADD_SOURCE_MEMBERSHIP:
1998 case IP_DROP_SOURCE_MEMBERSHIP:
1999 if (optlen != sizeof (struct target_ip_mreq_source))
2000 return -TARGET_EINVAL;
2002 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2003 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2004 unlock_user (ip_mreq_source, optval_addr, 0);
2005 break;
2007 default:
2008 goto unimplemented;
2010 break;
2011 case SOL_IPV6:
2012 switch (optname) {
2013 case IPV6_MTU_DISCOVER:
2014 case IPV6_MTU:
2015 case IPV6_V6ONLY:
2016 case IPV6_RECVPKTINFO:
2017 case IPV6_UNICAST_HOPS:
2018 case IPV6_MULTICAST_HOPS:
2019 case IPV6_MULTICAST_LOOP:
2020 case IPV6_RECVERR:
2021 case IPV6_RECVHOPLIMIT:
2022 case IPV6_2292HOPLIMIT:
2023 case IPV6_CHECKSUM:
2024 case IPV6_ADDRFORM:
2025 case IPV6_2292PKTINFO:
2026 case IPV6_RECVTCLASS:
2027 case IPV6_RECVRTHDR:
2028 case IPV6_2292RTHDR:
2029 case IPV6_RECVHOPOPTS:
2030 case IPV6_2292HOPOPTS:
2031 case IPV6_RECVDSTOPTS:
2032 case IPV6_2292DSTOPTS:
2033 case IPV6_TCLASS:
2034 #ifdef IPV6_RECVPATHMTU
2035 case IPV6_RECVPATHMTU:
2036 #endif
2037 #ifdef IPV6_TRANSPARENT
2038 case IPV6_TRANSPARENT:
2039 #endif
2040 #ifdef IPV6_FREEBIND
2041 case IPV6_FREEBIND:
2042 #endif
2043 #ifdef IPV6_RECVORIGDSTADDR
2044 case IPV6_RECVORIGDSTADDR:
2045 #endif
2046 val = 0;
2047 if (optlen < sizeof(uint32_t)) {
2048 return -TARGET_EINVAL;
2050 if (get_user_u32(val, optval_addr)) {
2051 return -TARGET_EFAULT;
2053 ret = get_errno(setsockopt(sockfd, level, optname,
2054 &val, sizeof(val)));
2055 break;
2056 case IPV6_PKTINFO:
2058 struct in6_pktinfo pki;
2060 if (optlen < sizeof(pki)) {
2061 return -TARGET_EINVAL;
2064 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2065 return -TARGET_EFAULT;
2068 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2070 ret = get_errno(setsockopt(sockfd, level, optname,
2071 &pki, sizeof(pki)));
2072 break;
2074 case IPV6_ADD_MEMBERSHIP:
2075 case IPV6_DROP_MEMBERSHIP:
2077 struct ipv6_mreq ipv6mreq;
2079 if (optlen < sizeof(ipv6mreq)) {
2080 return -TARGET_EINVAL;
2083 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2084 return -TARGET_EFAULT;
2087 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2089 ret = get_errno(setsockopt(sockfd, level, optname,
2090 &ipv6mreq, sizeof(ipv6mreq)));
2091 break;
2093 default:
2094 goto unimplemented;
2096 break;
2097 case SOL_ICMPV6:
2098 switch (optname) {
2099 case ICMPV6_FILTER:
2101 struct icmp6_filter icmp6f;
2103 if (optlen > sizeof(icmp6f)) {
2104 optlen = sizeof(icmp6f);
2107 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2108 return -TARGET_EFAULT;
2111 for (val = 0; val < 8; val++) {
2112 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2115 ret = get_errno(setsockopt(sockfd, level, optname,
2116 &icmp6f, optlen));
2117 break;
2119 default:
2120 goto unimplemented;
2122 break;
2123 case SOL_RAW:
2124 switch (optname) {
2125 case ICMP_FILTER:
2126 case IPV6_CHECKSUM:
2127 /* These take a u32 value. */
2128 if (optlen < sizeof(uint32_t)) {
2129 return -TARGET_EINVAL;
2132 if (get_user_u32(val, optval_addr)) {
2133 return -TARGET_EFAULT;
2135 ret = get_errno(setsockopt(sockfd, level, optname,
2136 &val, sizeof(val)));
2137 break;
2139 default:
2140 goto unimplemented;
2142 break;
2143 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2144 case SOL_ALG:
2145 switch (optname) {
2146 case ALG_SET_KEY:
2148 char *alg_key = g_malloc(optlen);
2150 if (!alg_key) {
2151 return -TARGET_ENOMEM;
2153 if (copy_from_user(alg_key, optval_addr, optlen)) {
2154 g_free(alg_key);
2155 return -TARGET_EFAULT;
2157 ret = get_errno(setsockopt(sockfd, level, optname,
2158 alg_key, optlen));
2159 g_free(alg_key);
2160 break;
2162 case ALG_SET_AEAD_AUTHSIZE:
2164 ret = get_errno(setsockopt(sockfd, level, optname,
2165 NULL, optlen));
2166 break;
2168 default:
2169 goto unimplemented;
2171 break;
2172 #endif
2173 case TARGET_SOL_SOCKET:
2174 switch (optname) {
2175 case TARGET_SO_RCVTIMEO:
2177 struct timeval tv;
2179 optname = SO_RCVTIMEO;
2181 set_timeout:
2182 if (optlen != sizeof(struct target_timeval)) {
2183 return -TARGET_EINVAL;
2186 if (copy_from_user_timeval(&tv, optval_addr)) {
2187 return -TARGET_EFAULT;
2190 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2191 &tv, sizeof(tv)));
2192 return ret;
2194 case TARGET_SO_SNDTIMEO:
2195 optname = SO_SNDTIMEO;
2196 goto set_timeout;
2197 case TARGET_SO_ATTACH_FILTER:
2199 struct target_sock_fprog *tfprog;
2200 struct target_sock_filter *tfilter;
2201 struct sock_fprog fprog;
2202 struct sock_filter *filter;
2203 int i;
2205 if (optlen != sizeof(*tfprog)) {
2206 return -TARGET_EINVAL;
2208 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2209 return -TARGET_EFAULT;
2211 if (!lock_user_struct(VERIFY_READ, tfilter,
2212 tswapal(tfprog->filter), 0)) {
2213 unlock_user_struct(tfprog, optval_addr, 1);
2214 return -TARGET_EFAULT;
2217 fprog.len = tswap16(tfprog->len);
2218 filter = g_try_new(struct sock_filter, fprog.len);
2219 if (filter == NULL) {
2220 unlock_user_struct(tfilter, tfprog->filter, 1);
2221 unlock_user_struct(tfprog, optval_addr, 1);
2222 return -TARGET_ENOMEM;
2224 for (i = 0; i < fprog.len; i++) {
2225 filter[i].code = tswap16(tfilter[i].code);
2226 filter[i].jt = tfilter[i].jt;
2227 filter[i].jf = tfilter[i].jf;
2228 filter[i].k = tswap32(tfilter[i].k);
2230 fprog.filter = filter;
2232 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2233 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2234 g_free(filter);
2236 unlock_user_struct(tfilter, tfprog->filter, 1);
2237 unlock_user_struct(tfprog, optval_addr, 1);
2238 return ret;
2240 case TARGET_SO_BINDTODEVICE:
2242 char *dev_ifname, *addr_ifname;
2244 if (optlen > IFNAMSIZ - 1) {
2245 optlen = IFNAMSIZ - 1;
2247 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2248 if (!dev_ifname) {
2249 return -TARGET_EFAULT;
2251 optname = SO_BINDTODEVICE;
2252 addr_ifname = alloca(IFNAMSIZ);
2253 memcpy(addr_ifname, dev_ifname, optlen);
2254 addr_ifname[optlen] = 0;
2255 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2256 addr_ifname, optlen));
2257 unlock_user (dev_ifname, optval_addr, 0);
2258 return ret;
2260 case TARGET_SO_LINGER:
2262 struct linger lg;
2263 struct target_linger *tlg;
2265 if (optlen != sizeof(struct target_linger)) {
2266 return -TARGET_EINVAL;
2268 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2269 return -TARGET_EFAULT;
2271 __get_user(lg.l_onoff, &tlg->l_onoff);
2272 __get_user(lg.l_linger, &tlg->l_linger);
2273 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2274 &lg, sizeof(lg)));
2275 unlock_user_struct(tlg, optval_addr, 0);
2276 return ret;
2278 /* Options with 'int' argument. */
2279 case TARGET_SO_DEBUG:
2280 optname = SO_DEBUG;
2281 break;
2282 case TARGET_SO_REUSEADDR:
2283 optname = SO_REUSEADDR;
2284 break;
2285 #ifdef SO_REUSEPORT
2286 case TARGET_SO_REUSEPORT:
2287 optname = SO_REUSEPORT;
2288 break;
2289 #endif
2290 case TARGET_SO_TYPE:
2291 optname = SO_TYPE;
2292 break;
2293 case TARGET_SO_ERROR:
2294 optname = SO_ERROR;
2295 break;
2296 case TARGET_SO_DONTROUTE:
2297 optname = SO_DONTROUTE;
2298 break;
2299 case TARGET_SO_BROADCAST:
2300 optname = SO_BROADCAST;
2301 break;
2302 case TARGET_SO_SNDBUF:
2303 optname = SO_SNDBUF;
2304 break;
2305 case TARGET_SO_SNDBUFFORCE:
2306 optname = SO_SNDBUFFORCE;
2307 break;
2308 case TARGET_SO_RCVBUF:
2309 optname = SO_RCVBUF;
2310 break;
2311 case TARGET_SO_RCVBUFFORCE:
2312 optname = SO_RCVBUFFORCE;
2313 break;
2314 case TARGET_SO_KEEPALIVE:
2315 optname = SO_KEEPALIVE;
2316 break;
2317 case TARGET_SO_OOBINLINE:
2318 optname = SO_OOBINLINE;
2319 break;
2320 case TARGET_SO_NO_CHECK:
2321 optname = SO_NO_CHECK;
2322 break;
2323 case TARGET_SO_PRIORITY:
2324 optname = SO_PRIORITY;
2325 break;
2326 #ifdef SO_BSDCOMPAT
2327 case TARGET_SO_BSDCOMPAT:
2328 optname = SO_BSDCOMPAT;
2329 break;
2330 #endif
2331 case TARGET_SO_PASSCRED:
2332 optname = SO_PASSCRED;
2333 break;
2334 case TARGET_SO_PASSSEC:
2335 optname = SO_PASSSEC;
2336 break;
2337 case TARGET_SO_TIMESTAMP:
2338 optname = SO_TIMESTAMP;
2339 break;
2340 case TARGET_SO_RCVLOWAT:
2341 optname = SO_RCVLOWAT;
2342 break;
2343 default:
2344 goto unimplemented;
2346 if (optlen < sizeof(uint32_t))
2347 return -TARGET_EINVAL;
2349 if (get_user_u32(val, optval_addr))
2350 return -TARGET_EFAULT;
2351 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2352 break;
2353 #ifdef SOL_NETLINK
2354 case SOL_NETLINK:
2355 switch (optname) {
2356 case NETLINK_PKTINFO:
2357 case NETLINK_ADD_MEMBERSHIP:
2358 case NETLINK_DROP_MEMBERSHIP:
2359 case NETLINK_BROADCAST_ERROR:
2360 case NETLINK_NO_ENOBUFS:
2361 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2362 case NETLINK_LISTEN_ALL_NSID:
2363 case NETLINK_CAP_ACK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2366 case NETLINK_EXT_ACK:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2368 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2369 case NETLINK_GET_STRICT_CHK:
2370 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2371 break;
2372 default:
2373 goto unimplemented;
2375 val = 0;
2376 if (optlen < sizeof(uint32_t)) {
2377 return -TARGET_EINVAL;
2379 if (get_user_u32(val, optval_addr)) {
2380 return -TARGET_EFAULT;
2382 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2383 sizeof(val)));
2384 break;
2385 #endif /* SOL_NETLINK */
2386 default:
2387 unimplemented:
2388 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2389 level, optname);
2390 ret = -TARGET_ENOPROTOOPT;
2392 return ret;
2393 }
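/*
 * Illustrative sketch (guest-side fragment, hypothetical fd): the ordinary
 * call serviced by the TARGET_SO_RCVTIMEO branch above. do_setsockopt()
 * requires optlen to be exactly sizeof(struct target_timeval) and converts
 * the two fields with copy_from_user_timeval() before calling the host.
 */
#if 0
struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
    perror("setsockopt");
}
#endif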
2395 /* do_getsockopt() must return target values and target errnos. */
2396 static abi_long do_getsockopt(int sockfd, int level, int optname,
2397 abi_ulong optval_addr, abi_ulong optlen)
2399 abi_long ret;
2400 int len, val;
2401 socklen_t lv;
2403 switch(level) {
2404 case TARGET_SOL_SOCKET:
2405 level = SOL_SOCKET;
2406 switch (optname) {
2407 /* These don't just return a single integer */
2408 case TARGET_SO_PEERNAME:
2409 goto unimplemented;
2410 case TARGET_SO_RCVTIMEO: {
2411 struct timeval tv;
2412 socklen_t tvlen;
2414 optname = SO_RCVTIMEO;
2416 get_timeout:
2417 if (get_user_u32(len, optlen)) {
2418 return -TARGET_EFAULT;
2420 if (len < 0) {
2421 return -TARGET_EINVAL;
2424 tvlen = sizeof(tv);
2425 ret = get_errno(getsockopt(sockfd, level, optname,
2426 &tv, &tvlen));
2427 if (ret < 0) {
2428 return ret;
2430 if (len > sizeof(struct target_timeval)) {
2431 len = sizeof(struct target_timeval);
2433 if (copy_to_user_timeval(optval_addr, &tv)) {
2434 return -TARGET_EFAULT;
2436 if (put_user_u32(len, optlen)) {
2437 return -TARGET_EFAULT;
2439 break;
2441 case TARGET_SO_SNDTIMEO:
2442 optname = SO_SNDTIMEO;
2443 goto get_timeout;
2444 case TARGET_SO_PEERCRED: {
2445 struct ucred cr;
2446 socklen_t crlen;
2447 struct target_ucred *tcr;
2449 if (get_user_u32(len, optlen)) {
2450 return -TARGET_EFAULT;
2452 if (len < 0) {
2453 return -TARGET_EINVAL;
2456 crlen = sizeof(cr);
2457 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2458 &cr, &crlen));
2459 if (ret < 0) {
2460 return ret;
2462 if (len > crlen) {
2463 len = crlen;
2465 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2466 return -TARGET_EFAULT;
2468 __put_user(cr.pid, &tcr->pid);
2469 __put_user(cr.uid, &tcr->uid);
2470 __put_user(cr.gid, &tcr->gid);
2471 unlock_user_struct(tcr, optval_addr, 1);
2472 if (put_user_u32(len, optlen)) {
2473 return -TARGET_EFAULT;
2475 break;
2477 case TARGET_SO_PEERSEC: {
2478 char *name;
2480 if (get_user_u32(len, optlen)) {
2481 return -TARGET_EFAULT;
2483 if (len < 0) {
2484 return -TARGET_EINVAL;
2486 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2487 if (!name) {
2488 return -TARGET_EFAULT;
2490 lv = len;
2491 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2492 name, &lv));
2493 if (put_user_u32(lv, optlen)) {
2494 ret = -TARGET_EFAULT;
2496 unlock_user(name, optval_addr, lv);
2497 break;
2499 case TARGET_SO_LINGER:
2501 struct linger lg;
2502 socklen_t lglen;
2503 struct target_linger *tlg;
2505 if (get_user_u32(len, optlen)) {
2506 return -TARGET_EFAULT;
2508 if (len < 0) {
2509 return -TARGET_EINVAL;
2512 lglen = sizeof(lg);
2513 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2514 &lg, &lglen));
2515 if (ret < 0) {
2516 return ret;
2518 if (len > lglen) {
2519 len = lglen;
2521 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2522 return -TARGET_EFAULT;
2524 __put_user(lg.l_onoff, &tlg->l_onoff);
2525 __put_user(lg.l_linger, &tlg->l_linger);
2526 unlock_user_struct(tlg, optval_addr, 1);
2527 if (put_user_u32(len, optlen)) {
2528 return -TARGET_EFAULT;
2530 break;
2532 /* Options with 'int' argument. */
2533 case TARGET_SO_DEBUG:
2534 optname = SO_DEBUG;
2535 goto int_case;
2536 case TARGET_SO_REUSEADDR:
2537 optname = SO_REUSEADDR;
2538 goto int_case;
2539 #ifdef SO_REUSEPORT
2540 case TARGET_SO_REUSEPORT:
2541 optname = SO_REUSEPORT;
2542 goto int_case;
2543 #endif
2544 case TARGET_SO_TYPE:
2545 optname = SO_TYPE;
2546 goto int_case;
2547 case TARGET_SO_ERROR:
2548 optname = SO_ERROR;
2549 goto int_case;
2550 case TARGET_SO_DONTROUTE:
2551 optname = SO_DONTROUTE;
2552 goto int_case;
2553 case TARGET_SO_BROADCAST:
2554 optname = SO_BROADCAST;
2555 goto int_case;
2556 case TARGET_SO_SNDBUF:
2557 optname = SO_SNDBUF;
2558 goto int_case;
2559 case TARGET_SO_RCVBUF:
2560 optname = SO_RCVBUF;
2561 goto int_case;
2562 case TARGET_SO_KEEPALIVE:
2563 optname = SO_KEEPALIVE;
2564 goto int_case;
2565 case TARGET_SO_OOBINLINE:
2566 optname = SO_OOBINLINE;
2567 goto int_case;
2568 case TARGET_SO_NO_CHECK:
2569 optname = SO_NO_CHECK;
2570 goto int_case;
2571 case TARGET_SO_PRIORITY:
2572 optname = SO_PRIORITY;
2573 goto int_case;
2574 #ifdef SO_BSDCOMPAT
2575 case TARGET_SO_BSDCOMPAT:
2576 optname = SO_BSDCOMPAT;
2577 goto int_case;
2578 #endif
2579 case TARGET_SO_PASSCRED:
2580 optname = SO_PASSCRED;
2581 goto int_case;
2582 case TARGET_SO_TIMESTAMP:
2583 optname = SO_TIMESTAMP;
2584 goto int_case;
2585 case TARGET_SO_RCVLOWAT:
2586 optname = SO_RCVLOWAT;
2587 goto int_case;
2588 case TARGET_SO_ACCEPTCONN:
2589 optname = SO_ACCEPTCONN;
2590 goto int_case;
2591 default:
2592 goto int_case;
2594 break;
2595 case SOL_TCP:
2596 /* TCP options all take an 'int' value. */
2597 int_case:
2598 if (get_user_u32(len, optlen))
2599 return -TARGET_EFAULT;
2600 if (len < 0)
2601 return -TARGET_EINVAL;
2602 lv = sizeof(lv);
2603 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2604 if (ret < 0)
2605 return ret;
2606 if (optname == SO_TYPE) {
2607 val = host_to_target_sock_type(val);
2609 if (len > lv)
2610 len = lv;
2611 if (len == 4) {
2612 if (put_user_u32(val, optval_addr))
2613 return -TARGET_EFAULT;
2614 } else {
2615 if (put_user_u8(val, optval_addr))
2616 return -TARGET_EFAULT;
2618 if (put_user_u32(len, optlen))
2619 return -TARGET_EFAULT;
2620 break;
2621 case SOL_IP:
2622 switch(optname) {
2623 case IP_TOS:
2624 case IP_TTL:
2625 case IP_HDRINCL:
2626 case IP_ROUTER_ALERT:
2627 case IP_RECVOPTS:
2628 case IP_RETOPTS:
2629 case IP_PKTINFO:
2630 case IP_MTU_DISCOVER:
2631 case IP_RECVERR:
2632 case IP_RECVTOS:
2633 #ifdef IP_FREEBIND
2634 case IP_FREEBIND:
2635 #endif
2636 case IP_MULTICAST_TTL:
2637 case IP_MULTICAST_LOOP:
2638 if (get_user_u32(len, optlen))
2639 return -TARGET_EFAULT;
2640 if (len < 0)
2641 return -TARGET_EINVAL;
2642 lv = sizeof(lv);
2643 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2644 if (ret < 0)
2645 return ret;
2646 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2647 len = 1;
2648 if (put_user_u32(len, optlen)
2649 || put_user_u8(val, optval_addr))
2650 return -TARGET_EFAULT;
2651 } else {
2652 if (len > sizeof(int))
2653 len = sizeof(int);
2654 if (put_user_u32(len, optlen)
2655 || put_user_u32(val, optval_addr))
2656 return -TARGET_EFAULT;
2658 break;
2659 default:
2660 ret = -TARGET_ENOPROTOOPT;
2661 break;
2663 break;
2664 case SOL_IPV6:
2665 switch (optname) {
2666 case IPV6_MTU_DISCOVER:
2667 case IPV6_MTU:
2668 case IPV6_V6ONLY:
2669 case IPV6_RECVPKTINFO:
2670 case IPV6_UNICAST_HOPS:
2671 case IPV6_MULTICAST_HOPS:
2672 case IPV6_MULTICAST_LOOP:
2673 case IPV6_RECVERR:
2674 case IPV6_RECVHOPLIMIT:
2675 case IPV6_2292HOPLIMIT:
2676 case IPV6_CHECKSUM:
2677 case IPV6_ADDRFORM:
2678 case IPV6_2292PKTINFO:
2679 case IPV6_RECVTCLASS:
2680 case IPV6_RECVRTHDR:
2681 case IPV6_2292RTHDR:
2682 case IPV6_RECVHOPOPTS:
2683 case IPV6_2292HOPOPTS:
2684 case IPV6_RECVDSTOPTS:
2685 case IPV6_2292DSTOPTS:
2686 case IPV6_TCLASS:
2687 #ifdef IPV6_RECVPATHMTU
2688 case IPV6_RECVPATHMTU:
2689 #endif
2690 #ifdef IPV6_TRANSPARENT
2691 case IPV6_TRANSPARENT:
2692 #endif
2693 #ifdef IPV6_FREEBIND
2694 case IPV6_FREEBIND:
2695 #endif
2696 #ifdef IPV6_RECVORIGDSTADDR
2697 case IPV6_RECVORIGDSTADDR:
2698 #endif
2699 if (get_user_u32(len, optlen))
2700 return -TARGET_EFAULT;
2701 if (len < 0)
2702 return -TARGET_EINVAL;
2703 lv = sizeof(lv);
2704 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2705 if (ret < 0)
2706 return ret;
2707 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2708 len = 1;
2709 if (put_user_u32(len, optlen)
2710 || put_user_u8(val, optval_addr))
2711 return -TARGET_EFAULT;
2712 } else {
2713 if (len > sizeof(int))
2714 len = sizeof(int);
2715 if (put_user_u32(len, optlen)
2716 || put_user_u32(val, optval_addr))
2717 return -TARGET_EFAULT;
2719 break;
2720 default:
2721 ret = -TARGET_ENOPROTOOPT;
2722 break;
2724 break;
2725 #ifdef SOL_NETLINK
2726 case SOL_NETLINK:
2727 switch (optname) {
2728 case NETLINK_PKTINFO:
2729 case NETLINK_BROADCAST_ERROR:
2730 case NETLINK_NO_ENOBUFS:
2731 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2732 case NETLINK_LISTEN_ALL_NSID:
2733 case NETLINK_CAP_ACK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2736 case NETLINK_EXT_ACK:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2738 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2739 case NETLINK_GET_STRICT_CHK:
2740 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2741 if (get_user_u32(len, optlen)) {
2742 return -TARGET_EFAULT;
2744 if (len != sizeof(val)) {
2745 return -TARGET_EINVAL;
2747 lv = len;
2748 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2749 if (ret < 0) {
2750 return ret;
2752 if (put_user_u32(lv, optlen)
2753 || put_user_u32(val, optval_addr)) {
2754 return -TARGET_EFAULT;
2756 break;
2757 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2758 case NETLINK_LIST_MEMBERSHIPS:
2760 uint32_t *results;
2761 int i;
2762 if (get_user_u32(len, optlen)) {
2763 return -TARGET_EFAULT;
2765 if (len < 0) {
2766 return -TARGET_EINVAL;
2768 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2769 if (!results) {
2770 return -TARGET_EFAULT;
2772 lv = len;
2773 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2774 if (ret < 0) {
2775 unlock_user(results, optval_addr, 0);
2776 return ret;
2778 /* Swap host endianness to target endianness. */
2779 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2780 results[i] = tswap32(results[i]);
2782 if (put_user_u32(lv, optlen)) {
2783 return -TARGET_EFAULT;
2785 unlock_user(results, optval_addr, 0);
2786 break;
2788 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2789 default:
2790 goto unimplemented;
2792 break;
2793 #endif /* SOL_NETLINK */
2794 default:
2795 unimplemented:
2796 qemu_log_mask(LOG_UNIMP,
2797 "getsockopt level=%d optname=%d not yet supported\n",
2798 level, optname);
2799 ret = -TARGET_EOPNOTSUPP;
2800 break;
2802 return ret;
2803 }
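/*
 * Illustrative sketch (guest-side fragment, hypothetical fd): the int_case
 * path above always fetches an int from the host, then writes back either
 * 4 bytes or, when the guest asked for a single byte, just the low byte.
 */
#if 0
int val;
socklen_t len = sizeof(val);                 /* len is an in/out parameter */
if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &val, &len) == 0) {
    /* val has been mapped through host_to_target_sock_type() */
}
#endif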
2805 /* Convert target low/high pair representing file offset into the host
2806 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2807 * as the kernel doesn't handle them either.
2808 */
2809 static void target_to_host_low_high(abi_ulong tlow,
2810 abi_ulong thigh,
2811 unsigned long *hlow,
2812 unsigned long *hhigh)
2814 uint64_t off = tlow |
2815 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2816 TARGET_LONG_BITS / 2;
2818 *hlow = off;
2819 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2820 }
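/*
 * Worked example (illustrative): a 32-bit guest calling e.g. sendfile64
 * with offset 0x0123456789abcdef passes tlow = 0x89abcdef and
 * thigh = 0x01234567. With TARGET_LONG_BITS == 32 and HOST_LONG_BITS == 64:
 *
 *   off    = 0x89abcdef | ((0x01234567ULL << 16) << 16)
 *          = 0x0123456789abcdef
 *   *hlow  = 0x0123456789abcdef
 *   *hhigh = 0
 *
 * The shifts are done in two halves because a single shift by the full
 * type width (64 on 64-bit targets or hosts) would be undefined behaviour
 * in C.
 */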
2822 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2823 abi_ulong count, int copy)
2825 struct target_iovec *target_vec;
2826 struct iovec *vec;
2827 abi_ulong total_len, max_len;
2828 int i;
2829 int err = 0;
2830 bool bad_address = false;
2832 if (count == 0) {
2833 errno = 0;
2834 return NULL;
2836 if (count > IOV_MAX) {
2837 errno = EINVAL;
2838 return NULL;
2841 vec = g_try_new0(struct iovec, count);
2842 if (vec == NULL) {
2843 errno = ENOMEM;
2844 return NULL;
2847 target_vec = lock_user(VERIFY_READ, target_addr,
2848 count * sizeof(struct target_iovec), 1);
2849 if (target_vec == NULL) {
2850 err = EFAULT;
2851 goto fail2;
2854 /* ??? If host page size > target page size, this will result in a
2855 value larger than what we can actually support. */
2856 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2857 total_len = 0;
2859 for (i = 0; i < count; i++) {
2860 abi_ulong base = tswapal(target_vec[i].iov_base);
2861 abi_long len = tswapal(target_vec[i].iov_len);
2863 if (len < 0) {
2864 err = EINVAL;
2865 goto fail;
2866 } else if (len == 0) {
2867 /* Zero length pointer is ignored. */
2868 vec[i].iov_base = 0;
2869 } else {
2870 vec[i].iov_base = lock_user(type, base, len, copy);
2871 /* If the first buffer pointer is bad, this is a fault. But
2872 * subsequent bad buffers will result in a partial write; this
2873 * is realized by filling the vector with null pointers and
2874 * zero lengths. */
2875 if (!vec[i].iov_base) {
2876 if (i == 0) {
2877 err = EFAULT;
2878 goto fail;
2879 } else {
2880 bad_address = true;
2883 if (bad_address) {
2884 len = 0;
2886 if (len > max_len - total_len) {
2887 len = max_len - total_len;
2890 vec[i].iov_len = len;
2891 total_len += len;
2894 unlock_user(target_vec, target_addr, 0);
2895 return vec;
2897 fail:
2898 while (--i >= 0) {
2899 if (tswapal(target_vec[i].iov_len) > 0) {
2900 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2903 unlock_user(target_vec, target_addr, 0);
2904 fail2:
2905 g_free(vec);
2906 errno = err;
2907 return NULL;
2908 }
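/*
 * Illustrative caller pattern (hypothetical fragment): lock_iovec() and
 * unlock_iovec() must be paired, with the copy flags describing the data
 * direction. For a writev-style call the guest buffers are read, so they
 * are locked with VERIFY_READ/copy=1 and unlocked with copy=0; a
 * readv-style call reverses the two copy flags.
 */
#if 0
struct iovec *vec = lock_iovec(VERIFY_READ, target_addr, count, 1);
if (vec == NULL) {
    return -host_to_target_errno(errno);   /* lock_iovec reported via errno */
}
ret = get_errno(safe_writev(fd, vec, count));
unlock_iovec(vec, target_addr, count, 0);  /* nothing to copy back */
#endif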
2910 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2911 abi_ulong count, int copy)
2913 struct target_iovec *target_vec;
2914 int i;
2916 target_vec = lock_user(VERIFY_READ, target_addr,
2917 count * sizeof(struct target_iovec), 1);
2918 if (target_vec) {
2919 for (i = 0; i < count; i++) {
2920 abi_ulong base = tswapal(target_vec[i].iov_base);
2921 abi_long len = tswapal(target_vec[i].iov_len);
2922 if (len < 0) {
2923 break;
2925 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2927 unlock_user(target_vec, target_addr, 0);
2930 g_free(vec);
2933 static inline int target_to_host_sock_type(int *type)
2935 int host_type = 0;
2936 int target_type = *type;
2938 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2939 case TARGET_SOCK_DGRAM:
2940 host_type = SOCK_DGRAM;
2941 break;
2942 case TARGET_SOCK_STREAM:
2943 host_type = SOCK_STREAM;
2944 break;
2945 default:
2946 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2947 break;
2949 if (target_type & TARGET_SOCK_CLOEXEC) {
2950 #if defined(SOCK_CLOEXEC)
2951 host_type |= SOCK_CLOEXEC;
2952 #else
2953 return -TARGET_EINVAL;
2954 #endif
2956 if (target_type & TARGET_SOCK_NONBLOCK) {
2957 #if defined(SOCK_NONBLOCK)
2958 host_type |= SOCK_NONBLOCK;
2959 #elif !defined(O_NONBLOCK)
2960 return -TARGET_EINVAL;
2961 #endif
2963 *type = host_type;
2964 return 0;
2965 }
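/*
 * Illustrative usage (fragment; assumes a target whose SOCK_* numbering or
 * flag availability differs from the host's): the type argument is
 * rewritten in place, and missing host support for the CLOEXEC/NONBLOCK
 * flags surfaces as -TARGET_EINVAL.
 */
#if 0
int type = TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC;
if (target_to_host_sock_type(&type) == 0) {
    /* type is now SOCK_STREAM | SOCK_CLOEXEC in host terms */
}
#endif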
2967 /* Try to emulate socket type flags after socket creation. */
2968 static int sock_flags_fixup(int fd, int target_type)
2970 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2971 if (target_type & TARGET_SOCK_NONBLOCK) {
2972 int flags = fcntl(fd, F_GETFL);
2973 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2974 close(fd);
2975 return -TARGET_EINVAL;
2978 #endif
2979 return fd;
2982 /* do_socket() must return target values and target errnos. */
2983 static abi_long do_socket(int domain, int type, int protocol)
2985 int target_type = type;
2986 int ret;
2988 ret = target_to_host_sock_type(&type);
2989 if (ret) {
2990 return ret;
2993 if (domain == PF_NETLINK && !(
2994 #ifdef CONFIG_RTNETLINK
2995 protocol == NETLINK_ROUTE ||
2996 #endif
2997 protocol == NETLINK_KOBJECT_UEVENT ||
2998 protocol == NETLINK_AUDIT)) {
2999 return -TARGET_EPROTONOSUPPORT;
3002 if (domain == AF_PACKET ||
3003 (domain == AF_INET && type == SOCK_PACKET)) {
3004 protocol = tswap16(protocol);
3007 ret = get_errno(socket(domain, type, protocol));
3008 if (ret >= 0) {
3009 ret = sock_flags_fixup(ret, target_type);
3010 if (type == SOCK_PACKET) {
3011 /* Handle an obsolete case:
3012 * if the socket type is SOCK_PACKET, bind by name.
3013 */
3014 fd_trans_register(ret, &target_packet_trans);
3015 } else if (domain == PF_NETLINK) {
3016 switch (protocol) {
3017 #ifdef CONFIG_RTNETLINK
3018 case NETLINK_ROUTE:
3019 fd_trans_register(ret, &target_netlink_route_trans);
3020 break;
3021 #endif
3022 case NETLINK_KOBJECT_UEVENT:
3023 /* nothing to do: messages are strings */
3024 break;
3025 case NETLINK_AUDIT:
3026 fd_trans_register(ret, &target_netlink_audit_trans);
3027 break;
3028 default:
3029 g_assert_not_reached();
3033 return ret;
3036 /* do_bind() must return target values and target errnos. */
3037 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3038 socklen_t addrlen)
3040 void *addr;
3041 abi_long ret;
3043 if ((int)addrlen < 0) {
3044 return -TARGET_EINVAL;
3047 addr = alloca(addrlen+1);
3049 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3050 if (ret)
3051 return ret;
3053 return get_errno(bind(sockfd, addr, addrlen));
3056 /* do_connect() must return target values and target errnos. */
3057 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3058 socklen_t addrlen)
3060 void *addr;
3061 abi_long ret;
3063 if ((int)addrlen < 0) {
3064 return -TARGET_EINVAL;
3067 addr = alloca(addrlen+1);
3069 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3070 if (ret)
3071 return ret;
3073 return get_errno(safe_connect(sockfd, addr, addrlen));
3076 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3077 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3078 int flags, int send)
3080 abi_long ret, len;
3081 struct msghdr msg;
3082 abi_ulong count;
3083 struct iovec *vec;
3084 abi_ulong target_vec;
3086 if (msgp->msg_name) {
3087 msg.msg_namelen = tswap32(msgp->msg_namelen);
3088 msg.msg_name = alloca(msg.msg_namelen+1);
3089 ret = target_to_host_sockaddr(fd, msg.msg_name,
3090 tswapal(msgp->msg_name),
3091 msg.msg_namelen);
3092 if (ret == -TARGET_EFAULT) {
3093 /* For connected sockets msg_name and msg_namelen must
3094 * be ignored, so returning EFAULT immediately is wrong.
3095 * Instead, pass a bad msg_name to the host kernel, and
3096 * let it decide whether to return EFAULT or not.
3097 */
3098 msg.msg_name = (void *)-1;
3099 } else if (ret) {
3100 goto out2;
3102 } else {
3103 msg.msg_name = NULL;
3104 msg.msg_namelen = 0;
3106 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3107 msg.msg_control = alloca(msg.msg_controllen);
3108 memset(msg.msg_control, 0, msg.msg_controllen);
3110 msg.msg_flags = tswap32(msgp->msg_flags);
3112 count = tswapal(msgp->msg_iovlen);
3113 target_vec = tswapal(msgp->msg_iov);
3115 if (count > IOV_MAX) {
3116 /* sendmsg/recvmsg return a different errno for this condition than
3117 * readv/writev, so we must catch it here before lock_iovec() does.
3118 */
3119 ret = -TARGET_EMSGSIZE;
3120 goto out2;
3123 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3124 target_vec, count, send);
3125 if (vec == NULL) {
3126 ret = -host_to_target_errno(errno);
3127 goto out2;
3129 msg.msg_iovlen = count;
3130 msg.msg_iov = vec;
3132 if (send) {
3133 if (fd_trans_target_to_host_data(fd)) {
3134 void *host_msg;
3136 host_msg = g_malloc(msg.msg_iov->iov_len);
3137 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3138 ret = fd_trans_target_to_host_data(fd)(host_msg,
3139 msg.msg_iov->iov_len);
3140 if (ret >= 0) {
3141 msg.msg_iov->iov_base = host_msg;
3142 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3144 g_free(host_msg);
3145 } else {
3146 ret = target_to_host_cmsg(&msg, msgp);
3147 if (ret == 0) {
3148 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3151 } else {
3152 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3153 if (!is_error(ret)) {
3154 len = ret;
3155 if (fd_trans_host_to_target_data(fd)) {
3156 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3157 MIN(msg.msg_iov->iov_len, len));
3158 } else {
3159 ret = host_to_target_cmsg(msgp, &msg);
3161 if (!is_error(ret)) {
3162 msgp->msg_namelen = tswap32(msg.msg_namelen);
3163 msgp->msg_flags = tswap32(msg.msg_flags);
3164 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3165 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3166 msg.msg_name, msg.msg_namelen);
3167 if (ret) {
3168 goto out;
3172 ret = len;
3177 out:
3178 unlock_iovec(vec, target_vec, count, !send);
3179 out2:
3180 return ret;
3183 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3184 int flags, int send)
3186 abi_long ret;
3187 struct target_msghdr *msgp;
3189 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3190 msgp,
3191 target_msg,
3192 send ? 1 : 0)) {
3193 return -TARGET_EFAULT;
3195 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3196 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3197 return ret;
3200 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3201 * so it might not have this *mmsg-specific flag either.
3202 */
3203 #ifndef MSG_WAITFORONE
3204 #define MSG_WAITFORONE 0x10000
3205 #endif
3207 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3208 unsigned int vlen, unsigned int flags,
3209 int send)
3211 struct target_mmsghdr *mmsgp;
3212 abi_long ret = 0;
3213 int i;
3215 if (vlen > UIO_MAXIOV) {
3216 vlen = UIO_MAXIOV;
3219 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3220 if (!mmsgp) {
3221 return -TARGET_EFAULT;
3224 for (i = 0; i < vlen; i++) {
3225 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3226 if (is_error(ret)) {
3227 break;
3229 mmsgp[i].msg_len = tswap32(ret);
3230 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3231 if (flags & MSG_WAITFORONE) {
3232 flags |= MSG_DONTWAIT;
3236 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3238 /* Return number of datagrams sent if we sent any at all;
3239 * otherwise return the error.
3240 */
3241 if (i) {
3242 return i;
3243 }
3244 return ret;
3245 }
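/*
 * Illustrative sketch (guest-side fragment, hypothetical fd): the
 * recvmmsg() behaviour emulated by the loop above. MSG_WAITFORONE blocks
 * only for the first datagram; once one arrives, MSG_DONTWAIT is OR'ed
 * into flags so the remaining iterations return immediately.
 */
#if 0
struct mmsghdr msgs[8];                /* msg_hdr/iovec setup omitted */
int n = recvmmsg(fd, msgs, 8, MSG_WAITFORONE, NULL);
/* n >= 1 on success; only the first datagram may have blocked */
#endif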
3247 /* do_accept4() must return target values and target errnos. */
3248 static abi_long do_accept4(int fd, abi_ulong target_addr,
3249 abi_ulong target_addrlen_addr, int flags)
3251 socklen_t addrlen, ret_addrlen;
3252 void *addr;
3253 abi_long ret;
3254 int host_flags;
3256 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3258 if (target_addr == 0) {
3259 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3262 /* Linux returns EINVAL if the addrlen pointer is invalid. */
3263 if (get_user_u32(addrlen, target_addrlen_addr))
3264 return -TARGET_EINVAL;
3266 if ((int)addrlen < 0) {
3267 return -TARGET_EINVAL;
3270 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3271 return -TARGET_EINVAL;
3273 addr = alloca(addrlen);
3275 ret_addrlen = addrlen;
3276 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3277 if (!is_error(ret)) {
3278 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3279 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3280 ret = -TARGET_EFAULT;
3283 return ret;
3286 /* do_getpeername() must return target values and target errnos. */
3287 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3288 abi_ulong target_addrlen_addr)
3290 socklen_t addrlen, ret_addrlen;
3291 void *addr;
3292 abi_long ret;
3294 if (get_user_u32(addrlen, target_addrlen_addr))
3295 return -TARGET_EFAULT;
3297 if ((int)addrlen < 0) {
3298 return -TARGET_EINVAL;
3301 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3302 return -TARGET_EFAULT;
3304 addr = alloca(addrlen);
3306 ret_addrlen = addrlen;
3307 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3308 if (!is_error(ret)) {
3309 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3310 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3311 ret = -TARGET_EFAULT;
3314 return ret;
3317 /* do_getsockname() must return target values and target errnos. */
3318 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3319 abi_ulong target_addrlen_addr)
3321 socklen_t addrlen, ret_addrlen;
3322 void *addr;
3323 abi_long ret;
3325 if (get_user_u32(addrlen, target_addrlen_addr))
3326 return -TARGET_EFAULT;
3328 if ((int)addrlen < 0) {
3329 return -TARGET_EINVAL;
3332 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3333 return -TARGET_EFAULT;
3335 addr = alloca(addrlen);
3337 ret_addrlen = addrlen;
3338 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3339 if (!is_error(ret)) {
3340 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3341 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3342 ret = -TARGET_EFAULT;
3345 return ret;
3348 /* do_socketpair() must return target values and target errnos. */
3349 static abi_long do_socketpair(int domain, int type, int protocol,
3350 abi_ulong target_tab_addr)
3352 int tab[2];
3353 abi_long ret;
3355 target_to_host_sock_type(&type);
3357 ret = get_errno(socketpair(domain, type, protocol, tab));
3358 if (!is_error(ret)) {
3359 if (put_user_s32(tab[0], target_tab_addr)
3360 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3361 ret = -TARGET_EFAULT;
3363 return ret;
3366 /* do_sendto() must return target values and target errnos. */
3367 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3368 abi_ulong target_addr, socklen_t addrlen)
3370 void *addr;
3371 void *host_msg;
3372 void *copy_msg = NULL;
3373 abi_long ret;
3375 if ((int)addrlen < 0) {
3376 return -TARGET_EINVAL;
3379 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3380 if (!host_msg)
3381 return -TARGET_EFAULT;
3382 if (fd_trans_target_to_host_data(fd)) {
3383 copy_msg = host_msg;
3384 host_msg = g_malloc(len);
3385 memcpy(host_msg, copy_msg, len);
3386 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3387 if (ret < 0) {
3388 goto fail;
3391 if (target_addr) {
3392 addr = alloca(addrlen+1);
3393 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3394 if (ret) {
3395 goto fail;
3397 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3398 } else {
3399 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3401 fail:
3402 if (copy_msg) {
3403 g_free(host_msg);
3404 host_msg = copy_msg;
3406 unlock_user(host_msg, msg, 0);
3407 return ret;
3410 /* do_recvfrom() must return target values and target errnos. */
3411 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3412 abi_ulong target_addr,
3413 abi_ulong target_addrlen)
3415 socklen_t addrlen, ret_addrlen;
3416 void *addr;
3417 void *host_msg;
3418 abi_long ret;
3420 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3421 if (!host_msg)
3422 return -TARGET_EFAULT;
3423 if (target_addr) {
3424 if (get_user_u32(addrlen, target_addrlen)) {
3425 ret = -TARGET_EFAULT;
3426 goto fail;
3428 if ((int)addrlen < 0) {
3429 ret = -TARGET_EINVAL;
3430 goto fail;
3432 addr = alloca(addrlen);
3433 ret_addrlen = addrlen;
3434 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3435 addr, &ret_addrlen));
3436 } else {
3437 addr = NULL; /* To keep compiler quiet. */
3438 addrlen = 0; /* To keep compiler quiet. */
3439 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3441 if (!is_error(ret)) {
3442 if (fd_trans_host_to_target_data(fd)) {
3443 abi_long trans;
3444 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3445 if (is_error(trans)) {
3446 ret = trans;
3447 goto fail;
3450 if (target_addr) {
3451 host_to_target_sockaddr(target_addr, addr,
3452 MIN(addrlen, ret_addrlen));
3453 if (put_user_u32(ret_addrlen, target_addrlen)) {
3454 ret = -TARGET_EFAULT;
3455 goto fail;
3458 unlock_user(host_msg, msg, len);
3459 } else {
3460 fail:
3461 unlock_user(host_msg, msg, 0);
3463 return ret;
3466 #ifdef TARGET_NR_socketcall
3467 /* do_socketcall() must return target values and target errnos. */
3468 static abi_long do_socketcall(int num, abi_ulong vptr)
3470 static const unsigned nargs[] = { /* number of arguments per operation */
3471 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3472 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3473 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3474 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3475 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3476 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3477 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3478 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3479 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3480 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3481 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3482 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3483 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3484 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3485 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3486 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3487 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3488 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3489 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3490 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3492 abi_long a[6]; /* max 6 args */
3493 unsigned i;
3495 /* check the range of the first argument num */
3496 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3497 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3498 return -TARGET_EINVAL;
3500 /* ensure we have space for args */
3501 if (nargs[num] > ARRAY_SIZE(a)) {
3502 return -TARGET_EINVAL;
3504 /* collect the arguments in a[] according to nargs[] */
3505 for (i = 0; i < nargs[num]; ++i) {
3506 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3507 return -TARGET_EFAULT;
3510 /* now when we have the args, invoke the appropriate underlying function */
3511 switch (num) {
3512 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3513 return do_socket(a[0], a[1], a[2]);
3514 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3515 return do_bind(a[0], a[1], a[2]);
3516 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3517 return do_connect(a[0], a[1], a[2]);
3518 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3519 return get_errno(listen(a[0], a[1]));
3520 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3521 return do_accept4(a[0], a[1], a[2], 0);
3522 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3523 return do_getsockname(a[0], a[1], a[2]);
3524 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3525 return do_getpeername(a[0], a[1], a[2]);
3526 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3527 return do_socketpair(a[0], a[1], a[2], a[3]);
3528 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3529 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3530 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3531 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3532 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3533 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3534 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3535 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3536 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3537 return get_errno(shutdown(a[0], a[1]));
3538 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3539 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3540 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3541 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3542 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3543 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3544 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3545 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3546 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3547 return do_accept4(a[0], a[1], a[2], a[3]);
3548 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3549 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3550 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3551 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3552 default:
3553 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3554 return -TARGET_EINVAL;
3555 }
3556 }
3557 #endif
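/*
 * Illustrative sketch (guest-side fragment; fd and sa are hypothetical):
 * on socketcall targets (32-bit x86 and friends) the guest packs every
 * argument into one array and passes its address as vptr, so a connect()
 * from the guest reaches do_socketcall() as num == TARGET_SYS_CONNECT
 * with a[0] = fd, a[1] = sockaddr pointer, a[2] = addrlen, each fetched
 * with get_user_ual() from vptr + i * sizeof(abi_long).
 */
#if 0
unsigned long args[3] = { fd, (unsigned long)&sa, sizeof(sa) };
syscall(__NR_socketcall, 3 /* SYS_CONNECT */, args);
#endif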
3559 #define N_SHM_REGIONS 32
3561 static struct shm_region {
3562 abi_ulong start;
3563 abi_ulong size;
3564 bool in_use;
3565 } shm_regions[N_SHM_REGIONS];
3567 #ifndef TARGET_SEMID64_DS
3568 /* asm-generic version of this struct */
3569 struct target_semid64_ds
3571 struct target_ipc_perm sem_perm;
3572 abi_ulong sem_otime;
3573 #if TARGET_ABI_BITS == 32
3574 abi_ulong __unused1;
3575 #endif
3576 abi_ulong sem_ctime;
3577 #if TARGET_ABI_BITS == 32
3578 abi_ulong __unused2;
3579 #endif
3580 abi_ulong sem_nsems;
3581 abi_ulong __unused3;
3582 abi_ulong __unused4;
3584 #endif
3586 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3587 abi_ulong target_addr)
3589 struct target_ipc_perm *target_ip;
3590 struct target_semid64_ds *target_sd;
3592 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3593 return -TARGET_EFAULT;
3594 target_ip = &(target_sd->sem_perm);
3595 host_ip->__key = tswap32(target_ip->__key);
3596 host_ip->uid = tswap32(target_ip->uid);
3597 host_ip->gid = tswap32(target_ip->gid);
3598 host_ip->cuid = tswap32(target_ip->cuid);
3599 host_ip->cgid = tswap32(target_ip->cgid);
3600 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3601 host_ip->mode = tswap32(target_ip->mode);
3602 #else
3603 host_ip->mode = tswap16(target_ip->mode);
3604 #endif
3605 #if defined(TARGET_PPC)
3606 host_ip->__seq = tswap32(target_ip->__seq);
3607 #else
3608 host_ip->__seq = tswap16(target_ip->__seq);
3609 #endif
3610 unlock_user_struct(target_sd, target_addr, 0);
3611 return 0;
3614 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3615 struct ipc_perm *host_ip)
3617 struct target_ipc_perm *target_ip;
3618 struct target_semid64_ds *target_sd;
3620 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3621 return -TARGET_EFAULT;
3622 target_ip = &(target_sd->sem_perm);
3623 target_ip->__key = tswap32(host_ip->__key);
3624 target_ip->uid = tswap32(host_ip->uid);
3625 target_ip->gid = tswap32(host_ip->gid);
3626 target_ip->cuid = tswap32(host_ip->cuid);
3627 target_ip->cgid = tswap32(host_ip->cgid);
3628 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3629 target_ip->mode = tswap32(host_ip->mode);
3630 #else
3631 target_ip->mode = tswap16(host_ip->mode);
3632 #endif
3633 #if defined(TARGET_PPC)
3634 target_ip->__seq = tswap32(host_ip->__seq);
3635 #else
3636 target_ip->__seq = tswap16(host_ip->__seq);
3637 #endif
3638 unlock_user_struct(target_sd, target_addr, 1);
3639 return 0;
3642 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3643 abi_ulong target_addr)
3645 struct target_semid64_ds *target_sd;
3647 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3648 return -TARGET_EFAULT;
3649 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3650 return -TARGET_EFAULT;
3651 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3652 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3653 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3654 unlock_user_struct(target_sd, target_addr, 0);
3655 return 0;
3658 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3659 struct semid_ds *host_sd)
3661 struct target_semid64_ds *target_sd;
3663 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3664 return -TARGET_EFAULT;
3665 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3666 return -TARGET_EFAULT;
3667 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3668 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3669 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3670 unlock_user_struct(target_sd, target_addr, 1);
3671 return 0;
3674 struct target_seminfo {
3675 int semmap;
3676 int semmni;
3677 int semmns;
3678 int semmnu;
3679 int semmsl;
3680 int semopm;
3681 int semume;
3682 int semusz;
3683 int semvmx;
3684 int semaem;
3687 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3688 struct seminfo *host_seminfo)
3690 struct target_seminfo *target_seminfo;
3691 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3692 return -TARGET_EFAULT;
3693 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3694 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3695 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3696 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3697 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3698 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3699 __put_user(host_seminfo->semume, &target_seminfo->semume);
3700 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3701 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3702 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3703 unlock_user_struct(target_seminfo, target_addr, 1);
3704 return 0;
3707 union semun {
3708 int val;
3709 struct semid_ds *buf;
3710 unsigned short *array;
3711 struct seminfo *__buf;
3714 union target_semun {
3715 int val;
3716 abi_ulong buf;
3717 abi_ulong array;
3718 abi_ulong __buf;
3721 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3722 abi_ulong target_addr)
3724 int nsems;
3725 unsigned short *array;
3726 union semun semun;
3727 struct semid_ds semid_ds;
3728 int i, ret;
3730 semun.buf = &semid_ds;
3732 ret = semctl(semid, 0, IPC_STAT, semun);
3733 if (ret == -1)
3734 return get_errno(ret);
3736 nsems = semid_ds.sem_nsems;
3738 *host_array = g_try_new(unsigned short, nsems);
3739 if (!*host_array) {
3740 return -TARGET_ENOMEM;
3742 array = lock_user(VERIFY_READ, target_addr,
3743 nsems*sizeof(unsigned short), 1);
3744 if (!array) {
3745 g_free(*host_array);
3746 return -TARGET_EFAULT;
3749 for(i=0; i<nsems; i++) {
3750 __get_user((*host_array)[i], &array[i]);
3752 unlock_user(array, target_addr, 0);
3754 return 0;
3757 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3758 unsigned short **host_array)
3760 int nsems;
3761 unsigned short *array;
3762 union semun semun;
3763 struct semid_ds semid_ds;
3764 int i, ret;
3766 semun.buf = &semid_ds;
3768 ret = semctl(semid, 0, IPC_STAT, semun);
3769 if (ret == -1)
3770 return get_errno(ret);
3772 nsems = semid_ds.sem_nsems;
3774 array = lock_user(VERIFY_WRITE, target_addr,
3775 nsems*sizeof(unsigned short), 0);
3776 if (!array)
3777 return -TARGET_EFAULT;
3779 for(i=0; i<nsems; i++) {
3780 __put_user((*host_array)[i], &array[i]);
3782 g_free(*host_array);
3783 unlock_user(array, target_addr, 1);
3785 return 0;
3788 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3789 abi_ulong target_arg)
3791 union target_semun target_su = { .buf = target_arg };
3792 union semun arg;
3793 struct semid_ds dsarg;
3794 unsigned short *array = NULL;
3795 struct seminfo seminfo;
3796 abi_long ret = -TARGET_EINVAL;
3797 abi_long err;
3798 cmd &= 0xff;
3800 switch( cmd ) {
3801 case GETVAL:
3802 case SETVAL:
3803 /* In 64-bit cross-endian situations, we will erroneously pick up
3804 * the wrong half of the union for the "val" element. To rectify
3805 * this, the entire 8-byte structure is byteswapped, followed by
3806 * a swap of the 4 byte val field. In other cases, the data is
3807 * already in proper host byte order. */
3808 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3809 target_su.buf = tswapal(target_su.buf);
3810 arg.val = tswap32(target_su.val);
3811 } else {
3812 arg.val = target_su.val;
3814 ret = get_errno(semctl(semid, semnum, cmd, arg));
3815 break;
3816 case GETALL:
3817 case SETALL:
3818 err = target_to_host_semarray(semid, &array, target_su.array);
3819 if (err)
3820 return err;
3821 arg.array = array;
3822 ret = get_errno(semctl(semid, semnum, cmd, arg));
3823 err = host_to_target_semarray(semid, target_su.array, &array);
3824 if (err)
3825 return err;
3826 break;
3827 case IPC_STAT:
3828 case IPC_SET:
3829 case SEM_STAT:
3830 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3831 if (err)
3832 return err;
3833 arg.buf = &dsarg;
3834 ret = get_errno(semctl(semid, semnum, cmd, arg));
3835 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3836 if (err)
3837 return err;
3838 break;
3839 case IPC_INFO:
3840 case SEM_INFO:
3841 arg.__buf = &seminfo;
3842 ret = get_errno(semctl(semid, semnum, cmd, arg));
3843 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3844 if (err)
3845 return err;
3846 break;
3847 case IPC_RMID:
3848 case GETPID:
3849 case GETNCNT:
3850 case GETZCNT:
3851 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3852 break;
3853 }
3855 return ret;
3856 }
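/*
 * Worked example (illustrative, assuming a big-endian 64-bit guest on a
 * little-endian host): the GETVAL/SETVAL double swap above. The guest's
 * 4-byte val sits in the "wrong" half of the 8-byte union as seen from
 * the host, so the whole union is swapped first and the val field second.
 * Statements from a hypothetical test harness:
 */
#if 0
union target_semun u = { .buf = 0x0000000500000000ULL }; /* raw guest image
                                                            of val = 5     */
u.buf = tswapal(u.buf);    /* -> 0x0000000005000000; val now aliases the
                              half that actually holds the guest's value  */
int val = tswap32(u.val);  /* u.val == 0x05000000 -> 5                    */
#endif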
3858 struct target_sembuf {
3859 unsigned short sem_num;
3860 short sem_op;
3861 short sem_flg;
3864 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3865 abi_ulong target_addr,
3866 unsigned nsops)
3868 struct target_sembuf *target_sembuf;
3869 int i;
3871 target_sembuf = lock_user(VERIFY_READ, target_addr,
3872 nsops*sizeof(struct target_sembuf), 1);
3873 if (!target_sembuf)
3874 return -TARGET_EFAULT;
3876 for(i=0; i<nsops; i++) {
3877 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3878 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3879 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3882 unlock_user(target_sembuf, target_addr, 0);
3884 return 0;
3887 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3888 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
3890 /*
3891 * This macro is required to handle the s390 variants, which pass the
3892 * arguments in a different order than the default.
3893 */
3894 #ifdef __s390x__
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896 (__nsops), (__timeout), (__sops)
3897 #else
3898 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3899 (__nsops), 0, (__sops), (__timeout)
3900 #endif
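/*
 * Illustrative expansion of SEMTIMEDOP_IPC_ARGS: the generic ipc(2) entry
 * point expects (nsops, 0, sops, timeout), while the s390x variant takes
 * (nsops, timeout, sops), so
 *   safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(n, sops, ts))
 * expands to
 *   safe_ipc(IPCOP_semtimedop, semid, n, 0, sops, ts)   (default)
 *   safe_ipc(IPCOP_semtimedop, semid, n, ts, sops)      (__s390x__)
 */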
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif

struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
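
/* Emulate msgctl(2), converting msqid_ds/msginfo structures between
 * target and host layouts as the command requires. */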
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
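
/* Emulate msgsnd(2): copy the target message into a freshly allocated
 * host msgbuf (byte-swapping mtype), then send it via msgsnd or the
 * multiplexed ipc syscall. */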
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

#ifdef __NR_ipc
#if defined(__sparc__)
/* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
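
/* Emulate msgrcv(2): receive into a host msgbuf, then copy mtext and
 * the byte-swapped mtype back into the target buffer. */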
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}

static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
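
/* Emulate shmctl(2), converting shmid_ds/shminfo/shm_info structures
 * between target and host layouts as the command requires. */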
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the larger size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
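
/* Emulate shmat(2): round or reject the requested guest address against
 * the target SHMLBA, attach the segment (picking a suitable VMA when no
 * address was supplied), update the guest page flags, and record the
 * region so a later shmdt can undo it. */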
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
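
/* Emulate shmdt(2): clear the page flags recorded when the region was
 * attached, then detach the host mapping. */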
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for the default variant); the only difference is the handling
     * of SEMTIMEDOP, where on s390 the third parameter is used as a
     * pointer to a struct timespec while the generic variant uses the
     * fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
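
/* Special handling for SIOCGIFCONF: struct ifreq differs in size
 * between target and host and ifc_buf is a pointer, so the ioctl
 * result has to be repacked entry by entry. */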
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the ifreq entries into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}

#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
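
/* Reap a completed URB: the host kernel returns a pointer to the
 * host_urb embedded in our live_urb, from which the guest URB address
 * and the locked guest buffer are recovered and written back. */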
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
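
/* Submit an URB: build a host copy with bookkeeping metadata and keep
 * the guest data buffer locked until the URB is reaped or discarded. */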
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
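
/* Device-mapper ioctls share a single struct dm_ioctl header followed
 * by a variable-sized payload whose layout depends on the command, so
 * each command's payload is converted separately in both directions. */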
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
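
/* BLKPG passes a struct blkpg_ioctl_arg whose data field points at a
 * struct blkpg_partition; convert that payload separately and point
 * the host copy at it. */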
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
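
/* SIOCADDRT/SIOCDELRT: struct rtentry contains a char *rt_dev string
 * pointer that the generic thunk machinery cannot convert, so the
 * struct is converted field by field and the device name is locked
 * separately. */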
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}

static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
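
/* SIOCGSTAMP and SIOCGSTAMPNS return the last packet timestamp; the
 * _OLD and _NEW command variants differ in whether the target expects
 * the old abi-sized or the 64-bit time layout. */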
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

#ifdef HAVE_DRM_H

static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}

static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}

static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}

#endif

IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
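
/* Generic ioctl dispatch: look the command up in ioctl_entries, then
 * either call its special handler or convert the argument according
 * to the entry's thunk type description. */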
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
    { 0, 0, 0, 0 }
};

static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};

static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
5780 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5781 * TARGET_I386 is defined if TARGET_X86_64 is defined
5783 #if defined(TARGET_I386)
5785 /* NOTE: there is really one LDT for all the threads */
5786 static uint8_t *ldt_table;
5788 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5790 int size;
5791 void *p;
5793 if (!ldt_table)
5794 return 0;
5795 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5796 if (size > bytecount)
5797 size = bytecount;
5798 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5799 if (!p)
5800 return -TARGET_EFAULT;
5801 /* ??? Should this be byteswapped? */
5802 memcpy(p, ldt_table, size);
5803 unlock_user(p, ptr, size);
5804 return size;
5807 /* XXX: add locking support */
5808 static abi_long write_ldt(CPUX86State *env,
5809 abi_ulong ptr, unsigned long bytecount, int oldmode)
5811 struct target_modify_ldt_ldt_s ldt_info;
5812 struct target_modify_ldt_ldt_s *target_ldt_info;
5813 int seg_32bit, contents, read_exec_only, limit_in_pages;
5814 int seg_not_present, useable, lm;
5815 uint32_t *lp, entry_1, entry_2;
5817 if (bytecount != sizeof(ldt_info))
5818 return -TARGET_EINVAL;
5819 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5820 return -TARGET_EFAULT;
5821 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5822 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5823 ldt_info.limit = tswap32(target_ldt_info->limit);
5824 ldt_info.flags = tswap32(target_ldt_info->flags);
5825 unlock_user_struct(target_ldt_info, ptr, 0);
5827 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5828 return -TARGET_EINVAL;
5829 seg_32bit = ldt_info.flags & 1;
5830 contents = (ldt_info.flags >> 1) & 3;
5831 read_exec_only = (ldt_info.flags >> 3) & 1;
5832 limit_in_pages = (ldt_info.flags >> 4) & 1;
5833 seg_not_present = (ldt_info.flags >> 5) & 1;
5834 useable = (ldt_info.flags >> 6) & 1;
5835 #ifdef TARGET_ABI32
5836 lm = 0;
5837 #else
5838 lm = (ldt_info.flags >> 7) & 1;
5839 #endif
5840 if (contents == 3) {
5841 if (oldmode)
5842 return -TARGET_EINVAL;
5843 if (seg_not_present == 0)
5844 return -TARGET_EINVAL;
5846 /* allocate the LDT */
5847 if (!ldt_table) {
5848 env->ldt.base = target_mmap(0,
5849 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5850 PROT_READ|PROT_WRITE,
5851 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5852 if (env->ldt.base == -1)
5853 return -TARGET_ENOMEM;
5854 memset(g2h(env->ldt.base), 0,
5855 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5856 env->ldt.limit = 0xffff;
5857 ldt_table = g2h(env->ldt.base);
5860 /* NOTE: same code as Linux kernel */
5861 /* Allow LDTs to be cleared by the user. */
5862 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5863 if (oldmode ||
5864 (contents == 0 &&
5865 read_exec_only == 1 &&
5866 seg_32bit == 0 &&
5867 limit_in_pages == 0 &&
5868 seg_not_present == 1 &&
5869 useable == 0 )) {
5870 entry_1 = 0;
5871 entry_2 = 0;
5872 goto install;
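    /*
     * Assemble the two 32-bit words of an x86 segment descriptor by hand:
     * word 0 holds base[15:0] and limit[15:0]; word 1 holds base[31:24],
     * the attribute bits, limit[19:16] and base[23:16].  The constant
     * 0x7000 sets the S bit (code/data segment) and DPL = 3 (user).
     */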
5876 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5877 (ldt_info.limit & 0x0ffff);
5878 entry_2 = (ldt_info.base_addr & 0xff000000) |
5879 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5880 (ldt_info.limit & 0xf0000) |
5881 ((read_exec_only ^ 1) << 9) |
5882 (contents << 10) |
5883 ((seg_not_present ^ 1) << 15) |
5884 (seg_32bit << 22) |
5885 (limit_in_pages << 23) |
5886 (lm << 21) |
5887 0x7000;
5888 if (!oldmode)
5889 entry_2 |= (useable << 20);
5891 /* Install the new entry ... */
5892 install:
5893 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5894 lp[0] = tswap32(entry_1);
5895 lp[1] = tswap32(entry_2);
5896 return 0;
5899 /* specific and weird i386 syscalls */
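/*
 * Function codes mirror the kernel's modify_ldt():
 *   0    - read the LDT into the user buffer
 *   1    - write an LDT entry in legacy mode ("contents == 3" rejected,
 *          useable bit ignored)
 *   0x11 - write an LDT entry in new mode
 * Anything else is reported as ENOSYS, as the kernel does.
 */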
5900 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5901 unsigned long bytecount)
5903 abi_long ret;
5905 switch (func) {
5906 case 0:
5907 ret = read_ldt(ptr, bytecount);
5908 break;
5909 case 1:
5910 ret = write_ldt(env, ptr, bytecount, 1);
5911 break;
5912 case 0x11:
5913 ret = write_ldt(env, ptr, bytecount, 0);
5914 break;
5915 default:
5916 ret = -TARGET_ENOSYS;
5917 break;
5919 return ret;
5922 #if defined(TARGET_ABI32)
5923 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5925 uint64_t *gdt_table = g2h(env->gdt.base);
5926 struct target_modify_ldt_ldt_s ldt_info;
5927 struct target_modify_ldt_ldt_s *target_ldt_info;
5928 int seg_32bit, contents, read_exec_only, limit_in_pages;
5929 int seg_not_present, useable, lm;
5930 uint32_t *lp, entry_1, entry_2;
5931 int i;
5933 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5934 if (!target_ldt_info)
5935 return -TARGET_EFAULT;
5936 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5937 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5938 ldt_info.limit = tswap32(target_ldt_info->limit);
5939 ldt_info.flags = tswap32(target_ldt_info->flags);
5940 if (ldt_info.entry_number == -1) {
5941 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5942 if (gdt_table[i] == 0) {
5943 ldt_info.entry_number = i;
5944 target_ldt_info->entry_number = tswap32(i);
5945 break;
5949 unlock_user_struct(target_ldt_info, ptr, 1);
5951 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5952 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5953 return -TARGET_EINVAL;
5954 seg_32bit = ldt_info.flags & 1;
5955 contents = (ldt_info.flags >> 1) & 3;
5956 read_exec_only = (ldt_info.flags >> 3) & 1;
5957 limit_in_pages = (ldt_info.flags >> 4) & 1;
5958 seg_not_present = (ldt_info.flags >> 5) & 1;
5959 useable = (ldt_info.flags >> 6) & 1;
5960 #ifdef TARGET_ABI32
5961 lm = 0;
5962 #else
5963 lm = (ldt_info.flags >> 7) & 1;
5964 #endif
5966 if (contents == 3) {
5967 if (seg_not_present == 0)
5968 return -TARGET_EINVAL;
5971 /* NOTE: same code as Linux kernel */
5972 /* Allow LDTs to be cleared by the user. */
5973 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5974 if ((contents == 0 &&
5975 read_exec_only == 1 &&
5976 seg_32bit == 0 &&
5977 limit_in_pages == 0 &&
5978 seg_not_present == 1 &&
5979 useable == 0 )) {
5980 entry_1 = 0;
5981 entry_2 = 0;
5982 goto install;
5986 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5987 (ldt_info.limit & 0x0ffff);
5988 entry_2 = (ldt_info.base_addr & 0xff000000) |
5989 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5990 (ldt_info.limit & 0xf0000) |
5991 ((read_exec_only ^ 1) << 9) |
5992 (contents << 10) |
5993 ((seg_not_present ^ 1) << 15) |
5994 (seg_32bit << 22) |
5995 (limit_in_pages << 23) |
5996 (useable << 20) |
5997 (lm << 21) |
5998 0x7000;
6000 /* Install the new entry ... */
6001 install:
6002 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6003 lp[0] = tswap32(entry_1);
6004 lp[1] = tswap32(entry_2);
6005 return 0;
6008 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6010 struct target_modify_ldt_ldt_s *target_ldt_info;
6011 uint64_t *gdt_table = g2h(env->gdt.base);
6012 uint32_t base_addr, limit, flags;
6013 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6014 int seg_not_present, useable, lm;
6015 uint32_t *lp, entry_1, entry_2;
6017 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6018 if (!target_ldt_info)
6019 return -TARGET_EFAULT;
6020 idx = tswap32(target_ldt_info->entry_number);
6021 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6022 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6023 unlock_user_struct(target_ldt_info, ptr, 1);
6024 return -TARGET_EINVAL;
6026 lp = (uint32_t *)(gdt_table + idx);
6027 entry_1 = tswap32(lp[0]);
6028 entry_2 = tswap32(lp[1]);
6030 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6031 contents = (entry_2 >> 10) & 3;
6032 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6033 seg_32bit = (entry_2 >> 22) & 1;
6034 limit_in_pages = (entry_2 >> 23) & 1;
6035 useable = (entry_2 >> 20) & 1;
6036 #ifdef TARGET_ABI32
6037 lm = 0;
6038 #else
6039 lm = (entry_2 >> 21) & 1;
6040 #endif
6041 flags = (seg_32bit << 0) | (contents << 1) |
6042 (read_exec_only << 3) | (limit_in_pages << 4) |
6043 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6044 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6045 base_addr = (entry_1 >> 16) |
6046 (entry_2 & 0xff000000) |
6047 ((entry_2 & 0xff) << 16);
6048 target_ldt_info->base_addr = tswapal(base_addr);
6049 target_ldt_info->limit = tswap32(limit);
6050 target_ldt_info->flags = tswap32(flags);
6051 unlock_user_struct(target_ldt_info, ptr, 1);
6052 return 0;
6055 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6057 return -TARGET_ENOSYS;
6059 #else
6060 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6062 abi_long ret = 0;
6063 abi_ulong val;
6064 int idx;
6066 switch(code) {
6067 case TARGET_ARCH_SET_GS:
6068 case TARGET_ARCH_SET_FS:
6069 if (code == TARGET_ARCH_SET_GS)
6070 idx = R_GS;
6071 else
6072 idx = R_FS;
6073 cpu_x86_load_seg(env, idx, 0);
6074 env->segs[idx].base = addr;
6075 break;
6076 case TARGET_ARCH_GET_GS:
6077 case TARGET_ARCH_GET_FS:
6078 if (code == TARGET_ARCH_GET_GS)
6079 idx = R_GS;
6080 else
6081 idx = R_FS;
6082 val = env->segs[idx].base;
6083 if (put_user(val, addr, abi_ulong))
6084 ret = -TARGET_EFAULT;
6085 break;
6086 default:
6087 ret = -TARGET_EINVAL;
6088 break;
6090 return ret;
6092 #endif /* defined(TARGET_ABI32) */
6094 #endif /* defined(TARGET_I386) */
6096 #define NEW_STACK_SIZE 0x40000
6099 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6100 typedef struct {
6101 CPUArchState *env;
6102 pthread_mutex_t mutex;
6103 pthread_cond_t cond;
6104 pthread_t thread;
6105 uint32_t tid;
6106 abi_ulong child_tidptr;
6107 abi_ulong parent_tidptr;
6108 sigset_t sigmask;
6109 } new_thread_info;
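/*
 * Thread start-up handshake: the parent holds both clone_lock and
 * info.mutex around pthread_create().  The new thread records its tid,
 * signals info.cond so the parent can return that tid, and then takes
 * and releases clone_lock so it cannot enter cpu_loop() before the
 * parent has finished setting up the child's state.
 */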
6111 static void *clone_func(void *arg)
6113 new_thread_info *info = arg;
6114 CPUArchState *env;
6115 CPUState *cpu;
6116 TaskState *ts;
6118 rcu_register_thread();
6119 tcg_register_thread();
6120 env = info->env;
6121 cpu = env_cpu(env);
6122 thread_cpu = cpu;
6123 ts = (TaskState *)cpu->opaque;
6124 info->tid = sys_gettid();
6125 task_settid(ts);
6126 if (info->child_tidptr)
6127 put_user_u32(info->tid, info->child_tidptr);
6128 if (info->parent_tidptr)
6129 put_user_u32(info->tid, info->parent_tidptr);
6130 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6131 /* Enable signals. */
6132 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6133 /* Signal to the parent that we're ready. */
6134 pthread_mutex_lock(&info->mutex);
6135 pthread_cond_broadcast(&info->cond);
6136 pthread_mutex_unlock(&info->mutex);
6137 /* Wait until the parent has finished initializing the tls state. */
6138 pthread_mutex_lock(&clone_lock);
6139 pthread_mutex_unlock(&clone_lock);
6140 cpu_loop(env);
6141 /* never exits */
6142 return NULL;
6145 /* do_fork() must return host values and target errnos (unlike most
6146    do_*() functions). */
6147 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6148 abi_ulong parent_tidptr, target_ulong newtls,
6149 abi_ulong child_tidptr)
6151 CPUState *cpu = env_cpu(env);
6152 int ret;
6153 TaskState *ts;
6154 CPUState *new_cpu;
6155 CPUArchState *new_env;
6156 sigset_t sigmask;
6158 flags &= ~CLONE_IGNORED_FLAGS;
6160 /* Emulate vfork() with fork() */
6161 if (flags & CLONE_VFORK)
6162 flags &= ~(CLONE_VFORK | CLONE_VM);
6164 if (flags & CLONE_VM) {
6165 TaskState *parent_ts = (TaskState *)cpu->opaque;
6166 new_thread_info info;
6167 pthread_attr_t attr;
6169 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6170 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6171 return -TARGET_EINVAL;
6174 ts = g_new0(TaskState, 1);
6175 init_task_state(ts);
6177 /* Grab a mutex so that thread setup appears atomic. */
6178 pthread_mutex_lock(&clone_lock);
6180 /* we create a new CPU instance. */
6181 new_env = cpu_copy(env);
6182 /* Init regs that differ from the parent. */
6183 cpu_clone_regs_child(new_env, newsp, flags);
6184 cpu_clone_regs_parent(env, flags);
6185 new_cpu = env_cpu(new_env);
6186 new_cpu->opaque = ts;
6187 ts->bprm = parent_ts->bprm;
6188 ts->info = parent_ts->info;
6189 ts->signal_mask = parent_ts->signal_mask;
6191 if (flags & CLONE_CHILD_CLEARTID) {
6192 ts->child_tidptr = child_tidptr;
6195 if (flags & CLONE_SETTLS) {
6196 cpu_set_tls (new_env, newtls);
6199 memset(&info, 0, sizeof(info));
6200 pthread_mutex_init(&info.mutex, NULL);
6201 pthread_mutex_lock(&info.mutex);
6202 pthread_cond_init(&info.cond, NULL);
6203 info.env = new_env;
6204 if (flags & CLONE_CHILD_SETTID) {
6205 info.child_tidptr = child_tidptr;
6207 if (flags & CLONE_PARENT_SETTID) {
6208 info.parent_tidptr = parent_tidptr;
6211 ret = pthread_attr_init(&attr);
6212 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6213 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6214 /* It is not safe to deliver signals until the child has finished
6215 initializing, so temporarily block all signals. */
6216 sigfillset(&sigmask);
6217 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6218 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6220 /* If this is our first additional thread, we need to ensure we
6221 * generate code for parallel execution and flush old translations.
6223 if (!parallel_cpus) {
6224 parallel_cpus = true;
6225 tb_flush(cpu);
6228 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6229 /* TODO: Free new CPU state if thread creation failed. */
6231 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6232 pthread_attr_destroy(&attr);
6233 if (ret == 0) {
6234 /* Wait for the child to initialize. */
6235 pthread_cond_wait(&info.cond, &info.mutex);
6236 ret = info.tid;
6237 } else {
6238 ret = -1;
6240 pthread_mutex_unlock(&info.mutex);
6241 pthread_cond_destroy(&info.cond);
6242 pthread_mutex_destroy(&info.mutex);
6243 pthread_mutex_unlock(&clone_lock);
6244 } else {
6245 /* if CLONE_VM is not set, we consider it a fork */
6246 if (flags & CLONE_INVALID_FORK_FLAGS) {
6247 return -TARGET_EINVAL;
6250 /* We can't support custom termination signals */
6251 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6252 return -TARGET_EINVAL;
6255 if (block_signals()) {
6256 return -TARGET_ERESTARTSYS;
6259 fork_start();
6260 ret = fork();
6261 if (ret == 0) {
6262 /* Child Process. */
6263 cpu_clone_regs_child(env, newsp, flags);
6264 fork_end(1);
6265 /* There is a race condition here. The parent process could
6266 theoretically read the TID in the child process before the child
6267 tid is set. This would require using either ptrace
6268 (not implemented) or having *_tidptr point at a shared memory
6269 mapping. We can't repeat the spinlock hack used above because
6270 the child process gets its own copy of the lock. */
6271 if (flags & CLONE_CHILD_SETTID)
6272 put_user_u32(sys_gettid(), child_tidptr);
6273 if (flags & CLONE_PARENT_SETTID)
6274 put_user_u32(sys_gettid(), parent_tidptr);
6275 ts = (TaskState *)cpu->opaque;
6276 if (flags & CLONE_SETTLS)
6277 cpu_set_tls (env, newtls);
6278 if (flags & CLONE_CHILD_CLEARTID)
6279 ts->child_tidptr = child_tidptr;
6280 } else {
6281 cpu_clone_regs_parent(env, flags);
6282 fork_end(0);
6285 return ret;
6288 /* warning: doesn't handle Linux-specific flags... */
6289 static int target_to_host_fcntl_cmd(int cmd)
6291 int ret;
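    /*
     * Note that TARGET_F_GETLK/SETLK/SETLKW map to the host's F_*LK64
     * commands: the do_fcntl() path below always works on a host
     * struct flock64, so the 64-bit commands are the right ones to use
     * whatever the target ABI's width.
     */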
6293 switch(cmd) {
6294 case TARGET_F_DUPFD:
6295 case TARGET_F_GETFD:
6296 case TARGET_F_SETFD:
6297 case TARGET_F_GETFL:
6298 case TARGET_F_SETFL:
6299 case TARGET_F_OFD_GETLK:
6300 case TARGET_F_OFD_SETLK:
6301 case TARGET_F_OFD_SETLKW:
6302 ret = cmd;
6303 break;
6304 case TARGET_F_GETLK:
6305 ret = F_GETLK64;
6306 break;
6307 case TARGET_F_SETLK:
6308 ret = F_SETLK64;
6309 break;
6310 case TARGET_F_SETLKW:
6311 ret = F_SETLKW64;
6312 break;
6313 case TARGET_F_GETOWN:
6314 ret = F_GETOWN;
6315 break;
6316 case TARGET_F_SETOWN:
6317 ret = F_SETOWN;
6318 break;
6319 case TARGET_F_GETSIG:
6320 ret = F_GETSIG;
6321 break;
6322 case TARGET_F_SETSIG:
6323 ret = F_SETSIG;
6324 break;
6325 #if TARGET_ABI_BITS == 32
6326 case TARGET_F_GETLK64:
6327 ret = F_GETLK64;
6328 break;
6329 case TARGET_F_SETLK64:
6330 ret = F_SETLK64;
6331 break;
6332 case TARGET_F_SETLKW64:
6333 ret = F_SETLKW64;
6334 break;
6335 #endif
6336 case TARGET_F_SETLEASE:
6337 ret = F_SETLEASE;
6338 break;
6339 case TARGET_F_GETLEASE:
6340 ret = F_GETLEASE;
6341 break;
6342 #ifdef F_DUPFD_CLOEXEC
6343 case TARGET_F_DUPFD_CLOEXEC:
6344 ret = F_DUPFD_CLOEXEC;
6345 break;
6346 #endif
6347 case TARGET_F_NOTIFY:
6348 ret = F_NOTIFY;
6349 break;
6350 #ifdef F_GETOWN_EX
6351 case TARGET_F_GETOWN_EX:
6352 ret = F_GETOWN_EX;
6353 break;
6354 #endif
6355 #ifdef F_SETOWN_EX
6356 case TARGET_F_SETOWN_EX:
6357 ret = F_SETOWN_EX;
6358 break;
6359 #endif
6360 #ifdef F_SETPIPE_SZ
6361 case TARGET_F_SETPIPE_SZ:
6362 ret = F_SETPIPE_SZ;
6363 break;
6364 case TARGET_F_GETPIPE_SZ:
6365 ret = F_GETPIPE_SZ;
6366 break;
6367 #endif
6368 default:
6369 ret = -TARGET_EINVAL;
6370 break;
6373 #if defined(__powerpc64__)
6374 /* On PPC64, the glibc headers have the F_*LK* commands defined to 12, 13
6375  * and 14, values which are not supported by the kernel.  The glibc fcntl()
6376  * wrapper actually adjusts them to 5, 6 and 7 before making the syscall.
6377  * Since we make the syscall directly, adjust to what the kernel supports.
6379 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6380 ret -= F_GETLK64 - 5;
6382 #endif
6384 return ret;
6387 #define FLOCK_TRANSTBL \
6388 switch (type) { \
6389 TRANSTBL_CONVERT(F_RDLCK); \
6390 TRANSTBL_CONVERT(F_WRLCK); \
6391 TRANSTBL_CONVERT(F_UNLCK); \
6392 TRANSTBL_CONVERT(F_EXLCK); \
6393 TRANSTBL_CONVERT(F_SHLCK); \
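/*
 * FLOCK_TRANSTBL expands to the body of a switch on "type"; redefining
 * TRANSTBL_CONVERT before each expansion lets the same list of lock
 * types generate both the target-to-host and host-to-target mappings.
 */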
6396 static int target_to_host_flock(int type)
6398 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6399 FLOCK_TRANSTBL
6400 #undef TRANSTBL_CONVERT
6401 return -TARGET_EINVAL;
6404 static int host_to_target_flock(int type)
6406 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6407 FLOCK_TRANSTBL
6408 #undef TRANSTBL_CONVERT
6409 /* if we don't know how to convert the value coming
6410  * from the host, we copy it to the target field as-is
6412 return type;
6415 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6416 abi_ulong target_flock_addr)
6418 struct target_flock *target_fl;
6419 int l_type;
6421 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6422 return -TARGET_EFAULT;
6425 __get_user(l_type, &target_fl->l_type);
6426 l_type = target_to_host_flock(l_type);
6427 if (l_type < 0) {
6428 return l_type;
6430 fl->l_type = l_type;
6431 __get_user(fl->l_whence, &target_fl->l_whence);
6432 __get_user(fl->l_start, &target_fl->l_start);
6433 __get_user(fl->l_len, &target_fl->l_len);
6434 __get_user(fl->l_pid, &target_fl->l_pid);
6435 unlock_user_struct(target_fl, target_flock_addr, 0);
6436 return 0;
6439 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6440 const struct flock64 *fl)
6442 struct target_flock *target_fl;
6443 short l_type;
6445 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6446 return -TARGET_EFAULT;
6449 l_type = host_to_target_flock(fl->l_type);
6450 __put_user(l_type, &target_fl->l_type);
6451 __put_user(fl->l_whence, &target_fl->l_whence);
6452 __put_user(fl->l_start, &target_fl->l_start);
6453 __put_user(fl->l_len, &target_fl->l_len);
6454 __put_user(fl->l_pid, &target_fl->l_pid);
6455 unlock_user_struct(target_fl, target_flock_addr, 1);
6456 return 0;
6459 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6460 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6462 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6463 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6464 abi_ulong target_flock_addr)
6466 struct target_oabi_flock64 *target_fl;
6467 int l_type;
6469 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6470 return -TARGET_EFAULT;
6473 __get_user(l_type, &target_fl->l_type);
6474 l_type = target_to_host_flock(l_type);
6475 if (l_type < 0) {
6476 return l_type;
6478 fl->l_type = l_type;
6479 __get_user(fl->l_whence, &target_fl->l_whence);
6480 __get_user(fl->l_start, &target_fl->l_start);
6481 __get_user(fl->l_len, &target_fl->l_len);
6482 __get_user(fl->l_pid, &target_fl->l_pid);
6483 unlock_user_struct(target_fl, target_flock_addr, 0);
6484 return 0;
6487 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6488 const struct flock64 *fl)
6490 struct target_oabi_flock64 *target_fl;
6491 short l_type;
6493 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6494 return -TARGET_EFAULT;
6497 l_type = host_to_target_flock(fl->l_type);
6498 __put_user(l_type, &target_fl->l_type);
6499 __put_user(fl->l_whence, &target_fl->l_whence);
6500 __put_user(fl->l_start, &target_fl->l_start);
6501 __put_user(fl->l_len, &target_fl->l_len);
6502 __put_user(fl->l_pid, &target_fl->l_pid);
6503 unlock_user_struct(target_fl, target_flock_addr, 1);
6504 return 0;
6506 #endif
6508 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6509 abi_ulong target_flock_addr)
6511 struct target_flock64 *target_fl;
6512 int l_type;
6514 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6515 return -TARGET_EFAULT;
6518 __get_user(l_type, &target_fl->l_type);
6519 l_type = target_to_host_flock(l_type);
6520 if (l_type < 0) {
6521 return l_type;
6523 fl->l_type = l_type;
6524 __get_user(fl->l_whence, &target_fl->l_whence);
6525 __get_user(fl->l_start, &target_fl->l_start);
6526 __get_user(fl->l_len, &target_fl->l_len);
6527 __get_user(fl->l_pid, &target_fl->l_pid);
6528 unlock_user_struct(target_fl, target_flock_addr, 0);
6529 return 0;
6532 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6533 const struct flock64 *fl)
6535 struct target_flock64 *target_fl;
6536 short l_type;
6538 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6539 return -TARGET_EFAULT;
6542 l_type = host_to_target_flock(fl->l_type);
6543 __put_user(l_type, &target_fl->l_type);
6544 __put_user(fl->l_whence, &target_fl->l_whence);
6545 __put_user(fl->l_start, &target_fl->l_start);
6546 __put_user(fl->l_len, &target_fl->l_len);
6547 __put_user(fl->l_pid, &target_fl->l_pid);
6548 unlock_user_struct(target_fl, target_flock_addr, 1);
6549 return 0;
6552 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6554 struct flock64 fl64;
6555 #ifdef F_GETOWN_EX
6556 struct f_owner_ex fox;
6557 struct target_f_owner_ex *target_fox;
6558 #endif
6559 abi_long ret;
6560 int host_cmd = target_to_host_fcntl_cmd(cmd);
6562 if (host_cmd == -TARGET_EINVAL)
6563 return host_cmd;
6565 switch(cmd) {
6566 case TARGET_F_GETLK:
6567 ret = copy_from_user_flock(&fl64, arg);
6568 if (ret) {
6569 return ret;
6571 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6572 if (ret == 0) {
6573 ret = copy_to_user_flock(arg, &fl64);
6575 break;
6577 case TARGET_F_SETLK:
6578 case TARGET_F_SETLKW:
6579 ret = copy_from_user_flock(&fl64, arg);
6580 if (ret) {
6581 return ret;
6583 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6584 break;
6586 case TARGET_F_GETLK64:
6587 case TARGET_F_OFD_GETLK:
6588 ret = copy_from_user_flock64(&fl64, arg);
6589 if (ret) {
6590 return ret;
6592 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6593 if (ret == 0) {
6594 ret = copy_to_user_flock64(arg, &fl64);
6596 break;
6597 case TARGET_F_SETLK64:
6598 case TARGET_F_SETLKW64:
6599 case TARGET_F_OFD_SETLK:
6600 case TARGET_F_OFD_SETLKW:
6601 ret = copy_from_user_flock64(&fl64, arg);
6602 if (ret) {
6603 return ret;
6605 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6606 break;
6608 case TARGET_F_GETFL:
6609 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6610 if (ret >= 0) {
6611 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6613 break;
6615 case TARGET_F_SETFL:
6616 ret = get_errno(safe_fcntl(fd, host_cmd,
6617 target_to_host_bitmask(arg,
6618 fcntl_flags_tbl)));
6619 break;
6621 #ifdef F_GETOWN_EX
6622 case TARGET_F_GETOWN_EX:
6623 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6624 if (ret >= 0) {
6625 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6626 return -TARGET_EFAULT;
6627 target_fox->type = tswap32(fox.type);
6628 target_fox->pid = tswap32(fox.pid);
6629 unlock_user_struct(target_fox, arg, 1);
6631 break;
6632 #endif
6634 #ifdef F_SETOWN_EX
6635 case TARGET_F_SETOWN_EX:
6636 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6637 return -TARGET_EFAULT;
6638 fox.type = tswap32(target_fox->type);
6639 fox.pid = tswap32(target_fox->pid);
6640 unlock_user_struct(target_fox, arg, 0);
6641 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6642 break;
6643 #endif
6645 case TARGET_F_SETOWN:
6646 case TARGET_F_GETOWN:
6647 case TARGET_F_SETSIG:
6648 case TARGET_F_GETSIG:
6649 case TARGET_F_SETLEASE:
6650 case TARGET_F_GETLEASE:
6651 case TARGET_F_SETPIPE_SZ:
6652 case TARGET_F_GETPIPE_SZ:
6653 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6654 break;
6656 default:
6657 ret = get_errno(safe_fcntl(fd, cmd, arg));
6658 break;
6660 return ret;
6663 #ifdef USE_UID16
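/*
 * Helpers for targets whose ABI still uses the legacy 16-bit uid/gid
 * syscalls.  As in the kernel, IDs that do not fit in 16 bits are
 * reported as the overflow ID 65534, and a 16-bit -1 is sign-extended
 * so that "leave unchanged" arguments keep their meaning.
 */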
6665 static inline int high2lowuid(int uid)
6667 if (uid > 65535)
6668 return 65534;
6669 else
6670 return uid;
6673 static inline int high2lowgid(int gid)
6675 if (gid > 65535)
6676 return 65534;
6677 else
6678 return gid;
6681 static inline int low2highuid(int uid)
6683 if ((int16_t)uid == -1)
6684 return -1;
6685 else
6686 return uid;
6689 static inline int low2highgid(int gid)
6691 if ((int16_t)gid == -1)
6692 return -1;
6693 else
6694 return gid;
6696 static inline int tswapid(int id)
6698 return tswap16(id);
6701 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6703 #else /* !USE_UID16 */
6704 static inline int high2lowuid(int uid)
6706 return uid;
6708 static inline int high2lowgid(int gid)
6710 return gid;
6712 static inline int low2highuid(int uid)
6714 return uid;
6716 static inline int low2highgid(int gid)
6718 return gid;
6720 static inline int tswapid(int id)
6722 return tswap32(id);
6725 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6727 #endif /* USE_UID16 */
6729 /* We must do direct syscalls for setting UID/GID, because we want to
6730 * implement the Linux system call semantics of "change only for this thread",
6731 * not the libc/POSIX semantics of "change for all threads in process".
6732 * (See http://ewontfix.com/17/ for more details.)
6733 * We use the 32-bit version of the syscalls if present; if it is not
6734 * then either the host architecture supports 32-bit UIDs natively with
6735 * the standard syscall, or the 16-bit UID is the best we can do.
6737 #ifdef __NR_setuid32
6738 #define __NR_sys_setuid __NR_setuid32
6739 #else
6740 #define __NR_sys_setuid __NR_setuid
6741 #endif
6742 #ifdef __NR_setgid32
6743 #define __NR_sys_setgid __NR_setgid32
6744 #else
6745 #define __NR_sys_setgid __NR_setgid
6746 #endif
6747 #ifdef __NR_setresuid32
6748 #define __NR_sys_setresuid __NR_setresuid32
6749 #else
6750 #define __NR_sys_setresuid __NR_setresuid
6751 #endif
6752 #ifdef __NR_setresgid32
6753 #define __NR_sys_setresgid __NR_setresgid32
6754 #else
6755 #define __NR_sys_setresgid __NR_setresgid
6756 #endif
6758 _syscall1(int, sys_setuid, uid_t, uid)
6759 _syscall1(int, sys_setgid, gid_t, gid)
6760 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6761 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6763 void syscall_init(void)
6765 IOCTLEntry *ie;
6766 const argtype *arg_type;
6767 int size;
6768 int i;
6770 thunk_init(STRUCT_MAX);
6772 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6773 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6774 #include "syscall_types.h"
6775 #undef STRUCT
6776 #undef STRUCT_SPECIAL
6778 /* Build target_to_host_errno_table[] table from
6779 * host_to_target_errno_table[]. */
6780 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6781 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6784 /* We patch the ioctl size if necessary.  We rely on the fact that
6785    no ioctl has all the bits at '1' in the size field. */
6786 ie = ioctl_entries;
6787 while (ie->target_cmd != 0) {
6788 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6789 TARGET_IOC_SIZEMASK) {
6790 arg_type = ie->arg_type;
6791 if (arg_type[0] != TYPE_PTR) {
6792 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6793 ie->target_cmd);
6794 exit(1);
6796 arg_type++;
6797 size = thunk_type_size(arg_type, 0);
6798 ie->target_cmd = (ie->target_cmd &
6799 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6800 (size << TARGET_IOC_SIZESHIFT);
6803 /* automatic consistency check if same arch */
6804 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6805 (defined(__x86_64__) && defined(TARGET_X86_64))
6806 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6807 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6808 ie->name, ie->target_cmd, ie->host_cmd);
6810 #endif
6811 ie++;
6815 #ifdef TARGET_NR_truncate64
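/*
 * On 32-bit ABIs a 64-bit file offset arrives in a register pair, and
 * some ABIs require the pair to start on an even-numbered register,
 * which inserts one padding argument; regpairs_aligned() tells us
 * whether to skip that padding before reassembling the offset.
 */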
6816 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6817 abi_long arg2,
6818 abi_long arg3,
6819 abi_long arg4)
6821 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6822 arg2 = arg3;
6823 arg3 = arg4;
6825 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6827 #endif
6829 #ifdef TARGET_NR_ftruncate64
6830 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6831 abi_long arg2,
6832 abi_long arg3,
6833 abi_long arg4)
6835 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6836 arg2 = arg3;
6837 arg3 = arg4;
6839 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6841 #endif
6843 #if defined(TARGET_NR_timer_settime) || \
6844 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6845 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6846 abi_ulong target_addr)
6848 if (target_to_host_timespec(&host_its->it_interval, target_addr +
6849 offsetof(struct target_itimerspec,
6850 it_interval)) ||
6851 target_to_host_timespec(&host_its->it_value, target_addr +
6852 offsetof(struct target_itimerspec,
6853 it_value))) {
6854 return -TARGET_EFAULT;
6857 return 0;
6859 #endif
6861 #if defined(TARGET_NR_timer_settime64) || \
6862 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6863 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6864 abi_ulong target_addr)
6866 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6867 offsetof(struct target__kernel_itimerspec,
6868 it_interval)) ||
6869 target_to_host_timespec64(&host_its->it_value, target_addr +
6870 offsetof(struct target__kernel_itimerspec,
6871 it_value))) {
6872 return -TARGET_EFAULT;
6875 return 0;
6877 #endif
6879 #if ((defined(TARGET_NR_timerfd_gettime) || \
6880 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6881 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6882 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6883 struct itimerspec *host_its)
6885 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6886 it_interval),
6887 &host_its->it_interval) ||
6888 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6889 it_value),
6890 &host_its->it_value)) {
6891 return -TARGET_EFAULT;
6893 return 0;
6895 #endif
6897 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6898 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6899 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6900 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6901 struct itimerspec *host_its)
6903 if (host_to_target_timespec64(target_addr +
6904 offsetof(struct target__kernel_itimerspec,
6905 it_interval),
6906 &host_its->it_interval) ||
6907 host_to_target_timespec64(target_addr +
6908 offsetof(struct target__kernel_itimerspec,
6909 it_value),
6910 &host_its->it_value)) {
6911 return -TARGET_EFAULT;
6913 return 0;
6915 #endif
6917 #if defined(TARGET_NR_adjtimex) || \
6918 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6919 static inline abi_long target_to_host_timex(struct timex *host_tx,
6920 abi_long target_addr)
6922 struct target_timex *target_tx;
6924 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6925 return -TARGET_EFAULT;
6928 __get_user(host_tx->modes, &target_tx->modes);
6929 __get_user(host_tx->offset, &target_tx->offset);
6930 __get_user(host_tx->freq, &target_tx->freq);
6931 __get_user(host_tx->maxerror, &target_tx->maxerror);
6932 __get_user(host_tx->esterror, &target_tx->esterror);
6933 __get_user(host_tx->status, &target_tx->status);
6934 __get_user(host_tx->constant, &target_tx->constant);
6935 __get_user(host_tx->precision, &target_tx->precision);
6936 __get_user(host_tx->tolerance, &target_tx->tolerance);
6937 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6938 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6939 __get_user(host_tx->tick, &target_tx->tick);
6940 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6941 __get_user(host_tx->jitter, &target_tx->jitter);
6942 __get_user(host_tx->shift, &target_tx->shift);
6943 __get_user(host_tx->stabil, &target_tx->stabil);
6944 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6945 __get_user(host_tx->calcnt, &target_tx->calcnt);
6946 __get_user(host_tx->errcnt, &target_tx->errcnt);
6947 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6948 __get_user(host_tx->tai, &target_tx->tai);
6950 unlock_user_struct(target_tx, target_addr, 0);
6951 return 0;
6954 static inline abi_long host_to_target_timex(abi_long target_addr,
6955 struct timex *host_tx)
6957 struct target_timex *target_tx;
6959 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6960 return -TARGET_EFAULT;
6963 __put_user(host_tx->modes, &target_tx->modes);
6964 __put_user(host_tx->offset, &target_tx->offset);
6965 __put_user(host_tx->freq, &target_tx->freq);
6966 __put_user(host_tx->maxerror, &target_tx->maxerror);
6967 __put_user(host_tx->esterror, &target_tx->esterror);
6968 __put_user(host_tx->status, &target_tx->status);
6969 __put_user(host_tx->constant, &target_tx->constant);
6970 __put_user(host_tx->precision, &target_tx->precision);
6971 __put_user(host_tx->tolerance, &target_tx->tolerance);
6972 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6973 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6974 __put_user(host_tx->tick, &target_tx->tick);
6975 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6976 __put_user(host_tx->jitter, &target_tx->jitter);
6977 __put_user(host_tx->shift, &target_tx->shift);
6978 __put_user(host_tx->stabil, &target_tx->stabil);
6979 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6980 __put_user(host_tx->calcnt, &target_tx->calcnt);
6981 __put_user(host_tx->errcnt, &target_tx->errcnt);
6982 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6983 __put_user(host_tx->tai, &target_tx->tai);
6985 unlock_user_struct(target_tx, target_addr, 1);
6986 return 0;
6988 #endif
6991 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
6992 static inline abi_long target_to_host_timex64(struct timex *host_tx,
6993 abi_long target_addr)
6995 struct target__kernel_timex *target_tx;
6997 if (copy_from_user_timeval64(&host_tx->time, target_addr +
6998 offsetof(struct target__kernel_timex,
6999 time))) {
7000 return -TARGET_EFAULT;
7003 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7004 return -TARGET_EFAULT;
7007 __get_user(host_tx->modes, &target_tx->modes);
7008 __get_user(host_tx->offset, &target_tx->offset);
7009 __get_user(host_tx->freq, &target_tx->freq);
7010 __get_user(host_tx->maxerror, &target_tx->maxerror);
7011 __get_user(host_tx->esterror, &target_tx->esterror);
7012 __get_user(host_tx->status, &target_tx->status);
7013 __get_user(host_tx->constant, &target_tx->constant);
7014 __get_user(host_tx->precision, &target_tx->precision);
7015 __get_user(host_tx->tolerance, &target_tx->tolerance);
7016 __get_user(host_tx->tick, &target_tx->tick);
7017 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7018 __get_user(host_tx->jitter, &target_tx->jitter);
7019 __get_user(host_tx->shift, &target_tx->shift);
7020 __get_user(host_tx->stabil, &target_tx->stabil);
7021 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7022 __get_user(host_tx->calcnt, &target_tx->calcnt);
7023 __get_user(host_tx->errcnt, &target_tx->errcnt);
7024 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7025 __get_user(host_tx->tai, &target_tx->tai);
7027 unlock_user_struct(target_tx, target_addr, 0);
7028 return 0;
7031 static inline abi_long host_to_target_timex64(abi_long target_addr,
7032 struct timex *host_tx)
7034 struct target__kernel_timex *target_tx;
7036 if (copy_to_user_timeval64(target_addr +
7037 offsetof(struct target__kernel_timex, time),
7038 &host_tx->time)) {
7039 return -TARGET_EFAULT;
7042 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7043 return -TARGET_EFAULT;
7046 __put_user(host_tx->modes, &target_tx->modes);
7047 __put_user(host_tx->offset, &target_tx->offset);
7048 __put_user(host_tx->freq, &target_tx->freq);
7049 __put_user(host_tx->maxerror, &target_tx->maxerror);
7050 __put_user(host_tx->esterror, &target_tx->esterror);
7051 __put_user(host_tx->status, &target_tx->status);
7052 __put_user(host_tx->constant, &target_tx->constant);
7053 __put_user(host_tx->precision, &target_tx->precision);
7054 __put_user(host_tx->tolerance, &target_tx->tolerance);
7055 __put_user(host_tx->tick, &target_tx->tick);
7056 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7057 __put_user(host_tx->jitter, &target_tx->jitter);
7058 __put_user(host_tx->shift, &target_tx->shift);
7059 __put_user(host_tx->stabil, &target_tx->stabil);
7060 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7061 __put_user(host_tx->calcnt, &target_tx->calcnt);
7062 __put_user(host_tx->errcnt, &target_tx->errcnt);
7063 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7064 __put_user(host_tx->tai, &target_tx->tai);
7066 unlock_user_struct(target_tx, target_addr, 1);
7067 return 0;
7069 #endif
7071 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7072 abi_ulong target_addr)
7074 struct target_sigevent *target_sevp;
7076 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7077 return -TARGET_EFAULT;
7080 /* This union is awkward on 64 bit systems because it has a 32 bit
7081 * integer and a pointer in it; we follow the conversion approach
7082 * used for handling sigval types in signal.c so the guest should get
7083 * the correct value back even if we did a 64 bit byteswap and it's
7084 * using the 32 bit integer.
7086 host_sevp->sigev_value.sival_ptr =
7087 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7088 host_sevp->sigev_signo =
7089 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7090 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7091 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7093 unlock_user_struct(target_sevp, target_addr, 1);
7094 return 0;
7097 #if defined(TARGET_NR_mlockall)
7098 static inline int target_to_host_mlockall_arg(int arg)
7100 int result = 0;
7102 if (arg & TARGET_MCL_CURRENT) {
7103 result |= MCL_CURRENT;
7105 if (arg & TARGET_MCL_FUTURE) {
7106 result |= MCL_FUTURE;
7108 #ifdef MCL_ONFAULT
7109 if (arg & TARGET_MCL_ONFAULT) {
7110 result |= MCL_ONFAULT;
7112 #endif
7114 return result;
7116 #endif
7118 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7119 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7120 defined(TARGET_NR_newfstatat))
7121 static inline abi_long host_to_target_stat64(void *cpu_env,
7122 abi_ulong target_addr,
7123 struct stat *host_st)
7125 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7126 if (((CPUARMState *)cpu_env)->eabi) {
7127 struct target_eabi_stat64 *target_st;
7129 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7130 return -TARGET_EFAULT;
7131 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7132 __put_user(host_st->st_dev, &target_st->st_dev);
7133 __put_user(host_st->st_ino, &target_st->st_ino);
7134 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7135 __put_user(host_st->st_ino, &target_st->__st_ino);
7136 #endif
7137 __put_user(host_st->st_mode, &target_st->st_mode);
7138 __put_user(host_st->st_nlink, &target_st->st_nlink);
7139 __put_user(host_st->st_uid, &target_st->st_uid);
7140 __put_user(host_st->st_gid, &target_st->st_gid);
7141 __put_user(host_st->st_rdev, &target_st->st_rdev);
7142 __put_user(host_st->st_size, &target_st->st_size);
7143 __put_user(host_st->st_blksize, &target_st->st_blksize);
7144 __put_user(host_st->st_blocks, &target_st->st_blocks);
7145 __put_user(host_st->st_atime, &target_st->target_st_atime);
7146 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7147 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7148 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7149 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7150 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7151 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7152 #endif
7153 unlock_user_struct(target_st, target_addr, 1);
7154 } else
7155 #endif
7157 #if defined(TARGET_HAS_STRUCT_STAT64)
7158 struct target_stat64 *target_st;
7159 #else
7160 struct target_stat *target_st;
7161 #endif
7163 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7164 return -TARGET_EFAULT;
7165 memset(target_st, 0, sizeof(*target_st));
7166 __put_user(host_st->st_dev, &target_st->st_dev);
7167 __put_user(host_st->st_ino, &target_st->st_ino);
7168 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7169 __put_user(host_st->st_ino, &target_st->__st_ino);
7170 #endif
7171 __put_user(host_st->st_mode, &target_st->st_mode);
7172 __put_user(host_st->st_nlink, &target_st->st_nlink);
7173 __put_user(host_st->st_uid, &target_st->st_uid);
7174 __put_user(host_st->st_gid, &target_st->st_gid);
7175 __put_user(host_st->st_rdev, &target_st->st_rdev);
7176 /* XXX: better use of kernel struct */
7177 __put_user(host_st->st_size, &target_st->st_size);
7178 __put_user(host_st->st_blksize, &target_st->st_blksize);
7179 __put_user(host_st->st_blocks, &target_st->st_blocks);
7180 __put_user(host_st->st_atime, &target_st->target_st_atime);
7181 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7182 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7183 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7184 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7185 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7186 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7187 #endif
7188 unlock_user_struct(target_st, target_addr, 1);
7191 return 0;
7193 #endif
7195 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7196 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7197 abi_ulong target_addr)
7199 struct target_statx *target_stx;
7201 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7202 return -TARGET_EFAULT;
7204 memset(target_stx, 0, sizeof(*target_stx));
7206 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7207 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7208 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7209 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7210 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7211 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7212 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7213 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7214 __put_user(host_stx->stx_size, &target_stx->stx_size);
7215 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7216 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7217 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7218 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7219 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7220 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7221 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7222 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7223 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7224 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7225 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7226 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7227 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7228 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7230 unlock_user_struct(target_stx, target_addr, 1);
7232 return 0;
7234 #endif
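/*
 * Two futex front ends: do_sys_futex() issues the raw syscall and is
 * meant for internal callers that must not be interrupted, while
 * do_safe_futex() goes through the safe_syscall wrapper so that a
 * guest signal arriving mid-call gets correct restart semantics.  On
 * 32-bit hosts both prefer __NR_futex_time64 when struct timespec
 * carries a 64-bit tv_sec.
 */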
7236 static int do_sys_futex(int *uaddr, int op, int val,
7237 const struct timespec *timeout, int *uaddr2,
7238 int val3)
7240 #if HOST_LONG_BITS == 64
7241 #if defined(__NR_futex)
7242 /* the host time_t is always 64-bit and no _time64 variant is defined */
7243 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7245 #endif
7246 #else /* HOST_LONG_BITS == 64 */
7247 #if defined(__NR_futex_time64)
7248 if (sizeof(timeout->tv_sec) == 8) {
7249 /* _time64 function on 32bit arch */
7250 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7252 #endif
7253 #if defined(__NR_futex)
7254 /* old function on 32bit arch */
7255 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7256 #endif
7257 #endif /* HOST_LONG_BITS == 64 */
7258 g_assert_not_reached();
7261 static int do_safe_futex(int *uaddr, int op, int val,
7262 const struct timespec *timeout, int *uaddr2,
7263 int val3)
7265 #if HOST_LONG_BITS == 64
7266 #if defined(__NR_futex)
7267 /* the host time_t is always 64-bit and no _time64 variant is defined */
7268 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7269 #endif
7270 #else /* HOST_LONG_BITS == 64 */
7271 #if defined(__NR_futex_time64)
7272 if (sizeof(timeout->tv_sec) == 8) {
7273 /* _time64 function on 32bit arch */
7274 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7275 val3));
7277 #endif
7278 #if defined(__NR_futex)
7279 /* old function on 32bit arch */
7280 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7281 #endif
7282 #endif /* HOST_LONG_BITS == 64 */
7283 return -TARGET_ENOSYS;
7286 /* ??? Using host futex calls even when target atomic operations
7287    are not really atomic probably breaks things.  However, implementing
7288    futexes locally would make futexes shared between multiple processes
7289    tricky.  That said, they're probably useless because guest atomic
7290    operations won't work either. */
7291 #if defined(TARGET_NR_futex)
7292 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7293 target_ulong uaddr2, int val3)
7295 struct timespec ts, *pts;
7296 int base_op;
7298 /* ??? We assume FUTEX_* constants are the same on both host
7299 and target. */
7300 #ifdef FUTEX_CMD_MASK
7301 base_op = op & FUTEX_CMD_MASK;
7302 #else
7303 base_op = op;
7304 #endif
7305 switch (base_op) {
7306 case FUTEX_WAIT:
7307 case FUTEX_WAIT_BITSET:
7308 if (timeout) {
7309 pts = &ts;
7310 target_to_host_timespec(pts, timeout);
7311 } else {
7312 pts = NULL;
7314 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7315 case FUTEX_WAKE:
7316 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7317 case FUTEX_FD:
7318 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7319 case FUTEX_REQUEUE:
7320 case FUTEX_CMP_REQUEUE:
7321 case FUTEX_WAKE_OP:
7322 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7323 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7324 But the prototype takes a `struct timespec *'; insert casts
7325 to satisfy the compiler. We do not need to tswap TIMEOUT
7326 since it's not compared to guest memory. */
7327 pts = (struct timespec *)(uintptr_t) timeout;
7328 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7329 (base_op == FUTEX_CMP_REQUEUE
7330 ? tswap32(val3)
7331 : val3));
7332 default:
7333 return -TARGET_ENOSYS;
7336 #endif
7338 #if defined(TARGET_NR_futex_time64)
7339 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7340 target_ulong uaddr2, int val3)
7342 struct timespec ts, *pts;
7343 int base_op;
7345 /* ??? We assume FUTEX_* constants are the same on both host
7346 and target. */
7347 #ifdef FUTEX_CMD_MASK
7348 base_op = op & FUTEX_CMD_MASK;
7349 #else
7350 base_op = op;
7351 #endif
7352 switch (base_op) {
7353 case FUTEX_WAIT:
7354 case FUTEX_WAIT_BITSET:
7355 if (timeout) {
7356 pts = &ts;
7357 target_to_host_timespec64(pts, timeout);
7358 } else {
7359 pts = NULL;
7361 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7362 case FUTEX_WAKE:
7363 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7364 case FUTEX_FD:
7365 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7366 case FUTEX_REQUEUE:
7367 case FUTEX_CMP_REQUEUE:
7368 case FUTEX_WAKE_OP:
7369 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7370 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7371 But the prototype takes a `struct timespec *'; insert casts
7372 to satisfy the compiler. We do not need to tswap TIMEOUT
7373 since it's not compared to guest memory. */
7374 pts = (struct timespec *)(uintptr_t) timeout;
7375 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7376 (base_op == FUTEX_CMP_REQUEUE
7377 ? tswap32(val3)
7378 : val3));
7379 default:
7380 return -TARGET_ENOSYS;
7383 #endif
7385 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7386 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7387 abi_long handle, abi_long mount_id,
7388 abi_long flags)
7390 struct file_handle *target_fh;
7391 struct file_handle *fh;
7392 int mid = 0;
7393 abi_long ret;
7394 char *name;
7395 unsigned int size, total_size;
7397 if (get_user_s32(size, handle)) {
7398 return -TARGET_EFAULT;
7401 name = lock_user_string(pathname);
7402 if (!name) {
7403 return -TARGET_EFAULT;
7406 total_size = sizeof(struct file_handle) + size;
7407 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7408 if (!target_fh) {
7409 unlock_user(name, pathname, 0);
7410 return -TARGET_EFAULT;
7413 fh = g_malloc0(total_size);
7414 fh->handle_bytes = size;
7416 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7417 unlock_user(name, pathname, 0);
7419 /* man name_to_handle_at(2):
7420 * Other than the use of the handle_bytes field, the caller should treat
7421 * the file_handle structure as an opaque data type
7424 memcpy(target_fh, fh, total_size);
7425 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7426 target_fh->handle_type = tswap32(fh->handle_type);
7427 g_free(fh);
7428 unlock_user(target_fh, handle, total_size);
7430 if (put_user_s32(mid, mount_id)) {
7431 return -TARGET_EFAULT;
7434 return ret;
7437 #endif
7439 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7440 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7441 abi_long flags)
7443 struct file_handle *target_fh;
7444 struct file_handle *fh;
7445 unsigned int size, total_size;
7446 abi_long ret;
7448 if (get_user_s32(size, handle)) {
7449 return -TARGET_EFAULT;
7452 total_size = sizeof(struct file_handle) + size;
7453 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7454 if (!target_fh) {
7455 return -TARGET_EFAULT;
7458 fh = g_memdup(target_fh, total_size);
7459 fh->handle_bytes = size;
7460 fh->handle_type = tswap32(target_fh->handle_type);
7462 ret = get_errno(open_by_handle_at(mount_fd, fh,
7463 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7465 g_free(fh);
7467 unlock_user(target_fh, handle, total_size);
7469 return ret;
7471 #endif
7473 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7475 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7477 int host_flags;
7478 target_sigset_t *target_mask;
7479 sigset_t host_mask;
7480 abi_long ret;
7482 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7483 return -TARGET_EINVAL;
7485 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7486 return -TARGET_EFAULT;
7489 target_to_host_sigset(&host_mask, target_mask);
7491 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7493 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7494 if (ret >= 0) {
7495 fd_trans_register(ret, &target_signalfd_trans);
7498 unlock_user_struct(target_mask, mask, 0);
7500 return ret;
7502 #endif
7504 /* Map host to target signal numbers for the wait family of syscalls.
7505 Assume all other status bits are the same. */
7506 int host_to_target_waitstatus(int status)
7508 if (WIFSIGNALED(status)) {
7509 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7511 if (WIFSTOPPED(status)) {
7512 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7513 | (status & 0xff);
7515 return status;
7518 static int open_self_cmdline(void *cpu_env, int fd)
7520 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7521 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7522 int i;
7524 for (i = 0; i < bprm->argc; i++) {
7525 size_t len = strlen(bprm->argv[i]) + 1;
7527 if (write(fd, bprm->argv[i], len) != len) {
7528 return -1;
7532 return 0;
7535 static int open_self_maps(void *cpu_env, int fd)
7537 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7538 TaskState *ts = cpu->opaque;
7539 GSList *map_info = read_self_maps();
7540 GSList *s;
7541 int count;
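    /*
     * Walk the host's mappings and report only ranges that are valid
     * guest addresses, translated into guest terms.  The "73 - count"
     * padding below mimics the kernel, which aligns the path name to a
     * fixed column in /proc/pid/maps output.
     */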
7543 for (s = map_info; s; s = g_slist_next(s)) {
7544 MapInfo *e = (MapInfo *) s->data;
7546 if (h2g_valid(e->start)) {
7547 unsigned long min = e->start;
7548 unsigned long max = e->end;
7549 int flags = page_get_flags(h2g(min));
7550 const char *path;
7552 max = h2g_valid(max - 1) ?
7553 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7555 if (page_check_range(h2g(min), max - min, flags) == -1) {
7556 continue;
7559 if (h2g(min) == ts->info->stack_limit) {
7560 path = "[stack]";
7561 } else {
7562 path = e->path;
7565 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7566 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7567 h2g(min), h2g(max - 1) + 1,
7568 e->is_read ? 'r' : '-',
7569 e->is_write ? 'w' : '-',
7570 e->is_exec ? 'x' : '-',
7571 e->is_priv ? 'p' : '-',
7572 (uint64_t) e->offset, e->dev, e->inode);
7573 if (path) {
7574 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7575 } else {
7576 dprintf(fd, "\n");
7581 free_self_maps(map_info);
7583 #ifdef TARGET_VSYSCALL_PAGE
7585 * We only support execution from the vsyscall page.
7586 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7588 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7589 " --xp 00000000 00:00 0",
7590 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7591 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
7592 #endif
7594 return 0;
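/*
 * Fake /proc/self/stat: only the fields a guest is likely to consult
 * are populated -- field 1 (pid), field 2 (comm) and field 28 (start
 * of the stack) -- and everything else reads as 0.
 */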
7597 static int open_self_stat(void *cpu_env, int fd)
7599 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7600 TaskState *ts = cpu->opaque;
7601 g_autoptr(GString) buf = g_string_new(NULL);
7602 int i;
7604 for (i = 0; i < 44; i++) {
7605 if (i == 0) {
7606 /* pid */
7607 g_string_printf(buf, FMT_pid " ", getpid());
7608 } else if (i == 1) {
7609 /* app name */
7610 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7611 bin = bin ? bin + 1 : ts->bprm->argv[0];
7612 g_string_printf(buf, "(%.15s) ", bin);
7613 } else if (i == 27) {
7614 /* stack bottom */
7615 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7616 } else {
7617 /* for the rest, there is MasterCard */
7618 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7621 if (write(fd, buf->str, buf->len) != buf->len) {
7622 return -1;
7626 return 0;
7629 static int open_self_auxv(void *cpu_env, int fd)
7631 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7632 TaskState *ts = cpu->opaque;
7633 abi_ulong auxv = ts->info->saved_auxv;
7634 abi_ulong len = ts->info->auxv_len;
7635 char *ptr;
7638 * The auxiliary vector is stored on the target process's stack;
7639 * read the whole vector in and copy it to the file.
7641 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7642 if (ptr != NULL) {
7643 while (len > 0) {
7644 ssize_t r;
7645 r = write(fd, ptr, len);
7646 if (r <= 0) {
7647 break;
7649 len -= r;
7650 ptr += r;
7652 lseek(fd, 0, SEEK_SET);
7653 unlock_user(ptr, auxv, len);
7656 return 0;
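/*
 * Returns nonzero if "filename" names the given /proc entry for the
 * current process, i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>".
 */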
7659 static int is_proc_myself(const char *filename, const char *entry)
7661 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7662 filename += strlen("/proc/");
7663 if (!strncmp(filename, "self/", strlen("self/"))) {
7664 filename += strlen("self/");
7665 } else if (*filename >= '1' && *filename <= '9') {
7666 char myself[80];
7667 snprintf(myself, sizeof(myself), "%d/", getpid());
7668 if (!strncmp(filename, myself, strlen(myself))) {
7669 filename += strlen(myself);
7670 } else {
7671 return 0;
7673 } else {
7674 return 0;
7676 if (!strcmp(filename, entry)) {
7677 return 1;
7680 return 0;
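/*
 * Illustrative examples (assuming the emulator's pid is 1234):
 *
 *     is_proc_myself("/proc/self/maps", "maps")  -> 1
 *     is_proc_myself("/proc/1234/maps", "maps")  -> 1
 *     is_proc_myself("/proc/999/maps",  "maps")  -> 0
 *
 * Only "self" and the current pid are intercepted; paths naming other
 * processes fall through to the host filesystem.
 */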
7683 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7684 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7685 static int is_proc(const char *filename, const char *entry)
7687 return strcmp(filename, entry) == 0;
7689 #endif
7691 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7692 static int open_net_route(void *cpu_env, int fd)
7694 FILE *fp;
7695 char *line = NULL;
7696 size_t len = 0;
7697 ssize_t read;
7699 fp = fopen("/proc/net/route", "r");
7700 if (fp == NULL) {
7701 return -1;
7704 /* read header */
7706 read = getline(&line, &len, fp);
7707 dprintf(fd, "%s", line);
7709 /* read routes */
7711 while ((read = getline(&line, &len, fp)) != -1) {
7712 char iface[16];
7713 uint32_t dest, gw, mask;
7714 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7715 int fields;
7717 fields = sscanf(line,
7718 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7719 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7720 &mask, &mtu, &window, &irtt);
7721 if (fields != 11) {
7722 continue;
7724 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7725 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7726 metric, tswap32(mask), mtu, window, irtt);
7729 free(line);
7730 fclose(fp);
7732 return 0;
7734 #endif
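/*
 * Note on the tswap32() calls above: the kernel emits the Destination,
 * Gateway and Mask columns of /proc/net/route as hex dumps of in-memory
 * 32-bit values, i.e. in host byte order.  When host and target
 * endianness differ (the only case this function is compiled for), the
 * addresses must be byte-swapped so the guest decodes the same values;
 * the purely numeric columns need no conversion.
 */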
7736 #if defined(TARGET_SPARC)
7737 static int open_cpuinfo(void *cpu_env, int fd)
7739 dprintf(fd, "type\t\t: sun4u\n");
7740 return 0;
7742 #endif
7744 #if defined(TARGET_HPPA)
7745 static int open_cpuinfo(void *cpu_env, int fd)
7747 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7748 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7749 dprintf(fd, "capabilities\t: os32\n");
7750 dprintf(fd, "model\t\t: 9000/778/B160L\n");
7751 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7752 return 0;
7754 #endif
7756 #if defined(TARGET_M68K)
7757 static int open_hardware(void *cpu_env, int fd)
7759 dprintf(fd, "Model:\t\tqemu-m68k\n");
7760 return 0;
7762 #endif
7764 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7766 struct fake_open {
7767 const char *filename;
7768 int (*fill)(void *cpu_env, int fd);
7769 int (*cmp)(const char *s1, const char *s2);
7771 const struct fake_open *fake_open;
7772 static const struct fake_open fakes[] = {
7773 { "maps", open_self_maps, is_proc_myself },
7774 { "stat", open_self_stat, is_proc_myself },
7775 { "auxv", open_self_auxv, is_proc_myself },
7776 { "cmdline", open_self_cmdline, is_proc_myself },
7777 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7778 { "/proc/net/route", open_net_route, is_proc },
7779 #endif
7780 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7781 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7782 #endif
7783 #if defined(TARGET_M68K)
7784 { "/proc/hardware", open_hardware, is_proc },
7785 #endif
7786 { NULL, NULL, NULL }
7789 if (is_proc_myself(pathname, "exe")) {
7790 int execfd = qemu_getauxval(AT_EXECFD);
7791 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7794 for (fake_open = fakes; fake_open->filename; fake_open++) {
7795 if (fake_open->cmp(pathname, fake_open->filename)) {
7796 break;
7800 if (fake_open->filename) {
7801 const char *tmpdir;
7802 char filename[PATH_MAX];
7803 int fd, r;
7805 /* create a temporary file to hold the synthesized contents */
7806 tmpdir = getenv("TMPDIR");
7807 if (!tmpdir)
7808 tmpdir = "/tmp";
7809 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7810 fd = mkstemp(filename);
7811 if (fd < 0) {
7812 return fd;
7814 unlink(filename);
7816 if ((r = fake_open->fill(cpu_env, fd))) {
7817 int e = errno;
7818 close(fd);
7819 errno = e;
7820 return r;
7822 lseek(fd, 0, SEEK_SET);
7824 return fd;
7827 return safe_openat(dirfd, path(pathname), flags, mode);
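/*
 * Design note: the faked /proc entries above use the classic
 * mkstemp()-then-unlink() idiom.  The temporary file loses its name
 * immediately but the open fd stays valid, so the guest gets ordinary
 * read()/lseek() semantics on a snapshot of the synthesized contents,
 * and the backing file is reclaimed automatically once the last fd is
 * closed.
 */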
7830 #define TIMER_MAGIC 0x0caf0000
7831 #define TIMER_MAGIC_MASK 0xffff0000
7833 /* Convert QEMU provided timer ID back to internal 16bit index format */
7834 static target_timer_t get_timer_id(abi_long arg)
7836 target_timer_t timerid = arg;
7838 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7839 return -TARGET_EINVAL;
7842 timerid &= 0xffff;
7844 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7845 return -TARGET_EINVAL;
7848 return timerid;
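/*
 * Worked example (values illustrative): timer slot 3 is exposed to the
 * guest as 0x0caf0003.  get_timer_id(0x0caf0003) verifies the
 * TIMER_MAGIC tag in the top 16 bits and recovers index 3; an id with
 * the wrong tag, or whose low 16 bits fall outside g_posix_timers,
 * yields -TARGET_EINVAL.
 */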
7851 static int target_to_host_cpu_mask(unsigned long *host_mask,
7852 size_t host_size,
7853 abi_ulong target_addr,
7854 size_t target_size)
7856 unsigned target_bits = sizeof(abi_ulong) * 8;
7857 unsigned host_bits = sizeof(*host_mask) * 8;
7858 abi_ulong *target_mask;
7859 unsigned i, j;
7861 assert(host_size >= target_size);
7863 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7864 if (!target_mask) {
7865 return -TARGET_EFAULT;
7867 memset(host_mask, 0, host_size);
7869 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7870 unsigned bit = i * target_bits;
7871 abi_ulong val;
7873 __get_user(val, &target_mask[i]);
7874 for (j = 0; j < target_bits; j++, bit++) {
7875 if (val & (1UL << j)) {
7876 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7881 unlock_user(target_mask, target_addr, 0);
7882 return 0;
7885 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7886 size_t host_size,
7887 abi_ulong target_addr,
7888 size_t target_size)
7890 unsigned target_bits = sizeof(abi_ulong) * 8;
7891 unsigned host_bits = sizeof(*host_mask) * 8;
7892 abi_ulong *target_mask;
7893 unsigned i, j;
7895 assert(host_size >= target_size);
7897 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7898 if (!target_mask) {
7899 return -TARGET_EFAULT;
7902 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7903 unsigned bit = i * target_bits;
7904 abi_ulong val = 0;
7906 for (j = 0; j < target_bits; j++, bit++) {
7907 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7908 val |= 1UL << j;
7911 __put_user(val, &target_mask[i]);
7914 unlock_user(target_mask, target_addr, target_size);
7915 return 0;
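/*
 * Illustrative example for the two converters above: with a 32-bit
 * guest on a 64-bit host, target_bits is 32 and host_bits is 64, so
 * guest words 0 and 1 fold into host word 0 (guest word 1 supplying
 * bits 32..63), and the reverse conversion splits each host word back
 * into two abi_ulong words.  When the widths match, the loops reduce
 * to a byte-order-aware copy via __get_user()/__put_user().
 */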
7918 /* This is an internal helper for do_syscall so that it is easier
7919 * to have a single return point, so that actions, such as logging
7920 * of syscall results, can be performed.
7921 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7923 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7924 abi_long arg2, abi_long arg3, abi_long arg4,
7925 abi_long arg5, abi_long arg6, abi_long arg7,
7926 abi_long arg8)
7928 CPUState *cpu = env_cpu(cpu_env);
7929 abi_long ret;
7930 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7931 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7932 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7933 || defined(TARGET_NR_statx)
7934 struct stat st;
7935 #endif
7936 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7937 || defined(TARGET_NR_fstatfs)
7938 struct statfs stfs;
7939 #endif
7940 void *p;
7942 switch(num) {
7943 case TARGET_NR_exit:
7944 /* In old applications this may be used to implement _exit(2).
7945 However in threaded applications it is used for thread termination,
7946 and _exit_group is used for application termination.
7947 Do thread termination if we have more than one thread. */
7949 if (block_signals()) {
7950 return -TARGET_ERESTARTSYS;
7953 pthread_mutex_lock(&clone_lock);
7955 if (CPU_NEXT(first_cpu)) {
7956 TaskState *ts = cpu->opaque;
7958 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7959 object_unref(OBJECT(cpu));
7961 * At this point the CPU should be unrealized and removed
7962 * from cpu lists. We can clean up the rest of the thread
7963 * data without the lock held.
7966 pthread_mutex_unlock(&clone_lock);
7968 if (ts->child_tidptr) {
7969 put_user_u32(0, ts->child_tidptr);
7970 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7971 NULL, NULL, 0);
7973 thread_cpu = NULL;
7974 g_free(ts);
7975 rcu_unregister_thread();
7976 pthread_exit(NULL);
7979 pthread_mutex_unlock(&clone_lock);
7980 preexit_cleanup(cpu_env, arg1);
7981 _exit(arg1);
7982 return 0; /* avoid warning */
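/*
 * Note: clearing *child_tidptr and issuing FUTEX_WAKE above mirrors
 * the kernel's CLONE_CHILD_CLEARTID semantics on thread exit, which is
 * what allows a pthread_join() in another guest thread to observe this
 * thread's termination.
 */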
7983 case TARGET_NR_read:
7984 if (arg2 == 0 && arg3 == 0) {
7985 return get_errno(safe_read(arg1, 0, 0));
7986 } else {
7987 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7988 return -TARGET_EFAULT;
7989 ret = get_errno(safe_read(arg1, p, arg3));
7990 if (ret >= 0 &&
7991 fd_trans_host_to_target_data(arg1)) {
7992 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7994 unlock_user(p, arg2, ret);
7996 return ret;
7997 case TARGET_NR_write:
7998 if (arg2 == 0 && arg3 == 0) {
7999 return get_errno(safe_write(arg1, 0, 0));
8001 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8002 return -TARGET_EFAULT;
8003 if (fd_trans_target_to_host_data(arg1)) {
8004 void *copy = g_malloc(arg3);
8005 memcpy(copy, p, arg3);
8006 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8007 if (ret >= 0) {
8008 ret = get_errno(safe_write(arg1, copy, ret));
8010 g_free(copy);
8011 } else {
8012 ret = get_errno(safe_write(arg1, p, arg3));
8014 unlock_user(p, arg2, 0);
8015 return ret;
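/*
 * Note on the fd_trans hooks above: fd_trans_host_to_target_data() and
 * fd_trans_target_to_host_data() return an optional per-fd translation
 * callback (registered, for example, for netlink sockets) that
 * rewrites payloads between host and target formats.  The write path
 * works on a copy because the callback may modify the buffer in place,
 * while the guest memory was only locked for reading.
 */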
8017 #ifdef TARGET_NR_open
8018 case TARGET_NR_open:
8019 if (!(p = lock_user_string(arg1)))
8020 return -TARGET_EFAULT;
8021 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8022 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8023 arg3));
8024 fd_trans_unregister(ret);
8025 unlock_user(p, arg1, 0);
8026 return ret;
8027 #endif
8028 case TARGET_NR_openat:
8029 if (!(p = lock_user_string(arg2)))
8030 return -TARGET_EFAULT;
8031 ret = get_errno(do_openat(cpu_env, arg1, p,
8032 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8033 arg4));
8034 fd_trans_unregister(ret);
8035 unlock_user(p, arg2, 0);
8036 return ret;
8037 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8038 case TARGET_NR_name_to_handle_at:
8039 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8040 return ret;
8041 #endif
8042 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8043 case TARGET_NR_open_by_handle_at:
8044 ret = do_open_by_handle_at(arg1, arg2, arg3);
8045 fd_trans_unregister(ret);
8046 return ret;
8047 #endif
8048 case TARGET_NR_close:
8049 fd_trans_unregister(arg1);
8050 return get_errno(close(arg1));
8052 case TARGET_NR_brk:
8053 return do_brk(arg1);
8054 #ifdef TARGET_NR_fork
8055 case TARGET_NR_fork:
8056 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8057 #endif
8058 #ifdef TARGET_NR_waitpid
8059 case TARGET_NR_waitpid:
8061 int status;
8062 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8063 if (!is_error(ret) && arg2 && ret
8064 && put_user_s32(host_to_target_waitstatus(status), arg2))
8065 return -TARGET_EFAULT;
8067 return ret;
8068 #endif
8069 #ifdef TARGET_NR_waitid
8070 case TARGET_NR_waitid:
8072 siginfo_t info;
8073 info.si_pid = 0;
8074 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8075 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8076 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8077 return -TARGET_EFAULT;
8078 host_to_target_siginfo(p, &info);
8079 unlock_user(p, arg3, sizeof(target_siginfo_t));
8082 return ret;
8083 #endif
8084 #ifdef TARGET_NR_creat /* not on alpha */
8085 case TARGET_NR_creat:
8086 if (!(p = lock_user_string(arg1)))
8087 return -TARGET_EFAULT;
8088 ret = get_errno(creat(p, arg2));
8089 fd_trans_unregister(ret);
8090 unlock_user(p, arg1, 0);
8091 return ret;
8092 #endif
8093 #ifdef TARGET_NR_link
8094 case TARGET_NR_link:
8096 void * p2;
8097 p = lock_user_string(arg1);
8098 p2 = lock_user_string(arg2);
8099 if (!p || !p2)
8100 ret = -TARGET_EFAULT;
8101 else
8102 ret = get_errno(link(p, p2));
8103 unlock_user(p2, arg2, 0);
8104 unlock_user(p, arg1, 0);
8106 return ret;
8107 #endif
8108 #if defined(TARGET_NR_linkat)
8109 case TARGET_NR_linkat:
8111 void * p2 = NULL;
8112 if (!arg2 || !arg4)
8113 return -TARGET_EFAULT;
8114 p = lock_user_string(arg2);
8115 p2 = lock_user_string(arg4);
8116 if (!p || !p2)
8117 ret = -TARGET_EFAULT;
8118 else
8119 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8120 unlock_user(p, arg2, 0);
8121 unlock_user(p2, arg4, 0);
8123 return ret;
8124 #endif
8125 #ifdef TARGET_NR_unlink
8126 case TARGET_NR_unlink:
8127 if (!(p = lock_user_string(arg1)))
8128 return -TARGET_EFAULT;
8129 ret = get_errno(unlink(p));
8130 unlock_user(p, arg1, 0);
8131 return ret;
8132 #endif
8133 #if defined(TARGET_NR_unlinkat)
8134 case TARGET_NR_unlinkat:
8135 if (!(p = lock_user_string(arg2)))
8136 return -TARGET_EFAULT;
8137 ret = get_errno(unlinkat(arg1, p, arg3));
8138 unlock_user(p, arg2, 0);
8139 return ret;
8140 #endif
8141 case TARGET_NR_execve:
8143 char **argp, **envp;
8144 int argc, envc;
8145 abi_ulong gp;
8146 abi_ulong guest_argp;
8147 abi_ulong guest_envp;
8148 abi_ulong addr;
8149 char **q;
8150 int total_size = 0;
8152 argc = 0;
8153 guest_argp = arg2;
8154 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8155 if (get_user_ual(addr, gp))
8156 return -TARGET_EFAULT;
8157 if (!addr)
8158 break;
8159 argc++;
8161 envc = 0;
8162 guest_envp = arg3;
8163 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8164 if (get_user_ual(addr, gp))
8165 return -TARGET_EFAULT;
8166 if (!addr)
8167 break;
8168 envc++;
8171 argp = g_new0(char *, argc + 1);
8172 envp = g_new0(char *, envc + 1);
8174 for (gp = guest_argp, q = argp; gp;
8175 gp += sizeof(abi_ulong), q++) {
8176 if (get_user_ual(addr, gp))
8177 goto execve_efault;
8178 if (!addr)
8179 break;
8180 if (!(*q = lock_user_string(addr)))
8181 goto execve_efault;
8182 total_size += strlen(*q) + 1;
8184 *q = NULL;
8186 for (gp = guest_envp, q = envp; gp;
8187 gp += sizeof(abi_ulong), q++) {
8188 if (get_user_ual(addr, gp))
8189 goto execve_efault;
8190 if (!addr)
8191 break;
8192 if (!(*q = lock_user_string(addr)))
8193 goto execve_efault;
8194 total_size += strlen(*q) + 1;
8196 *q = NULL;
8198 if (!(p = lock_user_string(arg1)))
8199 goto execve_efault;
8200 /* Although execve() is not an interruptible syscall it is
8201 * a special case where we must use the safe_syscall wrapper:
8202 * if we allow a signal to happen before we make the host
8203 * syscall then we will 'lose' it, because at the point of
8204 * execve the process leaves QEMU's control. So we use the
8205 * safe syscall wrapper to ensure that we either take the
8206 * signal as a guest signal, or else it does not happen
8207 * before the execve completes and makes it the other
8208 * program's problem.
8210 ret = get_errno(safe_execve(p, argp, envp));
8211 unlock_user(p, arg1, 0);
8213 goto execve_end;
8215 execve_efault:
8216 ret = -TARGET_EFAULT;
8218 execve_end:
8219 for (gp = guest_argp, q = argp; *q;
8220 gp += sizeof(abi_ulong), q++) {
8221 if (get_user_ual(addr, gp)
8222 || !addr)
8223 break;
8224 unlock_user(*q, addr, 0);
8226 for (gp = guest_envp, q = envp; *q;
8227 gp += sizeof(abi_ulong), q++) {
8228 if (get_user_ual(addr, gp)
8229 || !addr)
8230 break;
8231 unlock_user(*q, addr, 0);
8234 g_free(argp);
8235 g_free(envp);
8237 return ret;
8238 case TARGET_NR_chdir:
8239 if (!(p = lock_user_string(arg1)))
8240 return -TARGET_EFAULT;
8241 ret = get_errno(chdir(p));
8242 unlock_user(p, arg1, 0);
8243 return ret;
8244 #ifdef TARGET_NR_time
8245 case TARGET_NR_time:
8247 time_t host_time;
8248 ret = get_errno(time(&host_time));
8249 if (!is_error(ret)
8250 && arg1
8251 && put_user_sal(host_time, arg1))
8252 return -TARGET_EFAULT;
8254 return ret;
8255 #endif
8256 #ifdef TARGET_NR_mknod
8257 case TARGET_NR_mknod:
8258 if (!(p = lock_user_string(arg1)))
8259 return -TARGET_EFAULT;
8260 ret = get_errno(mknod(p, arg2, arg3));
8261 unlock_user(p, arg1, 0);
8262 return ret;
8263 #endif
8264 #if defined(TARGET_NR_mknodat)
8265 case TARGET_NR_mknodat:
8266 if (!(p = lock_user_string(arg2)))
8267 return -TARGET_EFAULT;
8268 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8269 unlock_user(p, arg2, 0);
8270 return ret;
8271 #endif
8272 #ifdef TARGET_NR_chmod
8273 case TARGET_NR_chmod:
8274 if (!(p = lock_user_string(arg1)))
8275 return -TARGET_EFAULT;
8276 ret = get_errno(chmod(p, arg2));
8277 unlock_user(p, arg1, 0);
8278 return ret;
8279 #endif
8280 #ifdef TARGET_NR_lseek
8281 case TARGET_NR_lseek:
8282 return get_errno(lseek(arg1, arg2, arg3));
8283 #endif
8284 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8285 /* Alpha specific */
8286 case TARGET_NR_getxpid:
8287 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8288 return get_errno(getpid());
8289 #endif
8290 #ifdef TARGET_NR_getpid
8291 case TARGET_NR_getpid:
8292 return get_errno(getpid());
8293 #endif
8294 case TARGET_NR_mount:
8296 /* need to look at the data field */
8297 void *p2, *p3;
8299 if (arg1) {
8300 p = lock_user_string(arg1);
8301 if (!p) {
8302 return -TARGET_EFAULT;
8304 } else {
8305 p = NULL;
8308 p2 = lock_user_string(arg2);
8309 if (!p2) {
8310 if (arg1) {
8311 unlock_user(p, arg1, 0);
8313 return -TARGET_EFAULT;
8316 if (arg3) {
8317 p3 = lock_user_string(arg3);
8318 if (!p3) {
8319 if (arg1) {
8320 unlock_user(p, arg1, 0);
8322 unlock_user(p2, arg2, 0);
8323 return -TARGET_EFAULT;
8325 } else {
8326 p3 = NULL;
8329 /* FIXME - arg5 should be locked, but it isn't clear how to
8330 * do that since it's not guaranteed to be a NULL-terminated
8331 * string.
8333 if (!arg5) {
8334 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8335 } else {
8336 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8338 ret = get_errno(ret);
8340 if (arg1) {
8341 unlock_user(p, arg1, 0);
8343 unlock_user(p2, arg2, 0);
8344 if (arg3) {
8345 unlock_user(p3, arg3, 0);
8348 return ret;
8349 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8350 #if defined(TARGET_NR_umount)
8351 case TARGET_NR_umount:
8352 #endif
8353 #if defined(TARGET_NR_oldumount)
8354 case TARGET_NR_oldumount:
8355 #endif
8356 if (!(p = lock_user_string(arg1)))
8357 return -TARGET_EFAULT;
8358 ret = get_errno(umount(p));
8359 unlock_user(p, arg1, 0);
8360 return ret;
8361 #endif
8362 #ifdef TARGET_NR_stime /* not on alpha */
8363 case TARGET_NR_stime:
8365 struct timespec ts;
8366 ts.tv_nsec = 0;
8367 if (get_user_sal(ts.tv_sec, arg1)) {
8368 return -TARGET_EFAULT;
8370 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8372 #endif
8373 #ifdef TARGET_NR_alarm /* not on alpha */
8374 case TARGET_NR_alarm:
8375 return alarm(arg1);
8376 #endif
8377 #ifdef TARGET_NR_pause /* not on alpha */
8378 case TARGET_NR_pause:
8379 if (!block_signals()) {
8380 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8382 return -TARGET_EINTR;
8383 #endif
8384 #ifdef TARGET_NR_utime
8385 case TARGET_NR_utime:
8387 struct utimbuf tbuf, *host_tbuf;
8388 struct target_utimbuf *target_tbuf;
8389 if (arg2) {
8390 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8391 return -TARGET_EFAULT;
8392 tbuf.actime = tswapal(target_tbuf->actime);
8393 tbuf.modtime = tswapal(target_tbuf->modtime);
8394 unlock_user_struct(target_tbuf, arg2, 0);
8395 host_tbuf = &tbuf;
8396 } else {
8397 host_tbuf = NULL;
8399 if (!(p = lock_user_string(arg1)))
8400 return -TARGET_EFAULT;
8401 ret = get_errno(utime(p, host_tbuf));
8402 unlock_user(p, arg1, 0);
8404 return ret;
8405 #endif
8406 #ifdef TARGET_NR_utimes
8407 case TARGET_NR_utimes:
8409 struct timeval *tvp, tv[2];
8410 if (arg2) {
8411 if (copy_from_user_timeval(&tv[0], arg2)
8412 || copy_from_user_timeval(&tv[1],
8413 arg2 + sizeof(struct target_timeval)))
8414 return -TARGET_EFAULT;
8415 tvp = tv;
8416 } else {
8417 tvp = NULL;
8419 if (!(p = lock_user_string(arg1)))
8420 return -TARGET_EFAULT;
8421 ret = get_errno(utimes(p, tvp));
8422 unlock_user(p, arg1, 0);
8424 return ret;
8425 #endif
8426 #if defined(TARGET_NR_futimesat)
8427 case TARGET_NR_futimesat:
8429 struct timeval *tvp, tv[2];
8430 if (arg3) {
8431 if (copy_from_user_timeval(&tv[0], arg3)
8432 || copy_from_user_timeval(&tv[1],
8433 arg3 + sizeof(struct target_timeval)))
8434 return -TARGET_EFAULT;
8435 tvp = tv;
8436 } else {
8437 tvp = NULL;
8439 if (!(p = lock_user_string(arg2))) {
8440 return -TARGET_EFAULT;
8442 ret = get_errno(futimesat(arg1, path(p), tvp));
8443 unlock_user(p, arg2, 0);
8445 return ret;
8446 #endif
8447 #ifdef TARGET_NR_access
8448 case TARGET_NR_access:
8449 if (!(p = lock_user_string(arg1))) {
8450 return -TARGET_EFAULT;
8452 ret = get_errno(access(path(p), arg2));
8453 unlock_user(p, arg1, 0);
8454 return ret;
8455 #endif
8456 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8457 case TARGET_NR_faccessat:
8458 if (!(p = lock_user_string(arg2))) {
8459 return -TARGET_EFAULT;
8461 ret = get_errno(faccessat(arg1, p, arg3, 0));
8462 unlock_user(p, arg2, 0);
8463 return ret;
8464 #endif
8465 #ifdef TARGET_NR_nice /* not on alpha */
8466 case TARGET_NR_nice:
8467 return get_errno(nice(arg1));
8468 #endif
8469 case TARGET_NR_sync:
8470 sync();
8471 return 0;
8472 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8473 case TARGET_NR_syncfs:
8474 return get_errno(syncfs(arg1));
8475 #endif
8476 case TARGET_NR_kill:
8477 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8478 #ifdef TARGET_NR_rename
8479 case TARGET_NR_rename:
8481 void *p2;
8482 p = lock_user_string(arg1);
8483 p2 = lock_user_string(arg2);
8484 if (!p || !p2)
8485 ret = -TARGET_EFAULT;
8486 else
8487 ret = get_errno(rename(p, p2));
8488 unlock_user(p2, arg2, 0);
8489 unlock_user(p, arg1, 0);
8491 return ret;
8492 #endif
8493 #if defined(TARGET_NR_renameat)
8494 case TARGET_NR_renameat:
8496 void *p2;
8497 p = lock_user_string(arg2);
8498 p2 = lock_user_string(arg4);
8499 if (!p || !p2)
8500 ret = -TARGET_EFAULT;
8501 else
8502 ret = get_errno(renameat(arg1, p, arg3, p2));
8503 unlock_user(p2, arg4, 0);
8504 unlock_user(p, arg2, 0);
8506 return ret;
8507 #endif
8508 #if defined(TARGET_NR_renameat2)
8509 case TARGET_NR_renameat2:
8511 void *p2;
8512 p = lock_user_string(arg2);
8513 p2 = lock_user_string(arg4);
8514 if (!p || !p2) {
8515 ret = -TARGET_EFAULT;
8516 } else {
8517 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8519 unlock_user(p2, arg4, 0);
8520 unlock_user(p, arg2, 0);
8522 return ret;
8523 #endif
8524 #ifdef TARGET_NR_mkdir
8525 case TARGET_NR_mkdir:
8526 if (!(p = lock_user_string(arg1)))
8527 return -TARGET_EFAULT;
8528 ret = get_errno(mkdir(p, arg2));
8529 unlock_user(p, arg1, 0);
8530 return ret;
8531 #endif
8532 #if defined(TARGET_NR_mkdirat)
8533 case TARGET_NR_mkdirat:
8534 if (!(p = lock_user_string(arg2)))
8535 return -TARGET_EFAULT;
8536 ret = get_errno(mkdirat(arg1, p, arg3));
8537 unlock_user(p, arg2, 0);
8538 return ret;
8539 #endif
8540 #ifdef TARGET_NR_rmdir
8541 case TARGET_NR_rmdir:
8542 if (!(p = lock_user_string(arg1)))
8543 return -TARGET_EFAULT;
8544 ret = get_errno(rmdir(p));
8545 unlock_user(p, arg1, 0);
8546 return ret;
8547 #endif
8548 case TARGET_NR_dup:
8549 ret = get_errno(dup(arg1));
8550 if (ret >= 0) {
8551 fd_trans_dup(arg1, ret);
8553 return ret;
8554 #ifdef TARGET_NR_pipe
8555 case TARGET_NR_pipe:
8556 return do_pipe(cpu_env, arg1, 0, 0);
8557 #endif
8558 #ifdef TARGET_NR_pipe2
8559 case TARGET_NR_pipe2:
8560 return do_pipe(cpu_env, arg1,
8561 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8562 #endif
8563 case TARGET_NR_times:
8565 struct target_tms *tmsp;
8566 struct tms tms;
8567 ret = get_errno(times(&tms));
8568 if (arg1) {
8569 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8570 if (!tmsp)
8571 return -TARGET_EFAULT;
8572 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8573 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8574 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8575 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8577 if (!is_error(ret))
8578 ret = host_to_target_clock_t(ret);
8580 return ret;
8581 case TARGET_NR_acct:
8582 if (arg1 == 0) {
8583 ret = get_errno(acct(NULL));
8584 } else {
8585 if (!(p = lock_user_string(arg1))) {
8586 return -TARGET_EFAULT;
8588 ret = get_errno(acct(path(p)));
8589 unlock_user(p, arg1, 0);
8591 return ret;
8592 #ifdef TARGET_NR_umount2
8593 case TARGET_NR_umount2:
8594 if (!(p = lock_user_string(arg1)))
8595 return -TARGET_EFAULT;
8596 ret = get_errno(umount2(p, arg2));
8597 unlock_user(p, arg1, 0);
8598 return ret;
8599 #endif
8600 case TARGET_NR_ioctl:
8601 return do_ioctl(arg1, arg2, arg3);
8602 #ifdef TARGET_NR_fcntl
8603 case TARGET_NR_fcntl:
8604 return do_fcntl(arg1, arg2, arg3);
8605 #endif
8606 case TARGET_NR_setpgid:
8607 return get_errno(setpgid(arg1, arg2));
8608 case TARGET_NR_umask:
8609 return get_errno(umask(arg1));
8610 case TARGET_NR_chroot:
8611 if (!(p = lock_user_string(arg1)))
8612 return -TARGET_EFAULT;
8613 ret = get_errno(chroot(p));
8614 unlock_user(p, arg1, 0);
8615 return ret;
8616 #ifdef TARGET_NR_dup2
8617 case TARGET_NR_dup2:
8618 ret = get_errno(dup2(arg1, arg2));
8619 if (ret >= 0) {
8620 fd_trans_dup(arg1, arg2);
8622 return ret;
8623 #endif
8624 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8625 case TARGET_NR_dup3:
8627 int host_flags;
8629 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8630 return -EINVAL;
8632 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8633 ret = get_errno(dup3(arg1, arg2, host_flags));
8634 if (ret >= 0) {
8635 fd_trans_dup(arg1, arg2);
8637 return ret;
8639 #endif
8640 #ifdef TARGET_NR_getppid /* not on alpha */
8641 case TARGET_NR_getppid:
8642 return get_errno(getppid());
8643 #endif
8644 #ifdef TARGET_NR_getpgrp
8645 case TARGET_NR_getpgrp:
8646 return get_errno(getpgrp());
8647 #endif
8648 case TARGET_NR_setsid:
8649 return get_errno(setsid());
8650 #ifdef TARGET_NR_sigaction
8651 case TARGET_NR_sigaction:
8653 #if defined(TARGET_ALPHA)
8654 struct target_sigaction act, oact, *pact = 0;
8655 struct target_old_sigaction *old_act;
8656 if (arg2) {
8657 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8658 return -TARGET_EFAULT;
8659 act._sa_handler = old_act->_sa_handler;
8660 target_siginitset(&act.sa_mask, old_act->sa_mask);
8661 act.sa_flags = old_act->sa_flags;
8662 act.sa_restorer = 0;
8663 unlock_user_struct(old_act, arg2, 0);
8664 pact = &act;
8666 ret = get_errno(do_sigaction(arg1, pact, &oact));
8667 if (!is_error(ret) && arg3) {
8668 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8669 return -TARGET_EFAULT;
8670 old_act->_sa_handler = oact._sa_handler;
8671 old_act->sa_mask = oact.sa_mask.sig[0];
8672 old_act->sa_flags = oact.sa_flags;
8673 unlock_user_struct(old_act, arg3, 1);
8675 #elif defined(TARGET_MIPS)
8676 struct target_sigaction act, oact, *pact, *old_act;
8678 if (arg2) {
8679 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8680 return -TARGET_EFAULT;
8681 act._sa_handler = old_act->_sa_handler;
8682 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8683 act.sa_flags = old_act->sa_flags;
8684 unlock_user_struct(old_act, arg2, 0);
8685 pact = &act;
8686 } else {
8687 pact = NULL;
8690 ret = get_errno(do_sigaction(arg1, pact, &oact));
8692 if (!is_error(ret) && arg3) {
8693 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8694 return -TARGET_EFAULT;
8695 old_act->_sa_handler = oact._sa_handler;
8696 old_act->sa_flags = oact.sa_flags;
8697 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8698 old_act->sa_mask.sig[1] = 0;
8699 old_act->sa_mask.sig[2] = 0;
8700 old_act->sa_mask.sig[3] = 0;
8701 unlock_user_struct(old_act, arg3, 1);
8703 #else
8704 struct target_old_sigaction *old_act;
8705 struct target_sigaction act, oact, *pact;
8706 if (arg2) {
8707 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8708 return -TARGET_EFAULT;
8709 act._sa_handler = old_act->_sa_handler;
8710 target_siginitset(&act.sa_mask, old_act->sa_mask);
8711 act.sa_flags = old_act->sa_flags;
8712 act.sa_restorer = old_act->sa_restorer;
8713 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8714 act.ka_restorer = 0;
8715 #endif
8716 unlock_user_struct(old_act, arg2, 0);
8717 pact = &act;
8718 } else {
8719 pact = NULL;
8721 ret = get_errno(do_sigaction(arg1, pact, &oact));
8722 if (!is_error(ret) && arg3) {
8723 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8724 return -TARGET_EFAULT;
8725 old_act->_sa_handler = oact._sa_handler;
8726 old_act->sa_mask = oact.sa_mask.sig[0];
8727 old_act->sa_flags = oact.sa_flags;
8728 old_act->sa_restorer = oact.sa_restorer;
8729 unlock_user_struct(old_act, arg3, 1);
8731 #endif
8733 return ret;
8734 #endif
8735 case TARGET_NR_rt_sigaction:
8737 #if defined(TARGET_ALPHA)
8738 /* For Alpha and SPARC this is a 5 argument syscall, with
8739 * a 'restorer' parameter which must be copied into the
8740 * sa_restorer field of the sigaction struct.
8741 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8742 * and arg5 is the sigsetsize.
8743 * Alpha also has a separate rt_sigaction struct that it uses
8744 * here; SPARC uses the usual sigaction struct.
8746 struct target_rt_sigaction *rt_act;
8747 struct target_sigaction act, oact, *pact = 0;
8749 if (arg4 != sizeof(target_sigset_t)) {
8750 return -TARGET_EINVAL;
8752 if (arg2) {
8753 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8754 return -TARGET_EFAULT;
8755 act._sa_handler = rt_act->_sa_handler;
8756 act.sa_mask = rt_act->sa_mask;
8757 act.sa_flags = rt_act->sa_flags;
8758 act.sa_restorer = arg5;
8759 unlock_user_struct(rt_act, arg2, 0);
8760 pact = &act;
8762 ret = get_errno(do_sigaction(arg1, pact, &oact));
8763 if (!is_error(ret) && arg3) {
8764 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8765 return -TARGET_EFAULT;
8766 rt_act->_sa_handler = oact._sa_handler;
8767 rt_act->sa_mask = oact.sa_mask;
8768 rt_act->sa_flags = oact.sa_flags;
8769 unlock_user_struct(rt_act, arg3, 1);
8771 #else
8772 #ifdef TARGET_SPARC
8773 target_ulong restorer = arg4;
8774 target_ulong sigsetsize = arg5;
8775 #else
8776 target_ulong sigsetsize = arg4;
8777 #endif
8778 struct target_sigaction *act;
8779 struct target_sigaction *oact;
8781 if (sigsetsize != sizeof(target_sigset_t)) {
8782 return -TARGET_EINVAL;
8784 if (arg2) {
8785 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8786 return -TARGET_EFAULT;
8788 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8789 act->ka_restorer = restorer;
8790 #endif
8791 } else {
8792 act = NULL;
8794 if (arg3) {
8795 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8796 ret = -TARGET_EFAULT;
8797 goto rt_sigaction_fail;
8799 } else
8800 oact = NULL;
8801 ret = get_errno(do_sigaction(arg1, act, oact));
8802 rt_sigaction_fail:
8803 if (act)
8804 unlock_user_struct(act, arg2, 0);
8805 if (oact)
8806 unlock_user_struct(oact, arg3, 1);
8807 #endif
8809 return ret;
8810 #ifdef TARGET_NR_sgetmask /* not on alpha */
8811 case TARGET_NR_sgetmask:
8813 sigset_t cur_set;
8814 abi_ulong target_set;
8815 ret = do_sigprocmask(0, NULL, &cur_set);
8816 if (!ret) {
8817 host_to_target_old_sigset(&target_set, &cur_set);
8818 ret = target_set;
8821 return ret;
8822 #endif
8823 #ifdef TARGET_NR_ssetmask /* not on alpha */
8824 case TARGET_NR_ssetmask:
8826 sigset_t set, oset;
8827 abi_ulong target_set = arg1;
8828 target_to_host_old_sigset(&set, &target_set);
8829 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8830 if (!ret) {
8831 host_to_target_old_sigset(&target_set, &oset);
8832 ret = target_set;
8835 return ret;
8836 #endif
8837 #ifdef TARGET_NR_sigprocmask
8838 case TARGET_NR_sigprocmask:
8840 #if defined(TARGET_ALPHA)
8841 sigset_t set, oldset;
8842 abi_ulong mask;
8843 int how;
8845 switch (arg1) {
8846 case TARGET_SIG_BLOCK:
8847 how = SIG_BLOCK;
8848 break;
8849 case TARGET_SIG_UNBLOCK:
8850 how = SIG_UNBLOCK;
8851 break;
8852 case TARGET_SIG_SETMASK:
8853 how = SIG_SETMASK;
8854 break;
8855 default:
8856 return -TARGET_EINVAL;
8858 mask = arg2;
8859 target_to_host_old_sigset(&set, &mask);
8861 ret = do_sigprocmask(how, &set, &oldset);
8862 if (!is_error(ret)) {
8863 host_to_target_old_sigset(&mask, &oldset);
8864 ret = mask;
8865 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8867 #else
8868 sigset_t set, oldset, *set_ptr;
8869 int how;
8871 if (arg2) {
8872 switch (arg1) {
8873 case TARGET_SIG_BLOCK:
8874 how = SIG_BLOCK;
8875 break;
8876 case TARGET_SIG_UNBLOCK:
8877 how = SIG_UNBLOCK;
8878 break;
8879 case TARGET_SIG_SETMASK:
8880 how = SIG_SETMASK;
8881 break;
8882 default:
8883 return -TARGET_EINVAL;
8885 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8886 return -TARGET_EFAULT;
8887 target_to_host_old_sigset(&set, p);
8888 unlock_user(p, arg2, 0);
8889 set_ptr = &set;
8890 } else {
8891 how = 0;
8892 set_ptr = NULL;
8894 ret = do_sigprocmask(how, set_ptr, &oldset);
8895 if (!is_error(ret) && arg3) {
8896 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8897 return -TARGET_EFAULT;
8898 host_to_target_old_sigset(p, &oldset);
8899 unlock_user(p, arg3, sizeof(target_sigset_t));
8901 #endif
8903 return ret;
8904 #endif
8905 case TARGET_NR_rt_sigprocmask:
8907 int how = arg1;
8908 sigset_t set, oldset, *set_ptr;
8910 if (arg4 != sizeof(target_sigset_t)) {
8911 return -TARGET_EINVAL;
8914 if (arg2) {
8915 switch(how) {
8916 case TARGET_SIG_BLOCK:
8917 how = SIG_BLOCK;
8918 break;
8919 case TARGET_SIG_UNBLOCK:
8920 how = SIG_UNBLOCK;
8921 break;
8922 case TARGET_SIG_SETMASK:
8923 how = SIG_SETMASK;
8924 break;
8925 default:
8926 return -TARGET_EINVAL;
8928 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8929 return -TARGET_EFAULT;
8930 target_to_host_sigset(&set, p);
8931 unlock_user(p, arg2, 0);
8932 set_ptr = &set;
8933 } else {
8934 how = 0;
8935 set_ptr = NULL;
8937 ret = do_sigprocmask(how, set_ptr, &oldset);
8938 if (!is_error(ret) && arg3) {
8939 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8940 return -TARGET_EFAULT;
8941 host_to_target_sigset(p, &oldset);
8942 unlock_user(p, arg3, sizeof(target_sigset_t));
8945 return ret;
8946 #ifdef TARGET_NR_sigpending
8947 case TARGET_NR_sigpending:
8949 sigset_t set;
8950 ret = get_errno(sigpending(&set));
8951 if (!is_error(ret)) {
8952 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8953 return -TARGET_EFAULT;
8954 host_to_target_old_sigset(p, &set);
8955 unlock_user(p, arg1, sizeof(target_sigset_t));
8958 return ret;
8959 #endif
8960 case TARGET_NR_rt_sigpending:
8962 sigset_t set;
8964 /* Yes, this check is >, not != like most. We follow the kernel's
8965 * logic and it does it like this because it implements
8966 * NR_sigpending through the same code path, and in that case
8967 * the old_sigset_t is smaller in size.
8969 if (arg2 > sizeof(target_sigset_t)) {
8970 return -TARGET_EINVAL;
8973 ret = get_errno(sigpending(&set));
8974 if (!is_error(ret)) {
8975 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8976 return -TARGET_EFAULT;
8977 host_to_target_sigset(p, &set);
8978 unlock_user(p, arg1, sizeof(target_sigset_t));
8981 return ret;
8982 #ifdef TARGET_NR_sigsuspend
8983 case TARGET_NR_sigsuspend:
8985 TaskState *ts = cpu->opaque;
8986 #if defined(TARGET_ALPHA)
8987 abi_ulong mask = arg1;
8988 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8989 #else
8990 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8991 return -TARGET_EFAULT;
8992 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8993 unlock_user(p, arg1, 0);
8994 #endif
8995 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8996 SIGSET_T_SIZE));
8997 if (ret != -TARGET_ERESTARTSYS) {
8998 ts->in_sigsuspend = 1;
9001 return ret;
9002 #endif
9003 case TARGET_NR_rt_sigsuspend:
9005 TaskState *ts = cpu->opaque;
9007 if (arg2 != sizeof(target_sigset_t)) {
9008 return -TARGET_EINVAL;
9010 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9011 return -TARGET_EFAULT;
9012 target_to_host_sigset(&ts->sigsuspend_mask, p);
9013 unlock_user(p, arg1, 0);
9014 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9015 SIGSET_T_SIZE));
9016 if (ret != -TARGET_ERESTARTSYS) {
9017 ts->in_sigsuspend = 1;
9020 return ret;
9021 #ifdef TARGET_NR_rt_sigtimedwait
9022 case TARGET_NR_rt_sigtimedwait:
9024 sigset_t set;
9025 struct timespec uts, *puts;
9026 siginfo_t uinfo;
9028 if (arg4 != sizeof(target_sigset_t)) {
9029 return -TARGET_EINVAL;
9032 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9033 return -TARGET_EFAULT;
9034 target_to_host_sigset(&set, p);
9035 unlock_user(p, arg1, 0);
9036 if (arg3) {
9037 puts = &uts;
9038 if (target_to_host_timespec(puts, arg3)) {
9039 return -TARGET_EFAULT;
9041 } else {
9042 puts = NULL;
9044 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9045 SIGSET_T_SIZE));
9046 if (!is_error(ret)) {
9047 if (arg2) {
9048 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9050 if (!p) {
9051 return -TARGET_EFAULT;
9053 host_to_target_siginfo(p, &uinfo);
9054 unlock_user(p, arg2, sizeof(target_siginfo_t));
9056 ret = host_to_target_signal(ret);
9059 return ret;
9060 #endif
9061 #ifdef TARGET_NR_rt_sigtimedwait_time64
9062 case TARGET_NR_rt_sigtimedwait_time64:
9064 sigset_t set;
9065 struct timespec uts, *puts;
9066 siginfo_t uinfo;
9068 if (arg4 != sizeof(target_sigset_t)) {
9069 return -TARGET_EINVAL;
9072 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9073 if (!p) {
9074 return -TARGET_EFAULT;
9076 target_to_host_sigset(&set, p);
9077 unlock_user(p, arg1, 0);
9078 if (arg3) {
9079 puts = &uts;
9080 if (target_to_host_timespec64(puts, arg3)) {
9081 return -TARGET_EFAULT;
9083 } else {
9084 puts = NULL;
9086 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9087 SIGSET_T_SIZE));
9088 if (!is_error(ret)) {
9089 if (arg2) {
9090 p = lock_user(VERIFY_WRITE, arg2,
9091 sizeof(target_siginfo_t), 0);
9092 if (!p) {
9093 return -TARGET_EFAULT;
9095 host_to_target_siginfo(p, &uinfo);
9096 unlock_user(p, arg2, sizeof(target_siginfo_t));
9098 ret = host_to_target_signal(ret);
9101 return ret;
9102 #endif
9103 case TARGET_NR_rt_sigqueueinfo:
9105 siginfo_t uinfo;
9107 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9108 if (!p) {
9109 return -TARGET_EFAULT;
9111 target_to_host_siginfo(&uinfo, p);
9112 unlock_user(p, arg3, 0);
9113 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9115 return ret;
9116 case TARGET_NR_rt_tgsigqueueinfo:
9118 siginfo_t uinfo;
9120 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9121 if (!p) {
9122 return -TARGET_EFAULT;
9124 target_to_host_siginfo(&uinfo, p);
9125 unlock_user(p, arg4, 0);
9126 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9128 return ret;
9129 #ifdef TARGET_NR_sigreturn
9130 case TARGET_NR_sigreturn:
9131 if (block_signals()) {
9132 return -TARGET_ERESTARTSYS;
9134 return do_sigreturn(cpu_env);
9135 #endif
9136 case TARGET_NR_rt_sigreturn:
9137 if (block_signals()) {
9138 return -TARGET_ERESTARTSYS;
9140 return do_rt_sigreturn(cpu_env);
9141 case TARGET_NR_sethostname:
9142 if (!(p = lock_user_string(arg1)))
9143 return -TARGET_EFAULT;
9144 ret = get_errno(sethostname(p, arg2));
9145 unlock_user(p, arg1, 0);
9146 return ret;
9147 #ifdef TARGET_NR_setrlimit
9148 case TARGET_NR_setrlimit:
9150 int resource = target_to_host_resource(arg1);
9151 struct target_rlimit *target_rlim;
9152 struct rlimit rlim;
9153 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9154 return -TARGET_EFAULT;
9155 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9156 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9157 unlock_user_struct(target_rlim, arg2, 0);
9159 * If we just passed through resource limit settings for memory then
9160 * they would also apply to QEMU's own allocations, and QEMU will
9161 * crash or hang or die if its allocations fail. Ideally we would
9162 * track the guest allocations in QEMU and apply the limits ourselves.
9163 * For now, just tell the guest the call succeeded but don't actually
9164 * limit anything.
9166 if (resource != RLIMIT_AS &&
9167 resource != RLIMIT_DATA &&
9168 resource != RLIMIT_STACK) {
9169 return get_errno(setrlimit(resource, &rlim));
9170 } else {
9171 return 0;
9174 #endif
9175 #ifdef TARGET_NR_getrlimit
9176 case TARGET_NR_getrlimit:
9178 int resource = target_to_host_resource(arg1);
9179 struct target_rlimit *target_rlim;
9180 struct rlimit rlim;
9182 ret = get_errno(getrlimit(resource, &rlim));
9183 if (!is_error(ret)) {
9184 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9185 return -TARGET_EFAULT;
9186 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9187 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9188 unlock_user_struct(target_rlim, arg2, 1);
9191 return ret;
9192 #endif
9193 case TARGET_NR_getrusage:
9195 struct rusage rusage;
9196 ret = get_errno(getrusage(arg1, &rusage));
9197 if (!is_error(ret)) {
9198 ret = host_to_target_rusage(arg2, &rusage);
9201 return ret;
9202 #if defined(TARGET_NR_gettimeofday)
9203 case TARGET_NR_gettimeofday:
9205 struct timeval tv;
9206 struct timezone tz;
9208 ret = get_errno(gettimeofday(&tv, &tz));
9209 if (!is_error(ret)) {
9210 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9211 return -TARGET_EFAULT;
9213 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9214 return -TARGET_EFAULT;
9218 return ret;
9219 #endif
9220 #if defined(TARGET_NR_settimeofday)
9221 case TARGET_NR_settimeofday:
9223 struct timeval tv, *ptv = NULL;
9224 struct timezone tz, *ptz = NULL;
9226 if (arg1) {
9227 if (copy_from_user_timeval(&tv, arg1)) {
9228 return -TARGET_EFAULT;
9230 ptv = &tv;
9233 if (arg2) {
9234 if (copy_from_user_timezone(&tz, arg2)) {
9235 return -TARGET_EFAULT;
9237 ptz = &tz;
9240 return get_errno(settimeofday(ptv, ptz));
9242 #endif
9243 #if defined(TARGET_NR_select)
9244 case TARGET_NR_select:
9245 #if defined(TARGET_WANT_NI_OLD_SELECT)
9246 /* some architectures used to have old_select here
9247 * but now return ENOSYS for it.
9249 ret = -TARGET_ENOSYS;
9250 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9251 ret = do_old_select(arg1);
9252 #else
9253 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9254 #endif
9255 return ret;
9256 #endif
9257 #ifdef TARGET_NR_pselect6
9258 case TARGET_NR_pselect6:
9260 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9261 fd_set rfds, wfds, efds;
9262 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9263 struct timespec ts, *ts_ptr;
9266 * The 6th arg is actually two args smashed together,
9267 * so we cannot use the C library.
9269 sigset_t set;
9270 struct {
9271 sigset_t *set;
9272 size_t size;
9273 } sig, *sig_ptr;
9275 abi_ulong arg_sigset, arg_sigsize, *arg7;
9276 target_sigset_t *target_sigset;
9278 n = arg1;
9279 rfd_addr = arg2;
9280 wfd_addr = arg3;
9281 efd_addr = arg4;
9282 ts_addr = arg5;
9284 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9285 if (ret) {
9286 return ret;
9288 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9289 if (ret) {
9290 return ret;
9292 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9293 if (ret) {
9294 return ret;
9298 * This takes a timespec, and not a timeval, so we cannot
9299 * use the do_select() helper ...
9301 if (ts_addr) {
9302 if (target_to_host_timespec(&ts, ts_addr)) {
9303 return -TARGET_EFAULT;
9305 ts_ptr = &ts;
9306 } else {
9307 ts_ptr = NULL;
9310 /* Extract the two packed args for the sigset */
9311 if (arg6) {
9312 sig_ptr = &sig;
9313 sig.size = SIGSET_T_SIZE;
9315 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9316 if (!arg7) {
9317 return -TARGET_EFAULT;
9319 arg_sigset = tswapal(arg7[0]);
9320 arg_sigsize = tswapal(arg7[1]);
9321 unlock_user(arg7, arg6, 0);
9323 if (arg_sigset) {
9324 sig.set = &set;
9325 if (arg_sigsize != sizeof(*target_sigset)) {
9326 /* Like the kernel, we enforce correct size sigsets */
9327 return -TARGET_EINVAL;
9329 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9330 sizeof(*target_sigset), 1);
9331 if (!target_sigset) {
9332 return -TARGET_EFAULT;
9334 target_to_host_sigset(&set, target_sigset);
9335 unlock_user(target_sigset, arg_sigset, 0);
9336 } else {
9337 sig.set = NULL;
9339 } else {
9340 sig_ptr = NULL;
9343 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9344 ts_ptr, sig_ptr));
9346 if (!is_error(ret)) {
9347 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9348 return -TARGET_EFAULT;
9349 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9350 return -TARGET_EFAULT;
9351 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9352 return -TARGET_EFAULT;
9354 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9355 return -TARGET_EFAULT;
9358 return ret;
9359 #endif
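/*
 * Background on the "two packed args" above: the raw pselect6 syscall
 * has only six argument slots, so the kernel defines its sixth
 * argument as a pointer to
 *
 *     struct { sigset_t *set; size_t size; };
 *
 * That is why the code reads two abi_ulongs from arg6 and, like the
 * kernel, rejects any sigset whose size does not match exactly.
 */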
9360 #ifdef TARGET_NR_symlink
9361 case TARGET_NR_symlink:
9363 void *p2;
9364 p = lock_user_string(arg1);
9365 p2 = lock_user_string(arg2);
9366 if (!p || !p2)
9367 ret = -TARGET_EFAULT;
9368 else
9369 ret = get_errno(symlink(p, p2));
9370 unlock_user(p2, arg2, 0);
9371 unlock_user(p, arg1, 0);
9373 return ret;
9374 #endif
9375 #if defined(TARGET_NR_symlinkat)
9376 case TARGET_NR_symlinkat:
9378 void *p2;
9379 p = lock_user_string(arg1);
9380 p2 = lock_user_string(arg3);
9381 if (!p || !p2)
9382 ret = -TARGET_EFAULT;
9383 else
9384 ret = get_errno(symlinkat(p, arg2, p2));
9385 unlock_user(p2, arg3, 0);
9386 unlock_user(p, arg1, 0);
9388 return ret;
9389 #endif
9390 #ifdef TARGET_NR_readlink
9391 case TARGET_NR_readlink:
9393 void *p2;
9394 p = lock_user_string(arg1);
9395 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9396 if (!p || !p2) {
9397 ret = -TARGET_EFAULT;
9398 } else if (!arg3) {
9399 /* Short circuit this for the magic exe check. */
9400 ret = -TARGET_EINVAL;
9401 } else if (is_proc_myself((const char *)p, "exe")) {
9402 char real[PATH_MAX], *temp;
9403 temp = realpath(exec_path, real);
9404 /* Return value is # of bytes that we wrote to the buffer. */
9405 if (temp == NULL) {
9406 ret = get_errno(-1);
9407 } else {
9408 /* Don't worry about sign mismatch as earlier mapping
9409 * logic would have thrown a bad address error. */
9410 ret = MIN(strlen(real), arg3);
9411 /* We cannot NUL-terminate the string. */
9412 memcpy(p2, real, ret);
9414 } else {
9415 ret = get_errno(readlink(path(p), p2, arg3));
9417 unlock_user(p2, arg2, ret);
9418 unlock_user(p, arg1, 0);
9420 return ret;
9421 #endif
9422 #if defined(TARGET_NR_readlinkat)
9423 case TARGET_NR_readlinkat:
9425 void *p2;
9426 p = lock_user_string(arg2);
9427 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9428 if (!p || !p2) {
9429 ret = -TARGET_EFAULT;
9430 } else if (is_proc_myself((const char *)p, "exe")) {
9431 char real[PATH_MAX], *temp;
9432 temp = realpath(exec_path, real);
9433 ret = temp == NULL ? get_errno(-1) : strlen(real);
9434 snprintf((char *)p2, arg4, "%s", real);
9435 } else {
9436 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9438 unlock_user(p2, arg3, ret);
9439 unlock_user(p, arg2, 0);
9441 return ret;
9442 #endif
9443 #ifdef TARGET_NR_swapon
9444 case TARGET_NR_swapon:
9445 if (!(p = lock_user_string(arg1)))
9446 return -TARGET_EFAULT;
9447 ret = get_errno(swapon(p, arg2));
9448 unlock_user(p, arg1, 0);
9449 return ret;
9450 #endif
9451 case TARGET_NR_reboot:
9452 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9453 /* arg4 is only used with RESTART2; it must be ignored otherwise */
9454 p = lock_user_string(arg4);
9455 if (!p) {
9456 return -TARGET_EFAULT;
9458 ret = get_errno(reboot(arg1, arg2, arg3, p));
9459 unlock_user(p, arg4, 0);
9460 } else {
9461 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9463 return ret;
9464 #ifdef TARGET_NR_mmap
9465 case TARGET_NR_mmap:
9466 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9467 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9468 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9469 || defined(TARGET_S390X)
9471 abi_ulong *v;
9472 abi_ulong v1, v2, v3, v4, v5, v6;
9473 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9474 return -TARGET_EFAULT;
9475 v1 = tswapal(v[0]);
9476 v2 = tswapal(v[1]);
9477 v3 = tswapal(v[2]);
9478 v4 = tswapal(v[3]);
9479 v5 = tswapal(v[4]);
9480 v6 = tswapal(v[5]);
9481 unlock_user(v, arg1, 0);
9482 ret = get_errno(target_mmap(v1, v2, v3,
9483 target_to_host_bitmask(v4, mmap_flags_tbl),
9484 v5, v6));
9486 #else
9487 ret = get_errno(target_mmap(arg1, arg2, arg3,
9488 target_to_host_bitmask(arg4, mmap_flags_tbl),
9489 arg5,
9490 arg6));
9491 #endif
9492 return ret;
9493 #endif
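/*
 * Background: on the targets listed above the legacy mmap syscall
 * passes a single pointer to a block of six arguments in guest memory
 * instead of six register arguments, hence the lock_user() + tswapal()
 * unpacking of v1..v6 before calling target_mmap().
 */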
9494 #ifdef TARGET_NR_mmap2
9495 case TARGET_NR_mmap2:
9496 #ifndef MMAP_SHIFT
9497 #define MMAP_SHIFT 12
9498 #endif
9499 ret = target_mmap(arg1, arg2, arg3,
9500 target_to_host_bitmask(arg4, mmap_flags_tbl),
9501 arg5, arg6 << MMAP_SHIFT);
9502 return get_errno(ret);
9503 #endif
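/*
 * Note: mmap2 takes its file offset in 4096-byte units (MMAP_SHIFT
 * defaults to 12) so that 32-bit guests can address file offsets
 * beyond 4 GiB; the shift converts it back to a byte offset for
 * target_mmap().
 */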
9504 case TARGET_NR_munmap:
9505 return get_errno(target_munmap(arg1, arg2));
9506 case TARGET_NR_mprotect:
9508 TaskState *ts = cpu->opaque;
9509 /* Special hack to detect libc making the stack executable. */
9510 if ((arg3 & PROT_GROWSDOWN)
9511 && arg1 >= ts->info->stack_limit
9512 && arg1 <= ts->info->start_stack) {
9513 arg3 &= ~PROT_GROWSDOWN;
9514 arg2 = arg2 + arg1 - ts->info->stack_limit;
9515 arg1 = ts->info->stack_limit;
9518 return get_errno(target_mprotect(arg1, arg2, arg3));
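/*
 * Note: PROT_GROWSDOWN asks the kernel to apply the protection change
 * to the whole growable stack region below the given address.
 * target_mprotect() has no notion of growable VMAs, so the hack above
 * rewrites the request to cover [stack_limit, arg1 + arg2) explicitly
 * and then drops the flag.
 */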
9519 #ifdef TARGET_NR_mremap
9520 case TARGET_NR_mremap:
9521 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9522 #endif
9523 /* ??? msync/mlock/munlock are broken for softmmu. */
9524 #ifdef TARGET_NR_msync
9525 case TARGET_NR_msync:
9526 return get_errno(msync(g2h(arg1), arg2, arg3));
9527 #endif
9528 #ifdef TARGET_NR_mlock
9529 case TARGET_NR_mlock:
9530 return get_errno(mlock(g2h(arg1), arg2));
9531 #endif
9532 #ifdef TARGET_NR_munlock
9533 case TARGET_NR_munlock:
9534 return get_errno(munlock(g2h(arg1), arg2));
9535 #endif
9536 #ifdef TARGET_NR_mlockall
9537 case TARGET_NR_mlockall:
9538 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9539 #endif
9540 #ifdef TARGET_NR_munlockall
9541 case TARGET_NR_munlockall:
9542 return get_errno(munlockall());
9543 #endif
9544 #ifdef TARGET_NR_truncate
9545 case TARGET_NR_truncate:
9546 if (!(p = lock_user_string(arg1)))
9547 return -TARGET_EFAULT;
9548 ret = get_errno(truncate(p, arg2));
9549 unlock_user(p, arg1, 0);
9550 return ret;
9551 #endif
9552 #ifdef TARGET_NR_ftruncate
9553 case TARGET_NR_ftruncate:
9554 return get_errno(ftruncate(arg1, arg2));
9555 #endif
9556 case TARGET_NR_fchmod:
9557 return get_errno(fchmod(arg1, arg2));
9558 #if defined(TARGET_NR_fchmodat)
9559 case TARGET_NR_fchmodat:
9560 if (!(p = lock_user_string(arg2)))
9561 return -TARGET_EFAULT;
9562 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9563 unlock_user(p, arg2, 0);
9564 return ret;
9565 #endif
9566 case TARGET_NR_getpriority:
9567 /* Note that negative values are valid for getpriority, so we must
9568 differentiate based on errno settings. */
9569 errno = 0;
9570 ret = getpriority(arg1, arg2);
9571 if (ret == -1 && errno != 0) {
9572 return -host_to_target_errno(errno);
9574 #ifdef TARGET_ALPHA
9575 /* Return value is the unbiased priority. Signal no error. */
9576 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9577 #else
9578 /* Return value is a biased priority to avoid negative numbers. */
9579 ret = 20 - ret;
9580 #endif
9581 return ret;
9582 case TARGET_NR_setpriority:
9583 return get_errno(setpriority(arg1, arg2, arg3));
9584 #ifdef TARGET_NR_statfs
9585 case TARGET_NR_statfs:
9586 if (!(p = lock_user_string(arg1))) {
9587 return -TARGET_EFAULT;
9589 ret = get_errno(statfs(path(p), &stfs));
9590 unlock_user(p, arg1, 0);
9591 convert_statfs:
9592 if (!is_error(ret)) {
9593 struct target_statfs *target_stfs;
9595 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9596 return -TARGET_EFAULT;
9597 __put_user(stfs.f_type, &target_stfs->f_type);
9598 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9599 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9600 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9601 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9602 __put_user(stfs.f_files, &target_stfs->f_files);
9603 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9604 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9605 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9606 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9607 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9608 #ifdef _STATFS_F_FLAGS
9609 __put_user(stfs.f_flags, &target_stfs->f_flags);
9610 #else
9611 __put_user(0, &target_stfs->f_flags);
9612 #endif
9613 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9614 unlock_user_struct(target_stfs, arg2, 1);
9616 return ret;
9617 #endif
9618 #ifdef TARGET_NR_fstatfs
9619 case TARGET_NR_fstatfs:
9620 ret = get_errno(fstatfs(arg1, &stfs));
9621 goto convert_statfs;
9622 #endif
9623 #ifdef TARGET_NR_statfs64
9624 case TARGET_NR_statfs64:
9625 if (!(p = lock_user_string(arg1))) {
9626 return -TARGET_EFAULT;
9628 ret = get_errno(statfs(path(p), &stfs));
9629 unlock_user(p, arg1, 0);
9630 convert_statfs64:
9631 if (!is_error(ret)) {
9632 struct target_statfs64 *target_stfs;
9634 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9635 return -TARGET_EFAULT;
9636 __put_user(stfs.f_type, &target_stfs->f_type);
9637 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9638 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9639 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9640 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9641 __put_user(stfs.f_files, &target_stfs->f_files);
9642 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9643 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9644 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9645 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9646 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9647 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9648 unlock_user_struct(target_stfs, arg3, 1);
9650 return ret;
9651 case TARGET_NR_fstatfs64:
9652 ret = get_errno(fstatfs(arg1, &stfs));
9653 goto convert_statfs64;
9654 #endif
9655 #ifdef TARGET_NR_socketcall
9656 case TARGET_NR_socketcall:
9657 return do_socketcall(arg1, arg2);
9658 #endif
9659 #ifdef TARGET_NR_accept
9660 case TARGET_NR_accept:
9661 return do_accept4(arg1, arg2, arg3, 0);
9662 #endif
9663 #ifdef TARGET_NR_accept4
9664 case TARGET_NR_accept4:
9665 return do_accept4(arg1, arg2, arg3, arg4);
9666 #endif
9667 #ifdef TARGET_NR_bind
9668 case TARGET_NR_bind:
9669 return do_bind(arg1, arg2, arg3);
9670 #endif
9671 #ifdef TARGET_NR_connect
9672 case TARGET_NR_connect:
9673 return do_connect(arg1, arg2, arg3);
9674 #endif
9675 #ifdef TARGET_NR_getpeername
9676 case TARGET_NR_getpeername:
9677 return do_getpeername(arg1, arg2, arg3);
9678 #endif
9679 #ifdef TARGET_NR_getsockname
9680 case TARGET_NR_getsockname:
9681 return do_getsockname(arg1, arg2, arg3);
9682 #endif
9683 #ifdef TARGET_NR_getsockopt
9684 case TARGET_NR_getsockopt:
9685 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9686 #endif
9687 #ifdef TARGET_NR_listen
9688 case TARGET_NR_listen:
9689 return get_errno(listen(arg1, arg2));
9690 #endif
9691 #ifdef TARGET_NR_recv
9692 case TARGET_NR_recv:
9693 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9694 #endif
9695 #ifdef TARGET_NR_recvfrom
9696 case TARGET_NR_recvfrom:
9697 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9698 #endif
9699 #ifdef TARGET_NR_recvmsg
9700 case TARGET_NR_recvmsg:
9701 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9702 #endif
9703 #ifdef TARGET_NR_send
9704 case TARGET_NR_send:
9705 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9706 #endif
9707 #ifdef TARGET_NR_sendmsg
9708 case TARGET_NR_sendmsg:
9709 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9710 #endif
9711 #ifdef TARGET_NR_sendmmsg
9712 case TARGET_NR_sendmmsg:
9713 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9714 #endif
9715 #ifdef TARGET_NR_recvmmsg
9716 case TARGET_NR_recvmmsg:
9717 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9718 #endif
9719 #ifdef TARGET_NR_sendto
9720 case TARGET_NR_sendto:
9721 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9722 #endif
9723 #ifdef TARGET_NR_shutdown
9724 case TARGET_NR_shutdown:
9725 return get_errno(shutdown(arg1, arg2));
9726 #endif
9727 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9728 case TARGET_NR_getrandom:
9729 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9730 if (!p) {
9731 return -TARGET_EFAULT;
9733 ret = get_errno(getrandom(p, arg2, arg3));
9734 unlock_user(p, arg1, ret);
9735 return ret;
9736 #endif
9737 #ifdef TARGET_NR_socket
9738 case TARGET_NR_socket:
9739 return do_socket(arg1, arg2, arg3);
9740 #endif
9741 #ifdef TARGET_NR_socketpair
9742 case TARGET_NR_socketpair:
9743 return do_socketpair(arg1, arg2, arg3, arg4);
9744 #endif
9745 #ifdef TARGET_NR_setsockopt
9746 case TARGET_NR_setsockopt:
9747 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9748 #endif
9749 #if defined(TARGET_NR_syslog)
9750 case TARGET_NR_syslog:
9751 {
9752 int len = arg2;
9754 switch (arg1) {
9755 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9756 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9757 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9758 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9759 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9760 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9761 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9762 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9763 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9764 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9765 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9766 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9768 if (len < 0) {
9769 return -TARGET_EINVAL;
9770 }
9771 if (len == 0) {
9772 return 0;
9773 }
9774 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9775 if (!p) {
9776 return -TARGET_EFAULT;
9777 }
9778 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9779 unlock_user(p, arg2, arg3);
9781 return ret;
9782 default:
9783 return -TARGET_EINVAL;
9784 }
9785 }
9786 break;
9787 #endif
9788 case TARGET_NR_setitimer:
9789 {
9790 struct itimerval value, ovalue, *pvalue;
9792 if (arg2) {
9793 pvalue = &value;
9794 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9795 || copy_from_user_timeval(&pvalue->it_value,
9796 arg2 + sizeof(struct target_timeval)))
9797 return -TARGET_EFAULT;
9798 } else {
9799 pvalue = NULL;
9800 }
9801 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9802 if (!is_error(ret) && arg3) {
9803 if (copy_to_user_timeval(arg3,
9804 &ovalue.it_interval)
9805 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9806 &ovalue.it_value))
9807 return -TARGET_EFAULT;
9808 }
9809 }
9810 return ret;
9811 case TARGET_NR_getitimer:
9812 {
9813 struct itimerval value;
9815 ret = get_errno(getitimer(arg1, &value));
9816 if (!is_error(ret) && arg2) {
9817 if (copy_to_user_timeval(arg2,
9818 &value.it_interval)
9819 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9820 &value.it_value))
9821 return -TARGET_EFAULT;
9822 }
9823 }
9824 return ret;
9825 #ifdef TARGET_NR_stat
9826 case TARGET_NR_stat:
9827 if (!(p = lock_user_string(arg1))) {
9828 return -TARGET_EFAULT;
9829 }
9830 ret = get_errno(stat(path(p), &st));
9831 unlock_user(p, arg1, 0);
9832 goto do_stat;
9833 #endif
9834 #ifdef TARGET_NR_lstat
9835 case TARGET_NR_lstat:
9836 if (!(p = lock_user_string(arg1))) {
9837 return -TARGET_EFAULT;
9838 }
9839 ret = get_errno(lstat(path(p), &st));
9840 unlock_user(p, arg1, 0);
9841 goto do_stat;
9842 #endif
9843 #ifdef TARGET_NR_fstat
9844 case TARGET_NR_fstat:
9845 {
9846 ret = get_errno(fstat(arg1, &st));
9847 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9848 do_stat:
9849 #endif
9850 if (!is_error(ret)) {
9851 struct target_stat *target_st;
9853 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9854 return -TARGET_EFAULT;
9855 memset(target_st, 0, sizeof(*target_st));
9856 __put_user(st.st_dev, &target_st->st_dev);
9857 __put_user(st.st_ino, &target_st->st_ino);
9858 __put_user(st.st_mode, &target_st->st_mode);
9859 __put_user(st.st_uid, &target_st->st_uid);
9860 __put_user(st.st_gid, &target_st->st_gid);
9861 __put_user(st.st_nlink, &target_st->st_nlink);
9862 __put_user(st.st_rdev, &target_st->st_rdev);
9863 __put_user(st.st_size, &target_st->st_size);
9864 __put_user(st.st_blksize, &target_st->st_blksize);
9865 __put_user(st.st_blocks, &target_st->st_blocks);
9866 __put_user(st.st_atime, &target_st->target_st_atime);
9867 __put_user(st.st_mtime, &target_st->target_st_mtime);
9868 __put_user(st.st_ctime, &target_st->target_st_ctime);
9869 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9870 defined(TARGET_STAT_HAVE_NSEC)
9871 __put_user(st.st_atim.tv_nsec,
9872 &target_st->target_st_atime_nsec);
9873 __put_user(st.st_mtim.tv_nsec,
9874 &target_st->target_st_mtime_nsec);
9875 __put_user(st.st_ctim.tv_nsec,
9876 &target_st->target_st_ctime_nsec);
9877 #endif
9878 unlock_user_struct(target_st, arg2, 1);
9879 }
9880 }
9881 return ret;
9882 #endif
9883 case TARGET_NR_vhangup:
9884 return get_errno(vhangup());
9885 #ifdef TARGET_NR_syscall
9886 case TARGET_NR_syscall:
9887 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9888 arg6, arg7, arg8, 0);
9889 #endif
9890 #if defined(TARGET_NR_wait4)
9891 case TARGET_NR_wait4:
9892 {
9893 int status;
9894 abi_long status_ptr = arg2;
9895 struct rusage rusage, *rusage_ptr;
9896 abi_ulong target_rusage = arg4;
9897 abi_long rusage_err;
9898 if (target_rusage)
9899 rusage_ptr = &rusage;
9900 else
9901 rusage_ptr = NULL;
9902 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9903 if (!is_error(ret)) {
9904 if (status_ptr && ret) {
9905 status = host_to_target_waitstatus(status);
9906 if (put_user_s32(status, status_ptr))
9907 return -TARGET_EFAULT;
9908 }
9909 if (target_rusage) {
9910 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9911 if (rusage_err) {
9912 ret = rusage_err;
9913 }
9914 }
9915 }
9916 }
9917 return ret;
9918 #endif
9919 #ifdef TARGET_NR_swapoff
9920 case TARGET_NR_swapoff:
9921 if (!(p = lock_user_string(arg1)))
9922 return -TARGET_EFAULT;
9923 ret = get_errno(swapoff(p));
9924 unlock_user(p, arg1, 0);
9925 return ret;
9926 #endif
9927 case TARGET_NR_sysinfo:
9928 {
9929 struct target_sysinfo *target_value;
9930 struct sysinfo value;
9931 ret = get_errno(sysinfo(&value));
9932 if (!is_error(ret) && arg1)
9933 {
9934 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9935 return -TARGET_EFAULT;
9936 __put_user(value.uptime, &target_value->uptime);
9937 __put_user(value.loads[0], &target_value->loads[0]);
9938 __put_user(value.loads[1], &target_value->loads[1]);
9939 __put_user(value.loads[2], &target_value->loads[2]);
9940 __put_user(value.totalram, &target_value->totalram);
9941 __put_user(value.freeram, &target_value->freeram);
9942 __put_user(value.sharedram, &target_value->sharedram);
9943 __put_user(value.bufferram, &target_value->bufferram);
9944 __put_user(value.totalswap, &target_value->totalswap);
9945 __put_user(value.freeswap, &target_value->freeswap);
9946 __put_user(value.procs, &target_value->procs);
9947 __put_user(value.totalhigh, &target_value->totalhigh);
9948 __put_user(value.freehigh, &target_value->freehigh);
9949 __put_user(value.mem_unit, &target_value->mem_unit);
9950 unlock_user_struct(target_value, arg1, 1);
9951 }
9952 }
9953 return ret;
9954 #ifdef TARGET_NR_ipc
9955 case TARGET_NR_ipc:
9956 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9957 #endif
9958 #ifdef TARGET_NR_semget
9959 case TARGET_NR_semget:
9960 return get_errno(semget(arg1, arg2, arg3));
9961 #endif
9962 #ifdef TARGET_NR_semop
9963 case TARGET_NR_semop:
9964 return do_semtimedop(arg1, arg2, arg3, 0, false);
9965 #endif
9966 #ifdef TARGET_NR_semtimedop
9967 case TARGET_NR_semtimedop:
9968 return do_semtimedop(arg1, arg2, arg3, arg4, false);
9969 #endif
9970 #ifdef TARGET_NR_semtimedop_time64
9971 case TARGET_NR_semtimedop_time64:
9972 return do_semtimedop(arg1, arg2, arg3, arg4, true);
9973 #endif
9974 #ifdef TARGET_NR_semctl
9975 case TARGET_NR_semctl:
9976 return do_semctl(arg1, arg2, arg3, arg4);
9977 #endif
9978 #ifdef TARGET_NR_msgctl
9979 case TARGET_NR_msgctl:
9980 return do_msgctl(arg1, arg2, arg3);
9981 #endif
9982 #ifdef TARGET_NR_msgget
9983 case TARGET_NR_msgget:
9984 return get_errno(msgget(arg1, arg2));
9985 #endif
9986 #ifdef TARGET_NR_msgrcv
9987 case TARGET_NR_msgrcv:
9988 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9989 #endif
9990 #ifdef TARGET_NR_msgsnd
9991 case TARGET_NR_msgsnd:
9992 return do_msgsnd(arg1, arg2, arg3, arg4);
9993 #endif
9994 #ifdef TARGET_NR_shmget
9995 case TARGET_NR_shmget:
9996 return get_errno(shmget(arg1, arg2, arg3));
9997 #endif
9998 #ifdef TARGET_NR_shmctl
9999 case TARGET_NR_shmctl:
10000 return do_shmctl(arg1, arg2, arg3);
10001 #endif
10002 #ifdef TARGET_NR_shmat
10003 case TARGET_NR_shmat:
10004 return do_shmat(cpu_env, arg1, arg2, arg3);
10005 #endif
10006 #ifdef TARGET_NR_shmdt
10007 case TARGET_NR_shmdt:
10008 return do_shmdt(arg1);
10009 #endif
10010 case TARGET_NR_fsync:
10011 return get_errno(fsync(arg1));
10012 case TARGET_NR_clone:
10013 /* Linux manages to have three different orderings for its
10014 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10015 * match the kernel's CONFIG_CLONE_* settings.
10016 * Microblaze is further special in that it uses a sixth
10017 * implicit argument to clone for the TLS pointer.
10018 */
10019 #if defined(TARGET_MICROBLAZE)
10020 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10021 #elif defined(TARGET_CLONE_BACKWARDS)
10022 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10023 #elif defined(TARGET_CLONE_BACKWARDS2)
10024 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10025 #else
10026 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10027 #endif
10028 return ret;
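/*
 * Worked example (hypothetical helper, mirroring the clone case above):
 * the three orderings differ only in where the guest registers land in
 * do_fork(), whose parameters here are assumed to be (env, flags, newsp,
 * parent_tidptr, tls, child_tidptr).  Microblaze's sixth implicit TLS
 * argument is omitted for brevity.
 */
#if 0
static abi_long clone_reorder_demo(CPUArchState *env,
                                   abi_long a1, abi_long a2, abi_long a3,
                                   abi_long a4, abi_long a5)
{
#if defined(TARGET_CLONE_BACKWARDS)
    /* guest order: flags, newsp, parent_tidptr, tls, child_tidptr */
    return do_fork(env, a1, a2, a3, a4, a5);
#elif defined(TARGET_CLONE_BACKWARDS2)
    /* guest order: newsp, flags, ...; child_tidptr before tls */
    return do_fork(env, a2, a1, a3, a5, a4);
#else
    /* default: flags, newsp, parent_tidptr, child_tidptr, tls */
    return do_fork(env, a1, a2, a3, a5, a4);
#endif
}
#endif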
10029 #ifdef __NR_exit_group
10030 /* new thread calls */
10031 case TARGET_NR_exit_group:
10032 preexit_cleanup(cpu_env, arg1);
10033 return get_errno(exit_group(arg1));
10034 #endif
10035 case TARGET_NR_setdomainname:
10036 if (!(p = lock_user_string(arg1)))
10037 return -TARGET_EFAULT;
10038 ret = get_errno(setdomainname(p, arg2));
10039 unlock_user(p, arg1, 0);
10040 return ret;
10041 case TARGET_NR_uname:
10042 /* no need to transcode because we use the linux syscall */
10043 {
10044 struct new_utsname * buf;
10046 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10047 return -TARGET_EFAULT;
10048 ret = get_errno(sys_uname(buf));
10049 if (!is_error(ret)) {
10050 /* Overwrite the native machine name with whatever is being
10051 emulated. */
10052 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10053 sizeof(buf->machine));
10054 /* Allow the user to override the reported release. */
10055 if (qemu_uname_release && *qemu_uname_release) {
10056 g_strlcpy(buf->release, qemu_uname_release,
10057 sizeof(buf->release));
10058 }
10059 }
10060 unlock_user_struct(buf, arg1, 1);
10061 }
10062 return ret;
10063 #ifdef TARGET_I386
10064 case TARGET_NR_modify_ldt:
10065 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10066 #if !defined(TARGET_X86_64)
10067 case TARGET_NR_vm86:
10068 return do_vm86(cpu_env, arg1, arg2);
10069 #endif
10070 #endif
10071 #if defined(TARGET_NR_adjtimex)
10072 case TARGET_NR_adjtimex:
10073 {
10074 struct timex host_buf;
10076 if (target_to_host_timex(&host_buf, arg1) != 0) {
10077 return -TARGET_EFAULT;
10078 }
10079 ret = get_errno(adjtimex(&host_buf));
10080 if (!is_error(ret)) {
10081 if (host_to_target_timex(arg1, &host_buf) != 0) {
10082 return -TARGET_EFAULT;
10083 }
10084 }
10085 }
10086 return ret;
10087 #endif
10088 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10089 case TARGET_NR_clock_adjtime:
10091 struct timex htx, *phtx = &htx;
10093 if (target_to_host_timex(phtx, arg2) != 0) {
10094 return -TARGET_EFAULT;
10096 ret = get_errno(clock_adjtime(arg1, phtx));
10097 if (!is_error(ret) && phtx) {
10098 if (host_to_target_timex(arg2, phtx) != 0) {
10099 return -TARGET_EFAULT;
10103 return ret;
10104 #endif
10105 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10106 case TARGET_NR_clock_adjtime64:
10108 struct timex htx;
10110 if (target_to_host_timex64(&htx, arg2) != 0) {
10111 return -TARGET_EFAULT;
10113 ret = get_errno(clock_adjtime(arg1, &htx));
10114 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10115 return -TARGET_EFAULT;
10118 return ret;
10119 #endif
10120 case TARGET_NR_getpgid:
10121 return get_errno(getpgid(arg1));
10122 case TARGET_NR_fchdir:
10123 return get_errno(fchdir(arg1));
10124 case TARGET_NR_personality:
10125 return get_errno(personality(arg1));
10126 #ifdef TARGET_NR__llseek /* Not on alpha */
10127 case TARGET_NR__llseek:
10128 {
10129 int64_t res;
10130 #if !defined(__NR_llseek)
10131 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10132 if (res == -1) {
10133 ret = get_errno(res);
10134 } else {
10135 ret = 0;
10136 }
10137 #else
10138 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10139 #endif
10140 if ((ret == 0) && put_user_s64(res, arg4)) {
10141 return -TARGET_EFAULT;
10142 }
10143 }
10144 return ret;
10145 #endif
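/*
 * Worked example (not from the original file): how the !__NR_llseek path
 * above assembles one 64-bit offset from the two 32-bit registers arg2
 * (high word) and arg3 (low word).
 */
#if 0
static uint64_t llseek_offset_demo(uint32_t hi, uint32_t lo)
{
    /* e.g. hi = 0x1, lo = 0x80000000 -> 0x180000000 (6 GiB) */
    return ((uint64_t)hi << 32) | lo;
}
#endif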
10146 #ifdef TARGET_NR_getdents
10147 case TARGET_NR_getdents:
10148 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10149 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10151 struct target_dirent *target_dirp;
10152 struct linux_dirent *dirp;
10153 abi_long count = arg3;
10155 dirp = g_try_malloc(count);
10156 if (!dirp) {
10157 return -TARGET_ENOMEM;
10160 ret = get_errno(sys_getdents(arg1, dirp, count));
10161 if (!is_error(ret)) {
10162 struct linux_dirent *de;
10163 struct target_dirent *tde;
10164 int len = ret;
10165 int reclen, treclen;
10166 int count1, tnamelen;
10168 count1 = 0;
10169 de = dirp;
10170 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10171 return -TARGET_EFAULT;
10172 tde = target_dirp;
10173 while (len > 0) {
10174 reclen = de->d_reclen;
10175 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10176 assert(tnamelen >= 0);
10177 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10178 assert(count1 + treclen <= count);
10179 tde->d_reclen = tswap16(treclen);
10180 tde->d_ino = tswapal(de->d_ino);
10181 tde->d_off = tswapal(de->d_off);
10182 memcpy(tde->d_name, de->d_name, tnamelen);
10183 de = (struct linux_dirent *)((char *)de + reclen);
10184 len -= reclen;
10185 tde = (struct target_dirent *)((char *)tde + treclen);
10186 count1 += treclen;
10188 ret = count1;
10189 unlock_user(target_dirp, arg2, ret);
10191 g_free(dirp);
10193 #else
10195 struct linux_dirent *dirp;
10196 abi_long count = arg3;
10198 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10199 return -TARGET_EFAULT;
10200 ret = get_errno(sys_getdents(arg1, dirp, count));
10201 if (!is_error(ret)) {
10202 struct linux_dirent *de;
10203 int len = ret;
10204 int reclen;
10205 de = dirp;
10206 while (len > 0) {
10207 reclen = de->d_reclen;
10208 if (reclen > len)
10209 break;
10210 de->d_reclen = tswap16(reclen);
10211 tswapls(&de->d_ino);
10212 tswapls(&de->d_off);
10213 de = (struct linux_dirent *)((char *)de + reclen);
10214 len -= reclen;
10217 unlock_user(dirp, arg2, ret);
10219 #endif
10220 #else
10221 /* Implement getdents in terms of getdents64 */
10223 struct linux_dirent64 *dirp;
10224 abi_long count = arg3;
10226 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10227 if (!dirp) {
10228 return -TARGET_EFAULT;
10230 ret = get_errno(sys_getdents64(arg1, dirp, count));
10231 if (!is_error(ret)) {
10232 /* Convert the dirent64 structs to target dirent. We do this
10233 * in-place, since we can guarantee that a target_dirent is no
10234 * larger than a dirent64; however this means we have to be
10235 * careful to read everything before writing in the new format.
10236 */
10237 struct linux_dirent64 *de;
10238 struct target_dirent *tde;
10239 int len = ret;
10240 int tlen = 0;
10242 de = dirp;
10243 tde = (struct target_dirent *)dirp;
10244 while (len > 0) {
10245 int namelen, treclen;
10246 int reclen = de->d_reclen;
10247 uint64_t ino = de->d_ino;
10248 int64_t off = de->d_off;
10249 uint8_t type = de->d_type;
10251 namelen = strlen(de->d_name);
10252 treclen = offsetof(struct target_dirent, d_name)
10253 + namelen + 2;
10254 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10256 memmove(tde->d_name, de->d_name, namelen + 1);
10257 tde->d_ino = tswapal(ino);
10258 tde->d_off = tswapal(off);
10259 tde->d_reclen = tswap16(treclen);
10260 /* The target_dirent type is in what was formerly a padding
10261 * byte at the end of the structure:
10262 */
10263 *(((char *)tde) + treclen - 1) = type;
10265 de = (struct linux_dirent64 *)((char *)de + reclen);
10266 tde = (struct target_dirent *)((char *)tde + treclen);
10267 len -= reclen;
10268 tlen += treclen;
10270 ret = tlen;
10272 unlock_user(dirp, arg2, ret);
10274 #endif
10275 return ret;
10276 #endif /* TARGET_NR_getdents */
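/*
 * Sketch (hypothetical, restating the in-place getdents conversion
 * above): a target_dirent header is never larger than a linux_dirent64
 * header, so each converted record can only shrink and the write cursor
 * never overtakes the read cursor.  The "+ 2" is one byte for the NUL
 * terminator and one for the d_type byte parked in the final padding
 * slot.
 */
#if 0
static int target_reclen_demo(int namelen)
{
    int treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
    return QEMU_ALIGN_UP(treclen, sizeof(abi_long));
}
#endif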
10277 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10278 case TARGET_NR_getdents64:
10280 struct linux_dirent64 *dirp;
10281 abi_long count = arg3;
10282 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10283 return -TARGET_EFAULT;
10284 ret = get_errno(sys_getdents64(arg1, dirp, count));
10285 if (!is_error(ret)) {
10286 struct linux_dirent64 *de;
10287 int len = ret;
10288 int reclen;
10289 de = dirp;
10290 while (len > 0) {
10291 reclen = de->d_reclen;
10292 if (reclen > len)
10293 break;
10294 de->d_reclen = tswap16(reclen);
10295 tswap64s((uint64_t *)&de->d_ino);
10296 tswap64s((uint64_t *)&de->d_off);
10297 de = (struct linux_dirent64 *)((char *)de + reclen);
10298 len -= reclen;
10301 unlock_user(dirp, arg2, ret);
10303 return ret;
10304 #endif /* TARGET_NR_getdents64 */
10305 #if defined(TARGET_NR__newselect)
10306 case TARGET_NR__newselect:
10307 return do_select(arg1, arg2, arg3, arg4, arg5);
10308 #endif
10309 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10310 # ifdef TARGET_NR_poll
10311 case TARGET_NR_poll:
10312 # endif
10313 # ifdef TARGET_NR_ppoll
10314 case TARGET_NR_ppoll:
10315 # endif
10317 struct target_pollfd *target_pfd;
10318 unsigned int nfds = arg2;
10319 struct pollfd *pfd;
10320 unsigned int i;
10322 pfd = NULL;
10323 target_pfd = NULL;
10324 if (nfds) {
10325 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10326 return -TARGET_EINVAL;
10329 target_pfd = lock_user(VERIFY_WRITE, arg1,
10330 sizeof(struct target_pollfd) * nfds, 1);
10331 if (!target_pfd) {
10332 return -TARGET_EFAULT;
10335 pfd = alloca(sizeof(struct pollfd) * nfds);
10336 for (i = 0; i < nfds; i++) {
10337 pfd[i].fd = tswap32(target_pfd[i].fd);
10338 pfd[i].events = tswap16(target_pfd[i].events);
10342 switch (num) {
10343 # ifdef TARGET_NR_ppoll
10344 case TARGET_NR_ppoll:
10346 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10347 target_sigset_t *target_set;
10348 sigset_t _set, *set = &_set;
10350 if (arg3) {
10351 if (target_to_host_timespec(timeout_ts, arg3)) {
10352 unlock_user(target_pfd, arg1, 0);
10353 return -TARGET_EFAULT;
10355 } else {
10356 timeout_ts = NULL;
10359 if (arg4) {
10360 if (arg5 != sizeof(target_sigset_t)) {
10361 unlock_user(target_pfd, arg1, 0);
10362 return -TARGET_EINVAL;
10365 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10366 if (!target_set) {
10367 unlock_user(target_pfd, arg1, 0);
10368 return -TARGET_EFAULT;
10370 target_to_host_sigset(set, target_set);
10371 } else {
10372 set = NULL;
10375 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10376 set, SIGSET_T_SIZE));
10378 if (!is_error(ret) && arg3) {
10379 host_to_target_timespec(arg3, timeout_ts);
10381 if (arg4) {
10382 unlock_user(target_set, arg4, 0);
10384 break;
10386 # endif
10387 # ifdef TARGET_NR_poll
10388 case TARGET_NR_poll:
10390 struct timespec ts, *pts;
10392 if (arg3 >= 0) {
10393 /* Convert ms to secs, ns */
10394 ts.tv_sec = arg3 / 1000;
10395 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10396 pts = &ts;
10397 } else {
10398 /* -ve poll() timeout means "infinite" */
10399 pts = NULL;
10401 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10402 break;
10404 # endif
10405 default:
10406 g_assert_not_reached();
10409 if (!is_error(ret)) {
10410 for(i = 0; i < nfds; i++) {
10411 target_pfd[i].revents = tswap16(pfd[i].revents);
10414 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10416 return ret;
10417 #endif
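/*
 * Worked example (illustrative helper, not original code): the poll()
 * millisecond timeout above is split into seconds plus nanoseconds for
 * safe_ppoll(); a negative timeout means "wait forever" and is passed as
 * a NULL timespec.
 */
#if 0
static struct timespec *poll_ms_to_ts_demo(abi_long ms, struct timespec *ts)
{
    if (ms < 0) {
        return NULL;                        /* infinite timeout */
    }
    ts->tv_sec = ms / 1000;                 /* e.g. 2500 ms -> 2 s ... */
    ts->tv_nsec = (ms % 1000) * 1000000LL;  /* ... plus 500000000 ns */
    return ts;
}
#endif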
10418 case TARGET_NR_flock:
10419 /* NOTE: the flock constant seems to be the same for every
10420 Linux platform */
10421 return get_errno(safe_flock(arg1, arg2));
10422 case TARGET_NR_readv:
10424 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10425 if (vec != NULL) {
10426 ret = get_errno(safe_readv(arg1, vec, arg3));
10427 unlock_iovec(vec, arg2, arg3, 1);
10428 } else {
10429 ret = -host_to_target_errno(errno);
10432 return ret;
10433 case TARGET_NR_writev:
10435 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10436 if (vec != NULL) {
10437 ret = get_errno(safe_writev(arg1, vec, arg3));
10438 unlock_iovec(vec, arg2, arg3, 0);
10439 } else {
10440 ret = -host_to_target_errno(errno);
10443 return ret;
10444 #if defined(TARGET_NR_preadv)
10445 case TARGET_NR_preadv:
10447 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10448 if (vec != NULL) {
10449 unsigned long low, high;
10451 target_to_host_low_high(arg4, arg5, &low, &high);
10452 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10453 unlock_iovec(vec, arg2, arg3, 1);
10454 } else {
10455 ret = -host_to_target_errno(errno);
10458 return ret;
10459 #endif
10460 #if defined(TARGET_NR_pwritev)
10461 case TARGET_NR_pwritev:
10463 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10464 if (vec != NULL) {
10465 unsigned long low, high;
10467 target_to_host_low_high(arg4, arg5, &low, &high);
10468 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10469 unlock_iovec(vec, arg2, arg3, 0);
10470 } else {
10471 ret = -host_to_target_errno(errno);
10474 return ret;
10475 #endif
10476 case TARGET_NR_getsid:
10477 return get_errno(getsid(arg1));
10478 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10479 case TARGET_NR_fdatasync:
10480 return get_errno(fdatasync(arg1));
10481 #endif
10482 #ifdef TARGET_NR__sysctl
10483 case TARGET_NR__sysctl:
10484 /* We don't implement this, but ENOTDIR is always a safe
10485 return value. */
10486 return -TARGET_ENOTDIR;
10487 #endif
10488 case TARGET_NR_sched_getaffinity:
10490 unsigned int mask_size;
10491 unsigned long *mask;
10493 /*
10494 * sched_getaffinity needs multiples of ulong, so need to take
10495 * care of mismatches between target ulong and host ulong sizes.
10496 */
10497 if (arg2 & (sizeof(abi_ulong) - 1)) {
10498 return -TARGET_EINVAL;
10500 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10502 mask = alloca(mask_size);
10503 memset(mask, 0, mask_size);
10504 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10506 if (!is_error(ret)) {
10507 if (ret > arg2) {
10508 /* More data returned than the caller's buffer will fit.
10509 * This only happens if sizeof(abi_long) < sizeof(long)
10510 * and the caller passed us a buffer holding an odd number
10511 * of abi_longs. If the host kernel is actually using the
10512 * extra 4 bytes then fail EINVAL; otherwise we can just
10513 * ignore them and only copy the interesting part.
10514 */
10515 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10516 if (numcpus > arg2 * 8) {
10517 return -TARGET_EINVAL;
10519 ret = arg2;
10522 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10523 return -TARGET_EFAULT;
10527 return ret;
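/*
 * Worked example (not from the original file): the mask-size arithmetic
 * used by the two affinity cases.  The guest length must already be a
 * multiple of the target ulong size; it is then rounded up to a whole
 * number of host ulongs before calling the host syscall.
 */
#if 0
static unsigned int mask_size_demo(unsigned int guest_len)
{
    /* e.g. guest_len = 12 with 8-byte host ulongs -> 16 */
    return (guest_len + sizeof(unsigned long) - 1) &
           ~(sizeof(unsigned long) - 1);
}
#endif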
10528 case TARGET_NR_sched_setaffinity:
10530 unsigned int mask_size;
10531 unsigned long *mask;
10533 /*
10534 * sched_setaffinity needs multiples of ulong, so need to take
10535 * care of mismatches between target ulong and host ulong sizes.
10536 */
10537 if (arg2 & (sizeof(abi_ulong) - 1)) {
10538 return -TARGET_EINVAL;
10540 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10541 mask = alloca(mask_size);
10543 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10544 if (ret) {
10545 return ret;
10548 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10550 case TARGET_NR_getcpu:
10552 unsigned cpu, node;
10553 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10554 arg2 ? &node : NULL,
10555 NULL));
10556 if (is_error(ret)) {
10557 return ret;
10559 if (arg1 && put_user_u32(cpu, arg1)) {
10560 return -TARGET_EFAULT;
10562 if (arg2 && put_user_u32(node, arg2)) {
10563 return -TARGET_EFAULT;
10566 return ret;
10567 case TARGET_NR_sched_setparam:
10569 struct sched_param *target_schp;
10570 struct sched_param schp;
10572 if (arg2 == 0) {
10573 return -TARGET_EINVAL;
10575 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10576 return -TARGET_EFAULT;
10577 schp.sched_priority = tswap32(target_schp->sched_priority);
10578 unlock_user_struct(target_schp, arg2, 0);
10579 return get_errno(sched_setparam(arg1, &schp));
10581 case TARGET_NR_sched_getparam:
10583 struct sched_param *target_schp;
10584 struct sched_param schp;
10586 if (arg2 == 0) {
10587 return -TARGET_EINVAL;
10589 ret = get_errno(sched_getparam(arg1, &schp));
10590 if (!is_error(ret)) {
10591 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10592 return -TARGET_EFAULT;
10593 target_schp->sched_priority = tswap32(schp.sched_priority);
10594 unlock_user_struct(target_schp, arg2, 1);
10597 return ret;
10598 case TARGET_NR_sched_setscheduler:
10600 struct sched_param *target_schp;
10601 struct sched_param schp;
10602 if (arg3 == 0) {
10603 return -TARGET_EINVAL;
10605 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10606 return -TARGET_EFAULT;
10607 schp.sched_priority = tswap32(target_schp->sched_priority);
10608 unlock_user_struct(target_schp, arg3, 0);
10609 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10611 case TARGET_NR_sched_getscheduler:
10612 return get_errno(sched_getscheduler(arg1));
10613 case TARGET_NR_sched_yield:
10614 return get_errno(sched_yield());
10615 case TARGET_NR_sched_get_priority_max:
10616 return get_errno(sched_get_priority_max(arg1));
10617 case TARGET_NR_sched_get_priority_min:
10618 return get_errno(sched_get_priority_min(arg1));
10619 #ifdef TARGET_NR_sched_rr_get_interval
10620 case TARGET_NR_sched_rr_get_interval:
10622 struct timespec ts;
10623 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10624 if (!is_error(ret)) {
10625 ret = host_to_target_timespec(arg2, &ts);
10628 return ret;
10629 #endif
10630 #ifdef TARGET_NR_sched_rr_get_interval_time64
10631 case TARGET_NR_sched_rr_get_interval_time64:
10633 struct timespec ts;
10634 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10635 if (!is_error(ret)) {
10636 ret = host_to_target_timespec64(arg2, &ts);
10639 return ret;
10640 #endif
10641 #if defined(TARGET_NR_nanosleep)
10642 case TARGET_NR_nanosleep:
10644 struct timespec req, rem;
10645 target_to_host_timespec(&req, arg1);
10646 ret = get_errno(safe_nanosleep(&req, &rem));
10647 if (is_error(ret) && arg2) {
10648 host_to_target_timespec(arg2, &rem);
10651 return ret;
10652 #endif
10653 case TARGET_NR_prctl:
10654 switch (arg1) {
10655 case PR_GET_PDEATHSIG:
10657 int deathsig;
10658 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10659 if (!is_error(ret) && arg2
10660 && put_user_ual(deathsig, arg2)) {
10661 return -TARGET_EFAULT;
10663 return ret;
10665 #ifdef PR_GET_NAME
10666 case PR_GET_NAME:
10668 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10669 if (!name) {
10670 return -TARGET_EFAULT;
10672 ret = get_errno(prctl(arg1, (unsigned long)name,
10673 arg3, arg4, arg5));
10674 unlock_user(name, arg2, 16);
10675 return ret;
10677 case PR_SET_NAME:
10679 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10680 if (!name) {
10681 return -TARGET_EFAULT;
10683 ret = get_errno(prctl(arg1, (unsigned long)name,
10684 arg3, arg4, arg5));
10685 unlock_user(name, arg2, 0);
10686 return ret;
10688 #endif
10689 #ifdef TARGET_MIPS
10690 case TARGET_PR_GET_FP_MODE:
10692 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10693 ret = 0;
10694 if (env->CP0_Status & (1 << CP0St_FR)) {
10695 ret |= TARGET_PR_FP_MODE_FR;
10697 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10698 ret |= TARGET_PR_FP_MODE_FRE;
10700 return ret;
10702 case TARGET_PR_SET_FP_MODE:
10704 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10705 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10706 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10707 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10708 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10710 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10711 TARGET_PR_FP_MODE_FRE;
10713 /* If nothing to change, return right away, successfully. */
10714 if (old_fr == new_fr && old_fre == new_fre) {
10715 return 0;
10717 /* Check the value is valid */
10718 if (arg2 & ~known_bits) {
10719 return -TARGET_EOPNOTSUPP;
10721 /* Setting FRE without FR is not supported. */
10722 if (new_fre && !new_fr) {
10723 return -TARGET_EOPNOTSUPP;
10725 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10726 /* FR1 is not supported */
10727 return -TARGET_EOPNOTSUPP;
10729 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10730 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10731 /* cannot set FR=0 */
10732 return -TARGET_EOPNOTSUPP;
10734 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10735 /* Cannot set FRE=1 */
10736 return -TARGET_EOPNOTSUPP;
10739 int i;
10740 fpr_t *fpr = env->active_fpu.fpr;
10741 for (i = 0; i < 32 ; i += 2) {
10742 if (!old_fr && new_fr) {
10743 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10744 } else if (old_fr && !new_fr) {
10745 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10749 if (new_fr) {
10750 env->CP0_Status |= (1 << CP0St_FR);
10751 env->hflags |= MIPS_HFLAG_F64;
10752 } else {
10753 env->CP0_Status &= ~(1 << CP0St_FR);
10754 env->hflags &= ~MIPS_HFLAG_F64;
10756 if (new_fre) {
10757 env->CP0_Config5 |= (1 << CP0C5_FRE);
10758 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10759 env->hflags |= MIPS_HFLAG_FRE;
10761 } else {
10762 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10763 env->hflags &= ~MIPS_HFLAG_FRE;
10766 return 0;
10768 #endif /* MIPS */
10769 #ifdef TARGET_AARCH64
10770 case TARGET_PR_SVE_SET_VL:
10771 /*
10772 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10773 * PR_SVE_VL_INHERIT. Note the kernel definition
10774 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10775 * even though the current architectural maximum is VQ=16.
10776 */
10777 ret = -TARGET_EINVAL;
10778 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10779 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10780 CPUARMState *env = cpu_env;
10781 ARMCPU *cpu = env_archcpu(env);
10782 uint32_t vq, old_vq;
10784 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10785 vq = MAX(arg2 / 16, 1);
10786 vq = MIN(vq, cpu->sve_max_vq);
10788 if (vq < old_vq) {
10789 aarch64_sve_narrow_vq(env, vq);
10791 env->vfp.zcr_el[1] = vq - 1;
10792 arm_rebuild_hflags(env);
10793 ret = vq * 16;
10795 return ret;
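/*
 * Worked example (illustrative, mirroring the arithmetic above): the
 * requested SVE vector length in bytes is converted to quadwords (VQ)
 * and clamped to [1, sve_max_vq]; the syscall reports back the vector
 * length actually set.
 */
#if 0
static int sve_set_vl_demo(int req_bytes, int sve_max_vq)
{
    int vq = MAX(req_bytes / 16, 1);
    vq = MIN(vq, sve_max_vq);
    /* e.g. req_bytes = 8192, sve_max_vq = 16 -> vq = 16, VL = 256 bytes */
    return vq * 16;
}
#endif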
10796 case TARGET_PR_SVE_GET_VL:
10797 ret = -TARGET_EINVAL;
10799 ARMCPU *cpu = env_archcpu(cpu_env);
10800 if (cpu_isar_feature(aa64_sve, cpu)) {
10801 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10804 return ret;
10805 case TARGET_PR_PAC_RESET_KEYS:
10807 CPUARMState *env = cpu_env;
10808 ARMCPU *cpu = env_archcpu(env);
10810 if (arg3 || arg4 || arg5) {
10811 return -TARGET_EINVAL;
10813 if (cpu_isar_feature(aa64_pauth, cpu)) {
10814 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10815 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10816 TARGET_PR_PAC_APGAKEY);
10817 int ret = 0;
10818 Error *err = NULL;
10820 if (arg2 == 0) {
10821 arg2 = all;
10822 } else if (arg2 & ~all) {
10823 return -TARGET_EINVAL;
10825 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10826 ret |= qemu_guest_getrandom(&env->keys.apia,
10827 sizeof(ARMPACKey), &err);
10829 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10830 ret |= qemu_guest_getrandom(&env->keys.apib,
10831 sizeof(ARMPACKey), &err);
10833 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10834 ret |= qemu_guest_getrandom(&env->keys.apda,
10835 sizeof(ARMPACKey), &err);
10837 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10838 ret |= qemu_guest_getrandom(&env->keys.apdb,
10839 sizeof(ARMPACKey), &err);
10841 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10842 ret |= qemu_guest_getrandom(&env->keys.apga,
10843 sizeof(ARMPACKey), &err);
10845 if (ret != 0) {
10846 /*
10847 * Some unknown failure in the crypto. The best
10848 * we can do is log it and fail the syscall.
10849 * The real syscall cannot fail this way.
10850 */
10851 qemu_log_mask(LOG_UNIMP,
10852 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10853 error_get_pretty(err));
10854 error_free(err);
10855 return -TARGET_EIO;
10857 return 0;
10860 return -TARGET_EINVAL;
10861 #endif /* AARCH64 */
10862 case PR_GET_SECCOMP:
10863 case PR_SET_SECCOMP:
10864 /* Disable seccomp to prevent the target disabling syscalls we
10865 * need. */
10866 return -TARGET_EINVAL;
10867 default:
10868 /* Most prctl options have no pointer arguments */
10869 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10871 break;
10872 #ifdef TARGET_NR_arch_prctl
10873 case TARGET_NR_arch_prctl:
10874 return do_arch_prctl(cpu_env, arg1, arg2);
10875 #endif
10876 #ifdef TARGET_NR_pread64
10877 case TARGET_NR_pread64:
10878 if (regpairs_aligned(cpu_env, num)) {
10879 arg4 = arg5;
10880 arg5 = arg6;
10882 if (arg2 == 0 && arg3 == 0) {
10883 /* Special-case NULL buffer and zero length, which should succeed */
10884 p = 0;
10885 } else {
10886 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10887 if (!p) {
10888 return -TARGET_EFAULT;
10891 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10892 unlock_user(p, arg2, ret);
10893 return ret;
10894 case TARGET_NR_pwrite64:
10895 if (regpairs_aligned(cpu_env, num)) {
10896 arg4 = arg5;
10897 arg5 = arg6;
10899 if (arg2 == 0 && arg3 == 0) {
10900 /* Special-case NULL buffer and zero length, which should succeed */
10901 p = 0;
10902 } else {
10903 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10904 if (!p) {
10905 return -TARGET_EFAULT;
10908 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10909 unlock_user(p, arg2, 0);
10910 return ret;
10911 #endif
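/*
 * Illustration (hypothetical helper): on 32-bit ABIs that pass 64-bit
 * values in aligned register pairs, a padding register precedes the
 * offset, so the offset halves shift from (arg4, arg5) to (arg5, arg6)
 * exactly as pread64/pwrite64 do above.
 */
#if 0
static uint64_t pread64_offset_demo(CPUArchState *env, int num,
                                    abi_long arg4, abi_long arg5,
                                    abi_long arg6)
{
    if (regpairs_aligned(env, num)) {
        arg4 = arg5;    /* skip the alignment padding register */
        arg5 = arg6;
    }
    return target_offset64(arg4, arg5);
}
#endif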
10912 case TARGET_NR_getcwd:
10913 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10914 return -TARGET_EFAULT;
10915 ret = get_errno(sys_getcwd1(p, arg2));
10916 unlock_user(p, arg1, ret);
10917 return ret;
10918 case TARGET_NR_capget:
10919 case TARGET_NR_capset:
10921 struct target_user_cap_header *target_header;
10922 struct target_user_cap_data *target_data = NULL;
10923 struct __user_cap_header_struct header;
10924 struct __user_cap_data_struct data[2];
10925 struct __user_cap_data_struct *dataptr = NULL;
10926 int i, target_datalen;
10927 int data_items = 1;
10929 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10930 return -TARGET_EFAULT;
10932 header.version = tswap32(target_header->version);
10933 header.pid = tswap32(target_header->pid);
10935 if (header.version != _LINUX_CAPABILITY_VERSION) {
10936 /* Version 2 and up takes pointer to two user_data structs */
10937 data_items = 2;
10940 target_datalen = sizeof(*target_data) * data_items;
10942 if (arg2) {
10943 if (num == TARGET_NR_capget) {
10944 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10945 } else {
10946 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10948 if (!target_data) {
10949 unlock_user_struct(target_header, arg1, 0);
10950 return -TARGET_EFAULT;
10953 if (num == TARGET_NR_capset) {
10954 for (i = 0; i < data_items; i++) {
10955 data[i].effective = tswap32(target_data[i].effective);
10956 data[i].permitted = tswap32(target_data[i].permitted);
10957 data[i].inheritable = tswap32(target_data[i].inheritable);
10961 dataptr = data;
10964 if (num == TARGET_NR_capget) {
10965 ret = get_errno(capget(&header, dataptr));
10966 } else {
10967 ret = get_errno(capset(&header, dataptr));
10970 /* The kernel always updates version for both capget and capset */
10971 target_header->version = tswap32(header.version);
10972 unlock_user_struct(target_header, arg1, 1);
10974 if (arg2) {
10975 if (num == TARGET_NR_capget) {
10976 for (i = 0; i < data_items; i++) {
10977 target_data[i].effective = tswap32(data[i].effective);
10978 target_data[i].permitted = tswap32(data[i].permitted);
10979 target_data[i].inheritable = tswap32(data[i].inheritable);
10981 unlock_user(target_data, arg2, target_datalen);
10982 } else {
10983 unlock_user(target_data, arg2, 0);
10986 return ret;
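/*
 * Worked example (not original code): why data_items above becomes 2.
 * Capability version 1 headers take a single __user_cap_data_struct,
 * while v2/v3 headers take an array of two.
 */
#if 0
static int cap_data_items_demo(uint32_t header_version)
{
    return header_version == _LINUX_CAPABILITY_VERSION ? 1 : 2;
}
#endif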
10988 case TARGET_NR_sigaltstack:
10989 return do_sigaltstack(arg1, arg2,
10990 get_sp_from_cpustate((CPUArchState *)cpu_env));
10992 #ifdef CONFIG_SENDFILE
10993 #ifdef TARGET_NR_sendfile
10994 case TARGET_NR_sendfile:
10996 off_t *offp = NULL;
10997 off_t off;
10998 if (arg3) {
10999 ret = get_user_sal(off, arg3);
11000 if (is_error(ret)) {
11001 return ret;
11003 offp = &off;
11005 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11006 if (!is_error(ret) && arg3) {
11007 abi_long ret2 = put_user_sal(off, arg3);
11008 if (is_error(ret2)) {
11009 ret = ret2;
11012 return ret;
11014 #endif
11015 #ifdef TARGET_NR_sendfile64
11016 case TARGET_NR_sendfile64:
11018 off_t *offp = NULL;
11019 off_t off;
11020 if (arg3) {
11021 ret = get_user_s64(off, arg3);
11022 if (is_error(ret)) {
11023 return ret;
11025 offp = &off;
11027 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11028 if (!is_error(ret) && arg3) {
11029 abi_long ret2 = put_user_s64(off, arg3);
11030 if (is_error(ret2)) {
11031 ret = ret2;
11034 return ret;
11036 #endif
11037 #endif
11038 #ifdef TARGET_NR_vfork
11039 case TARGET_NR_vfork:
11040 return get_errno(do_fork(cpu_env,
11041 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11042 0, 0, 0, 0));
11043 #endif
11044 #ifdef TARGET_NR_ugetrlimit
11045 case TARGET_NR_ugetrlimit:
11047 struct rlimit rlim;
11048 int resource = target_to_host_resource(arg1);
11049 ret = get_errno(getrlimit(resource, &rlim));
11050 if (!is_error(ret)) {
11051 struct target_rlimit *target_rlim;
11052 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11053 return -TARGET_EFAULT;
11054 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11055 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11056 unlock_user_struct(target_rlim, arg2, 1);
11058 return ret;
11060 #endif
11061 #ifdef TARGET_NR_truncate64
11062 case TARGET_NR_truncate64:
11063 if (!(p = lock_user_string(arg1)))
11064 return -TARGET_EFAULT;
11065 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11066 unlock_user(p, arg1, 0);
11067 return ret;
11068 #endif
11069 #ifdef TARGET_NR_ftruncate64
11070 case TARGET_NR_ftruncate64:
11071 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11072 #endif
11073 #ifdef TARGET_NR_stat64
11074 case TARGET_NR_stat64:
11075 if (!(p = lock_user_string(arg1))) {
11076 return -TARGET_EFAULT;
11078 ret = get_errno(stat(path(p), &st));
11079 unlock_user(p, arg1, 0);
11080 if (!is_error(ret))
11081 ret = host_to_target_stat64(cpu_env, arg2, &st);
11082 return ret;
11083 #endif
11084 #ifdef TARGET_NR_lstat64
11085 case TARGET_NR_lstat64:
11086 if (!(p = lock_user_string(arg1))) {
11087 return -TARGET_EFAULT;
11089 ret = get_errno(lstat(path(p), &st));
11090 unlock_user(p, arg1, 0);
11091 if (!is_error(ret))
11092 ret = host_to_target_stat64(cpu_env, arg2, &st);
11093 return ret;
11094 #endif
11095 #ifdef TARGET_NR_fstat64
11096 case TARGET_NR_fstat64:
11097 ret = get_errno(fstat(arg1, &st));
11098 if (!is_error(ret))
11099 ret = host_to_target_stat64(cpu_env, arg2, &st);
11100 return ret;
11101 #endif
11102 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11103 #ifdef TARGET_NR_fstatat64
11104 case TARGET_NR_fstatat64:
11105 #endif
11106 #ifdef TARGET_NR_newfstatat
11107 case TARGET_NR_newfstatat:
11108 #endif
11109 if (!(p = lock_user_string(arg2))) {
11110 return -TARGET_EFAULT;
11112 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11113 unlock_user(p, arg2, 0);
11114 if (!is_error(ret))
11115 ret = host_to_target_stat64(cpu_env, arg3, &st);
11116 return ret;
11117 #endif
11118 #if defined(TARGET_NR_statx)
11119 case TARGET_NR_statx:
11121 struct target_statx *target_stx;
11122 int dirfd = arg1;
11123 int flags = arg3;
11125 p = lock_user_string(arg2);
11126 if (p == NULL) {
11127 return -TARGET_EFAULT;
11129 #if defined(__NR_statx)
11130 {
11131 /*
11132 * It is assumed that struct statx is architecture independent.
11133 */
11134 struct target_statx host_stx;
11135 int mask = arg4;
11137 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11138 if (!is_error(ret)) {
11139 if (host_to_target_statx(&host_stx, arg5) != 0) {
11140 unlock_user(p, arg2, 0);
11141 return -TARGET_EFAULT;
11142 }
11143 }
11145 if (ret != -TARGET_ENOSYS) {
11146 unlock_user(p, arg2, 0);
11147 return ret;
11148 }
11149 }
11150 #endif
11151 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11152 unlock_user(p, arg2, 0);
11154 if (!is_error(ret)) {
11155 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11156 return -TARGET_EFAULT;
11158 memset(target_stx, 0, sizeof(*target_stx));
11159 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11160 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11161 __put_user(st.st_ino, &target_stx->stx_ino);
11162 __put_user(st.st_mode, &target_stx->stx_mode);
11163 __put_user(st.st_uid, &target_stx->stx_uid);
11164 __put_user(st.st_gid, &target_stx->stx_gid);
11165 __put_user(st.st_nlink, &target_stx->stx_nlink);
11166 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11167 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11168 __put_user(st.st_size, &target_stx->stx_size);
11169 __put_user(st.st_blksize, &target_stx->stx_blksize);
11170 __put_user(st.st_blocks, &target_stx->stx_blocks);
11171 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11172 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11173 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11174 unlock_user_struct(target_stx, arg5, 1);
11177 return ret;
11178 #endif
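/*
 * Sketch (hypothetical, condensing the statx logic above): prefer the
 * host statx(2); only if the host kernel lacks it (ENOSYS) fall back to
 * fstatat(2), after which the caller fills a target_statx from the
 * plain struct stat.
 */
#if 0
static abi_long statx_fallback_demo(int dirfd, const char *name, int flags,
                                    int mask, struct target_statx *stx,
                                    struct stat *st)
{
    abi_long ret;
#if defined(__NR_statx)
    ret = get_errno(sys_statx(dirfd, name, flags, mask, stx));
    if (ret != -TARGET_ENOSYS) {
        return ret;     /* success, or a real error: we are done */
    }
#endif
    return get_errno(fstatat(dirfd, name, st, flags));
}
#endif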
11179 #ifdef TARGET_NR_lchown
11180 case TARGET_NR_lchown:
11181 if (!(p = lock_user_string(arg1)))
11182 return -TARGET_EFAULT;
11183 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11184 unlock_user(p, arg1, 0);
11185 return ret;
11186 #endif
11187 #ifdef TARGET_NR_getuid
11188 case TARGET_NR_getuid:
11189 return get_errno(high2lowuid(getuid()));
11190 #endif
11191 #ifdef TARGET_NR_getgid
11192 case TARGET_NR_getgid:
11193 return get_errno(high2lowgid(getgid()));
11194 #endif
11195 #ifdef TARGET_NR_geteuid
11196 case TARGET_NR_geteuid:
11197 return get_errno(high2lowuid(geteuid()));
11198 #endif
11199 #ifdef TARGET_NR_getegid
11200 case TARGET_NR_getegid:
11201 return get_errno(high2lowgid(getegid()));
11202 #endif
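/*
 * Illustration (a sketch of the usual 16-bit id convention; the helper
 * name is hypothetical): the legacy getuid/getgid calls above return
 * 16-bit ids, so values above 65535 collapse to the kernel's overflow
 * id 65534 rather than being silently truncated.
 */
#if 0
static uint16_t high2lowuid_demo(uid_t uid)
{
    return uid > 65535 ? 65534 : uid;   /* e.g. uid 100000 -> 65534 */
}
#endif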
11203 case TARGET_NR_setreuid:
11204 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11205 case TARGET_NR_setregid:
11206 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11207 case TARGET_NR_getgroups:
11209 int gidsetsize = arg1;
11210 target_id *target_grouplist;
11211 gid_t *grouplist;
11212 int i;
11214 grouplist = alloca(gidsetsize * sizeof(gid_t));
11215 ret = get_errno(getgroups(gidsetsize, grouplist));
11216 if (gidsetsize == 0)
11217 return ret;
11218 if (!is_error(ret)) {
11219 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11220 if (!target_grouplist)
11221 return -TARGET_EFAULT;
11222 for(i = 0;i < ret; i++)
11223 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11224 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11227 return ret;
11228 case TARGET_NR_setgroups:
11230 int gidsetsize = arg1;
11231 target_id *target_grouplist;
11232 gid_t *grouplist = NULL;
11233 int i;
11234 if (gidsetsize) {
11235 grouplist = alloca(gidsetsize * sizeof(gid_t));
11236 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11237 if (!target_grouplist) {
11238 return -TARGET_EFAULT;
11240 for (i = 0; i < gidsetsize; i++) {
11241 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11243 unlock_user(target_grouplist, arg2, 0);
11245 return get_errno(setgroups(gidsetsize, grouplist));
11247 case TARGET_NR_fchown:
11248 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11249 #if defined(TARGET_NR_fchownat)
11250 case TARGET_NR_fchownat:
11251 if (!(p = lock_user_string(arg2)))
11252 return -TARGET_EFAULT;
11253 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11254 low2highgid(arg4), arg5));
11255 unlock_user(p, arg2, 0);
11256 return ret;
11257 #endif
11258 #ifdef TARGET_NR_setresuid
11259 case TARGET_NR_setresuid:
11260 return get_errno(sys_setresuid(low2highuid(arg1),
11261 low2highuid(arg2),
11262 low2highuid(arg3)));
11263 #endif
11264 #ifdef TARGET_NR_getresuid
11265 case TARGET_NR_getresuid:
11267 uid_t ruid, euid, suid;
11268 ret = get_errno(getresuid(&ruid, &euid, &suid));
11269 if (!is_error(ret)) {
11270 if (put_user_id(high2lowuid(ruid), arg1)
11271 || put_user_id(high2lowuid(euid), arg2)
11272 || put_user_id(high2lowuid(suid), arg3))
11273 return -TARGET_EFAULT;
11276 return ret;
11277 #endif
11278 #ifdef TARGET_NR_getresgid
11279 case TARGET_NR_setresgid:
11280 return get_errno(sys_setresgid(low2highgid(arg1),
11281 low2highgid(arg2),
11282 low2highgid(arg3)));
11283 #endif
11284 #ifdef TARGET_NR_getresgid
11285 case TARGET_NR_getresgid:
11287 gid_t rgid, egid, sgid;
11288 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11289 if (!is_error(ret)) {
11290 if (put_user_id(high2lowgid(rgid), arg1)
11291 || put_user_id(high2lowgid(egid), arg2)
11292 || put_user_id(high2lowgid(sgid), arg3))
11293 return -TARGET_EFAULT;
11296 return ret;
11297 #endif
11298 #ifdef TARGET_NR_chown
11299 case TARGET_NR_chown:
11300 if (!(p = lock_user_string(arg1)))
11301 return -TARGET_EFAULT;
11302 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11303 unlock_user(p, arg1, 0);
11304 return ret;
11305 #endif
11306 case TARGET_NR_setuid:
11307 return get_errno(sys_setuid(low2highuid(arg1)));
11308 case TARGET_NR_setgid:
11309 return get_errno(sys_setgid(low2highgid(arg1)));
11310 case TARGET_NR_setfsuid:
11311 return get_errno(setfsuid(arg1));
11312 case TARGET_NR_setfsgid:
11313 return get_errno(setfsgid(arg1));
11315 #ifdef TARGET_NR_lchown32
11316 case TARGET_NR_lchown32:
11317 if (!(p = lock_user_string(arg1)))
11318 return -TARGET_EFAULT;
11319 ret = get_errno(lchown(p, arg2, arg3));
11320 unlock_user(p, arg1, 0);
11321 return ret;
11322 #endif
11323 #ifdef TARGET_NR_getuid32
11324 case TARGET_NR_getuid32:
11325 return get_errno(getuid());
11326 #endif
11328 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11329 /* Alpha specific */
11330 case TARGET_NR_getxuid:
11332 uid_t euid;
11333 euid=geteuid();
11334 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11336 return get_errno(getuid());
11337 #endif
11338 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11339 /* Alpha specific */
11340 case TARGET_NR_getxgid:
11342 uid_t egid;
11343 egid=getegid();
11344 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11346 return get_errno(getgid());
11347 #endif
11348 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11349 /* Alpha specific */
11350 case TARGET_NR_osf_getsysinfo:
11351 ret = -TARGET_EOPNOTSUPP;
11352 switch (arg1) {
11353 case TARGET_GSI_IEEE_FP_CONTROL:
11354 {
11355 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11356 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11358 swcr &= ~SWCR_STATUS_MASK;
11359 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11361 if (put_user_u64 (swcr, arg2))
11362 return -TARGET_EFAULT;
11363 ret = 0;
11364 }
11365 break;
11367 /* case GSI_IEEE_STATE_AT_SIGNAL:
11368 -- Not implemented in linux kernel.
11369 case GSI_UACPROC:
11370 -- Retrieves current unaligned access state; not much used.
11371 case GSI_PROC_TYPE:
11372 -- Retrieves implver information; surely not used.
11373 case GSI_GET_HWRPB:
11374 -- Grabs a copy of the HWRPB; surely not used.
11375 */
11376 }
11377 return ret;
11378 #endif
11379 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11380 /* Alpha specific */
11381 case TARGET_NR_osf_setsysinfo:
11382 ret = -TARGET_EOPNOTSUPP;
11383 switch (arg1) {
11384 case TARGET_SSI_IEEE_FP_CONTROL:
11385 {
11386 uint64_t swcr, fpcr;
11388 if (get_user_u64 (swcr, arg2)) {
11389 return -TARGET_EFAULT;
11390 }
11392 /*
11393 * The kernel calls swcr_update_status to update the
11394 * status bits from the fpcr at every point that it
11395 * could be queried. Therefore, we store the status
11396 * bits only in FPCR.
11397 */
11398 ((CPUAlphaState *)cpu_env)->swcr
11399 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11401 fpcr = cpu_alpha_load_fpcr(cpu_env);
11402 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11403 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11404 cpu_alpha_store_fpcr(cpu_env, fpcr);
11405 ret = 0;
11406 }
11407 break;
11409 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11410 {
11411 uint64_t exc, fpcr, fex;
11413 if (get_user_u64(exc, arg2)) {
11414 return -TARGET_EFAULT;
11415 }
11416 exc &= SWCR_STATUS_MASK;
11417 fpcr = cpu_alpha_load_fpcr(cpu_env);
11419 /* Old exceptions are not signaled. */
11420 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11421 fex = exc & ~fex;
11422 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11423 fex &= ((CPUArchState *)cpu_env)->swcr;
11425 /* Update the hardware fpcr. */
11426 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11427 cpu_alpha_store_fpcr(cpu_env, fpcr);
11429 if (fex) {
11430 int si_code = TARGET_FPE_FLTUNK;
11431 target_siginfo_t info;
11433 if (fex & SWCR_TRAP_ENABLE_DNO) {
11434 si_code = TARGET_FPE_FLTUND;
11435 }
11436 if (fex & SWCR_TRAP_ENABLE_INE) {
11437 si_code = TARGET_FPE_FLTRES;
11438 }
11439 if (fex & SWCR_TRAP_ENABLE_UNF) {
11440 si_code = TARGET_FPE_FLTUND;
11441 }
11442 if (fex & SWCR_TRAP_ENABLE_OVF) {
11443 si_code = TARGET_FPE_FLTOVF;
11444 }
11445 if (fex & SWCR_TRAP_ENABLE_DZE) {
11446 si_code = TARGET_FPE_FLTDIV;
11447 }
11448 if (fex & SWCR_TRAP_ENABLE_INV) {
11449 si_code = TARGET_FPE_FLTINV;
11450 }
11452 info.si_signo = SIGFPE;
11453 info.si_errno = 0;
11454 info.si_code = si_code;
11455 info._sifields._sigfault._addr
11456 = ((CPUArchState *)cpu_env)->pc;
11457 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11458 QEMU_SI_FAULT, &info);
11459 }
11460 ret = 0;
11461 }
11462 break;
11464 /* case SSI_NVPAIRS:
11465 -- Used with SSIN_UACPROC to enable unaligned accesses.
11466 case SSI_IEEE_STATE_AT_SIGNAL:
11467 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11468 -- Not implemented in linux kernel
11469 */
11470 }
11471 return ret;
11472 #endif
11473 #ifdef TARGET_NR_osf_sigprocmask
11474 /* Alpha specific. */
11475 case TARGET_NR_osf_sigprocmask:
11477 abi_ulong mask;
11478 int how;
11479 sigset_t set, oldset;
11481 switch(arg1) {
11482 case TARGET_SIG_BLOCK:
11483 how = SIG_BLOCK;
11484 break;
11485 case TARGET_SIG_UNBLOCK:
11486 how = SIG_UNBLOCK;
11487 break;
11488 case TARGET_SIG_SETMASK:
11489 how = SIG_SETMASK;
11490 break;
11491 default:
11492 return -TARGET_EINVAL;
11494 mask = arg2;
11495 target_to_host_old_sigset(&set, &mask);
11496 ret = do_sigprocmask(how, &set, &oldset);
11497 if (!ret) {
11498 host_to_target_old_sigset(&mask, &oldset);
11499 ret = mask;
11502 return ret;
11503 #endif
11505 #ifdef TARGET_NR_getgid32
11506 case TARGET_NR_getgid32:
11507 return get_errno(getgid());
11508 #endif
11509 #ifdef TARGET_NR_geteuid32
11510 case TARGET_NR_geteuid32:
11511 return get_errno(geteuid());
11512 #endif
11513 #ifdef TARGET_NR_getegid32
11514 case TARGET_NR_getegid32:
11515 return get_errno(getegid());
11516 #endif
11517 #ifdef TARGET_NR_setreuid32
11518 case TARGET_NR_setreuid32:
11519 return get_errno(setreuid(arg1, arg2));
11520 #endif
11521 #ifdef TARGET_NR_setregid32
11522 case TARGET_NR_setregid32:
11523 return get_errno(setregid(arg1, arg2));
11524 #endif
11525 #ifdef TARGET_NR_getgroups32
11526 case TARGET_NR_getgroups32:
11528 int gidsetsize = arg1;
11529 uint32_t *target_grouplist;
11530 gid_t *grouplist;
11531 int i;
11533 grouplist = alloca(gidsetsize * sizeof(gid_t));
11534 ret = get_errno(getgroups(gidsetsize, grouplist));
11535 if (gidsetsize == 0)
11536 return ret;
11537 if (!is_error(ret)) {
11538 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11539 if (!target_grouplist) {
11540 return -TARGET_EFAULT;
11542 for(i = 0;i < ret; i++)
11543 target_grouplist[i] = tswap32(grouplist[i]);
11544 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11547 return ret;
11548 #endif
11549 #ifdef TARGET_NR_setgroups32
11550 case TARGET_NR_setgroups32:
11552 int gidsetsize = arg1;
11553 uint32_t *target_grouplist;
11554 gid_t *grouplist;
11555 int i;
11557 grouplist = alloca(gidsetsize * sizeof(gid_t));
11558 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11559 if (!target_grouplist) {
11560 return -TARGET_EFAULT;
11562 for(i = 0;i < gidsetsize; i++)
11563 grouplist[i] = tswap32(target_grouplist[i]);
11564 unlock_user(target_grouplist, arg2, 0);
11565 return get_errno(setgroups(gidsetsize, grouplist));
11567 #endif
11568 #ifdef TARGET_NR_fchown32
11569 case TARGET_NR_fchown32:
11570 return get_errno(fchown(arg1, arg2, arg3));
11571 #endif
11572 #ifdef TARGET_NR_setresuid32
11573 case TARGET_NR_setresuid32:
11574 return get_errno(sys_setresuid(arg1, arg2, arg3));
11575 #endif
11576 #ifdef TARGET_NR_getresuid32
11577 case TARGET_NR_getresuid32:
11579 uid_t ruid, euid, suid;
11580 ret = get_errno(getresuid(&ruid, &euid, &suid));
11581 if (!is_error(ret)) {
11582 if (put_user_u32(ruid, arg1)
11583 || put_user_u32(euid, arg2)
11584 || put_user_u32(suid, arg3))
11585 return -TARGET_EFAULT;
11588 return ret;
11589 #endif
11590 #ifdef TARGET_NR_setresgid32
11591 case TARGET_NR_setresgid32:
11592 return get_errno(sys_setresgid(arg1, arg2, arg3));
11593 #endif
11594 #ifdef TARGET_NR_getresgid32
11595 case TARGET_NR_getresgid32:
11597 gid_t rgid, egid, sgid;
11598 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11599 if (!is_error(ret)) {
11600 if (put_user_u32(rgid, arg1)
11601 || put_user_u32(egid, arg2)
11602 || put_user_u32(sgid, arg3))
11603 return -TARGET_EFAULT;
11606 return ret;
11607 #endif
11608 #ifdef TARGET_NR_chown32
11609 case TARGET_NR_chown32:
11610 if (!(p = lock_user_string(arg1)))
11611 return -TARGET_EFAULT;
11612 ret = get_errno(chown(p, arg2, arg3));
11613 unlock_user(p, arg1, 0);
11614 return ret;
11615 #endif
11616 #ifdef TARGET_NR_setuid32
11617 case TARGET_NR_setuid32:
11618 return get_errno(sys_setuid(arg1));
11619 #endif
11620 #ifdef TARGET_NR_setgid32
11621 case TARGET_NR_setgid32:
11622 return get_errno(sys_setgid(arg1));
11623 #endif
11624 #ifdef TARGET_NR_setfsuid32
11625 case TARGET_NR_setfsuid32:
11626 return get_errno(setfsuid(arg1));
11627 #endif
11628 #ifdef TARGET_NR_setfsgid32
11629 case TARGET_NR_setfsgid32:
11630 return get_errno(setfsgid(arg1));
11631 #endif
11632 #ifdef TARGET_NR_mincore
11633 case TARGET_NR_mincore:
11635 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11636 if (!a) {
11637 return -TARGET_ENOMEM;
11639 p = lock_user_string(arg3);
11640 if (!p) {
11641 ret = -TARGET_EFAULT;
11642 } else {
11643 ret = get_errno(mincore(a, arg2, p));
11644 unlock_user(p, arg3, ret);
11646 unlock_user(a, arg1, 0);
11648 return ret;
11649 #endif
11650 #ifdef TARGET_NR_arm_fadvise64_64
11651 case TARGET_NR_arm_fadvise64_64:
11652 /* arm_fadvise64_64 looks like fadvise64_64 but
11653 * with different argument order: fd, advice, offset, len
11654 * rather than the usual fd, offset, len, advice.
11655 * Note that offset and len are both 64-bit so appear as
11656 * pairs of 32-bit registers.
11657 */
11658 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11659 target_offset64(arg5, arg6), arg2);
11660 return -host_to_target_errno(ret);
11661 #endif
11663 #if TARGET_ABI_BITS == 32
11665 #ifdef TARGET_NR_fadvise64_64
11666 case TARGET_NR_fadvise64_64:
11667 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11668 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11669 ret = arg2;
11670 arg2 = arg3;
11671 arg3 = arg4;
11672 arg4 = arg5;
11673 arg5 = arg6;
11674 arg6 = ret;
11675 #else
11676 /* 6 args: fd, offset (high, low), len (high, low), advice */
11677 if (regpairs_aligned(cpu_env, num)) {
11678 /* offset is in (3,4), len in (5,6) and advice in 7 */
11679 arg2 = arg3;
11680 arg3 = arg4;
11681 arg4 = arg5;
11682 arg5 = arg6;
11683 arg6 = arg7;
11685 #endif
11686 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11687 target_offset64(arg4, arg5), arg6);
11688 return -host_to_target_errno(ret);
11689 #endif
11691 #ifdef TARGET_NR_fadvise64
11692 case TARGET_NR_fadvise64:
11693 /* 5 args: fd, offset (high, low), len, advice */
11694 if (regpairs_aligned(cpu_env, num)) {
11695 /* offset is in (3,4), len in 5 and advice in 6 */
11696 arg2 = arg3;
11697 arg3 = arg4;
11698 arg4 = arg5;
11699 arg5 = arg6;
11701 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11702 return -host_to_target_errno(ret);
11703 #endif
11705 #else /* not a 32-bit ABI */
11706 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11707 #ifdef TARGET_NR_fadvise64_64
11708 case TARGET_NR_fadvise64_64:
11709 #endif
11710 #ifdef TARGET_NR_fadvise64
11711 case TARGET_NR_fadvise64:
11712 #endif
11713 #ifdef TARGET_S390X
11714 switch (arg4) {
11715 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11716 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11717 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11718 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11719 default: break;
11721 #endif
11722 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11723 #endif
11724 #endif /* end of 64-bit ABI fadvise handling */
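/*
 * Illustration (hypothetical helper): all of the fadvise variants above
 * normalise their register layouts to one common shape before a single
 * posix_fadvise() call, which reports its error directly rather than
 * through errno.
 */
#if 0
static abi_long fadvise_demo(int fd, uint32_t off_hi, uint32_t off_lo,
                             uint32_t len_hi, uint32_t len_lo, int advice)
{
    off_t off = ((uint64_t)off_hi << 32) | off_lo;
    off_t len = ((uint64_t)len_hi << 32) | len_lo;
    return -host_to_target_errno(posix_fadvise(fd, off, len, advice));
}
#endif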
11726 #ifdef TARGET_NR_madvise
11727 case TARGET_NR_madvise:
11728 /* A straight passthrough may not be safe because qemu sometimes
11729 turns private file-backed mappings into anonymous mappings.
11730 This will break MADV_DONTNEED.
11731 This is a hint, so ignoring and returning success is ok. */
11732 return 0;
11733 #endif
11734 #ifdef TARGET_NR_fcntl64
11735 case TARGET_NR_fcntl64:
11737 int cmd;
11738 struct flock64 fl;
11739 from_flock64_fn *copyfrom = copy_from_user_flock64;
11740 to_flock64_fn *copyto = copy_to_user_flock64;
#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
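    /*
     * As with the kernel API, passing a NULL buffer (arg2 == 0) to the
     * *listxattr and *getxattr calls is the size-probe mode: the call
     * returns the buffer size required without writing any data.
     */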
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
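        /*
         * Each architecture keeps the TLS pointer in its own place: the
         * CP0 UserLocal register on MIPS (read by guest code via rdhwr),
         * the PID pseudo-register on CRIS, a GDT entry on 32-bit x86,
         * and a per-task value on m68k.
         */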
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff) {
            ret = -TARGET_EINVAL;
        } else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
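    /*
     * The *_time64 syscall variants take a target__kernel_timespec with
     * a 64-bit tv_sec even on 32-bit guests, so they convert via
     * target_to_host_timespec64()/host_to_target_timespec64() rather
     * than the 32-bit time_t conversion routines.
     */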
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * If the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
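    /*
     * utimensat(2): a NULL arg3 means "set both timestamps to the
     * current time"; otherwise it points to an (atime, mtime) pair of
     * guest timespecs, in which the special tv_nsec values UTIME_NOW
     * and UTIME_OMIT pass through the conversion unchanged.
     */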
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                              sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
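    /*
     * fd_trans_register() attaches a translator to the new file
     * descriptor so that inotify_event records later read() from it are
     * converted to the guest's byte order.
     */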
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
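    /*
     * splice(2) updates the offsets it is given, so off_in/off_out are
     * copied in from guest memory before the call and written back
     * afterwards when present.
     */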
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));

        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
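        /*
         * On success ret is the number of ready descriptors; convert
         * each epoll_event back to guest byte order before writing the
         * array out, and only copy back the entries actually produced.
         */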
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
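        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
         * deliberately not forwarded: they would apply to the whole QEMU
         * process, whose host-side memory use is much larger than the
         * guest's, and could make QEMU itself fail.
         */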
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
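                /*
                 * The id handed back to the guest is TIMER_MAGIC plus
                 * the index into g_posix_timers; get_timer_id() checks
                 * the magic and recovers the index on later timer_*
                 * calls.
                 */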
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                      timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                      timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

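    /* -strace: log the syscall and its arguments before it runs */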
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}