Merge remote-tracking branch 'qemu/master' into master
[qemu/ar7.git] / linux-user / syscall.c
blob3b72d2a9909f6f22f304a54940dff438154eab81
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
118 #endif
119 #include "linux_loop.h"
120 #include "uname.h"
122 #include "qemu.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
128 #include "tcg/tcg.h"
130 #ifndef CLONE_IO
131 #define CLONE_IO 0x80000000 /* Clone io context */
132 #endif
134 /* We can't directly call the host clone syscall, because this will
135 * badly confuse libc (breaking mutexes, for example). So we must
136 * divide clone flags into:
137 * * flag combinations that look like pthread_create()
138 * * flag combinations that look like fork()
139 * * flags we can implement within QEMU itself
140 * * flags we can't support and will return an error for
142 /* For thread creation, all these flags must be present; for
143 * fork, none must be present.
145 #define CLONE_THREAD_FLAGS \
146 (CLONE_VM | CLONE_FS | CLONE_FILES | \
147 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
149 /* These flags are ignored:
150 * CLONE_DETACHED is now ignored by the kernel;
151 * CLONE_IO is just an optimisation hint to the I/O scheduler
153 #define CLONE_IGNORED_FLAGS \
154 (CLONE_DETACHED | CLONE_IO)
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS \
158 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
159 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS \
163 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
164 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
166 #define CLONE_INVALID_FORK_FLAGS \
167 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
169 #define CLONE_INVALID_THREAD_FLAGS \
170 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
171 CLONE_IGNORED_FLAGS))
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174 * have almost all been allocated. We cannot support any of
175 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177 * The checks against the invalid thread masks above will catch these.
178 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182 * once. This exercises the codepaths for restart.
184 //#define DEBUG_ERESTARTSYS
186 //#include <linux/msdos_fs.h>
187 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
188 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
190 #undef _syscall0
191 #undef _syscall1
192 #undef _syscall2
193 #undef _syscall3
194 #undef _syscall4
195 #undef _syscall5
196 #undef _syscall6
198 #define _syscall0(type,name) \
199 static type name (void) \
201 return syscall(__NR_##name); \
204 #define _syscall1(type,name,type1,arg1) \
205 static type name (type1 arg1) \
207 return syscall(__NR_##name, arg1); \
210 #define _syscall2(type,name,type1,arg1,type2,arg2) \
211 static type name (type1 arg1,type2 arg2) \
213 return syscall(__NR_##name, arg1, arg2); \
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
217 static type name (type1 arg1,type2 arg2,type3 arg3) \
219 return syscall(__NR_##name, arg1, arg2, arg3); \
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
225 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
229 type5,arg5) \
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
237 type5,arg5,type6,arg6) \
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
239 type6 arg6) \
241 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
255 #endif
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
258 #endif
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
266 #endif
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
271 #endif
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid)
276 /* For the 64-bit guest on 32-bit host case we must emulate
277 * getdents using getdents64, because otherwise the host
278 * might hand us back more dirent records than we can fit
279 * into the guest buffer after structure format conversion.
280 * Otherwise we emulate getdents with getdents if the host has it.
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
284 #endif
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
288 #endif
289 #if (defined(TARGET_NR_getdents) && \
290 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
293 #endif
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
296 loff_t *, res, uint, wh);
297 #endif
298 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
299 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
300 siginfo_t *, uinfo)
301 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group,int,error_code)
304 #endif
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address,int *,tidptr)
307 #endif
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
310 const struct timespec *,timeout,int *,uaddr2,int,val3)
311 #endif
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
314 const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
318 unsigned long *, user_mask_ptr);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
321 unsigned long *, user_mask_ptr);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
324 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
325 void *, arg);
326 _syscall2(int, capget, struct __user_cap_header_struct *, header,
327 struct __user_cap_data_struct *, data);
328 _syscall2(int, capset, struct __user_cap_header_struct *, header,
329 struct __user_cap_data_struct *, data);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get, int, which, int, who)
332 #endif
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
335 #endif
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
338 #endif
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
342 unsigned long, idx1, unsigned long, idx2)
343 #endif
346 * It is assumed that struct statx is architecture independent.
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
350 unsigned int, mask, struct target_statx *, statxbuf)
351 #endif
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier, int, cmd, int, flags)
354 #endif
356 static bitmask_transtbl fcntl_flags_tbl[] = {
357 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
358 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
359 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
360 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
361 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
362 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
363 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
364 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
365 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
366 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
367 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
368 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
369 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
370 #if defined(O_DIRECT)
371 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
372 #endif
373 #if defined(O_NOATIME)
374 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
375 #endif
376 #if defined(O_CLOEXEC)
377 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
378 #endif
379 #if defined(O_PATH)
380 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
381 #endif
382 #if defined(O_TMPFILE)
383 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
384 #endif
385 /* Don't terminate the list prematurely on 64-bit host+guest. */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
388 #endif
389 { 0, 0, 0, 0 }
392 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
394 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
398 const struct timespec *,tsp,int,flags)
399 #else
400 static int sys_utimensat(int dirfd, const char *pathname,
401 const struct timespec times[2], int flags)
403 errno = ENOSYS;
404 return -1;
406 #endif
407 #endif /* TARGET_NR_utimensat */
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
413 const char *, new, unsigned int, flags)
414 #else
415 static int sys_renameat2(int oldfd, const char *old,
416 int newfd, const char *new, int flags)
418 if (flags == 0) {
419 return renameat(oldfd, old, newfd, new);
421 errno = ENOSYS;
422 return -1;
424 #endif
425 #endif /* TARGET_NR_renameat2 */
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
431 static int sys_inotify_init(void)
433 return (inotify_init());
435 #endif
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
437 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
439 return (inotify_add_watch(fd, pathname, mask));
441 #endif
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
443 static int sys_inotify_rm_watch(int fd, int32_t wd)
445 return (inotify_rm_watch(fd, wd));
447 #endif
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
450 static int sys_inotify_init1(int flags)
452 return (inotify_init1(flags));
454 #endif
455 #endif
456 #else
457 /* Userspace can usually survive runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY */
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
467 #endif
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not be that used by the underlying syscall */
470 struct host_rlimit64 {
471 uint64_t rlim_cur;
472 uint64_t rlim_max;
474 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
475 const struct host_rlimit64 *, new_limit,
476 struct host_rlimit64 *, old_limit)
477 #endif
480 #if defined(TARGET_NR_timer_create)
481 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers[32] = { 0, } ;
484 static inline int next_free_host_timer(void)
486 int k ;
487 /* FIXME: Does finding the next free slot require a lock? */
488 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
489 if (g_posix_timers[k] == 0) {
490 g_posix_timers[k] = (timer_t) 1;
491 return k;
494 return -1;
496 #endif
498 #define ERRNO_TABLE_SIZE 1200
500 /* target_to_host_errno_table[] is initialized from
501 * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
506 * This list is the union of errno values overridden in asm-<arch>/errno.h
507 * minus the errnos that are not actually generic to all archs.
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510 [EAGAIN] = TARGET_EAGAIN,
511 [EIDRM] = TARGET_EIDRM,
512 [ECHRNG] = TARGET_ECHRNG,
513 [EL2NSYNC] = TARGET_EL2NSYNC,
514 [EL3HLT] = TARGET_EL3HLT,
515 [EL3RST] = TARGET_EL3RST,
516 [ELNRNG] = TARGET_ELNRNG,
517 [EUNATCH] = TARGET_EUNATCH,
518 [ENOCSI] = TARGET_ENOCSI,
519 [EL2HLT] = TARGET_EL2HLT,
520 [EDEADLK] = TARGET_EDEADLK,
521 [ENOLCK] = TARGET_ENOLCK,
522 [EBADE] = TARGET_EBADE,
523 [EBADR] = TARGET_EBADR,
524 [EXFULL] = TARGET_EXFULL,
525 [ENOANO] = TARGET_ENOANO,
526 [EBADRQC] = TARGET_EBADRQC,
527 [EBADSLT] = TARGET_EBADSLT,
528 [EBFONT] = TARGET_EBFONT,
529 [ENOSTR] = TARGET_ENOSTR,
530 [ENODATA] = TARGET_ENODATA,
531 [ETIME] = TARGET_ETIME,
532 [ENOSR] = TARGET_ENOSR,
533 [ENONET] = TARGET_ENONET,
534 [ENOPKG] = TARGET_ENOPKG,
535 [EREMOTE] = TARGET_EREMOTE,
536 [ENOLINK] = TARGET_ENOLINK,
537 [EADV] = TARGET_EADV,
538 [ESRMNT] = TARGET_ESRMNT,
539 [ECOMM] = TARGET_ECOMM,
540 [EPROTO] = TARGET_EPROTO,
541 [EDOTDOT] = TARGET_EDOTDOT,
542 [EMULTIHOP] = TARGET_EMULTIHOP,
543 [EBADMSG] = TARGET_EBADMSG,
544 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
545 [EOVERFLOW] = TARGET_EOVERFLOW,
546 [ENOTUNIQ] = TARGET_ENOTUNIQ,
547 [EBADFD] = TARGET_EBADFD,
548 [EREMCHG] = TARGET_EREMCHG,
549 [ELIBACC] = TARGET_ELIBACC,
550 [ELIBBAD] = TARGET_ELIBBAD,
551 [ELIBSCN] = TARGET_ELIBSCN,
552 [ELIBMAX] = TARGET_ELIBMAX,
553 [ELIBEXEC] = TARGET_ELIBEXEC,
554 [EILSEQ] = TARGET_EILSEQ,
555 [ENOSYS] = TARGET_ENOSYS,
556 [ELOOP] = TARGET_ELOOP,
557 [ERESTART] = TARGET_ERESTART,
558 [ESTRPIPE] = TARGET_ESTRPIPE,
559 [ENOTEMPTY] = TARGET_ENOTEMPTY,
560 [EUSERS] = TARGET_EUSERS,
561 [ENOTSOCK] = TARGET_ENOTSOCK,
562 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
563 [EMSGSIZE] = TARGET_EMSGSIZE,
564 [EPROTOTYPE] = TARGET_EPROTOTYPE,
565 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
566 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
567 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
568 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
569 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
570 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
571 [EADDRINUSE] = TARGET_EADDRINUSE,
572 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
573 [ENETDOWN] = TARGET_ENETDOWN,
574 [ENETUNREACH] = TARGET_ENETUNREACH,
575 [ENETRESET] = TARGET_ENETRESET,
576 [ECONNABORTED] = TARGET_ECONNABORTED,
577 [ECONNRESET] = TARGET_ECONNRESET,
578 [ENOBUFS] = TARGET_ENOBUFS,
579 [EISCONN] = TARGET_EISCONN,
580 [ENOTCONN] = TARGET_ENOTCONN,
581 [EUCLEAN] = TARGET_EUCLEAN,
582 [ENOTNAM] = TARGET_ENOTNAM,
583 [ENAVAIL] = TARGET_ENAVAIL,
584 [EISNAM] = TARGET_EISNAM,
585 [EREMOTEIO] = TARGET_EREMOTEIO,
586 [EDQUOT] = TARGET_EDQUOT,
587 [ESHUTDOWN] = TARGET_ESHUTDOWN,
588 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
589 [ETIMEDOUT] = TARGET_ETIMEDOUT,
590 [ECONNREFUSED] = TARGET_ECONNREFUSED,
591 [EHOSTDOWN] = TARGET_EHOSTDOWN,
592 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
593 [EALREADY] = TARGET_EALREADY,
594 [EINPROGRESS] = TARGET_EINPROGRESS,
595 [ESTALE] = TARGET_ESTALE,
596 [ECANCELED] = TARGET_ECANCELED,
597 [ENOMEDIUM] = TARGET_ENOMEDIUM,
598 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600 [ENOKEY] = TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606 [EKEYREVOKED] = TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609 [EKEYREJECTED] = TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612 [EOWNERDEAD] = TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618 [ENOMSG] = TARGET_ENOMSG,
619 #endif
620 #ifdef ERKFILL
621 [ERFKILL] = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624 [EHWPOISON] = TARGET_EHWPOISON,
625 #endif
628 static inline int host_to_target_errno(int err)
630 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631 host_to_target_errno_table[err]) {
632 return host_to_target_errno_table[err];
634 return err;
637 static inline int target_to_host_errno(int err)
639 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640 target_to_host_errno_table[err]) {
641 return target_to_host_errno_table[err];
643 return err;
646 static inline abi_long get_errno(abi_long ret)
648 if (ret == -1)
649 return -host_to_target_errno(errno);
650 else
651 return ret;
654 const char *target_strerror(int err)
656 if (err == TARGET_ERESTARTSYS) {
657 return "To be restarted";
659 if (err == TARGET_QEMU_ESIGRETURN) {
660 return "Successful exit from sigreturn";
663 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664 return NULL;
666 return strerror(target_to_host_errno(err));
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
672 return safe_syscall(__NR_##name); \
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
678 return safe_syscall(__NR_##name, arg1); \
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
684 return safe_syscall(__NR_##name, arg1, arg2); \
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
690 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
694 type4, arg4) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
697 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701 type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
703 type5 arg5) \
705 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709 type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711 type5 arg5, type6 arg6) \
713 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
716 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
717 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
718 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
719 int, flags, mode_t, mode)
720 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722 struct rusage *, rusage)
723 #endif
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725 int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
728 defined(TARGET_NR_pselect6)
729 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
730 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
731 #endif
732 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734 struct timespec *, tsp, const sigset_t *, sigmask,
735 size_t, sigsetsize)
736 #endif
737 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
738 int, maxevents, int, timeout, const sigset_t *, sigmask,
739 size_t, sigsetsize)
740 #if defined(__NR_futex)
741 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
742 const struct timespec *,timeout,int *,uaddr2,int,val3)
743 #endif
744 #if defined(__NR_futex_time64)
745 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
746 const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #endif
748 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
749 safe_syscall2(int, kill, pid_t, pid, int, sig)
750 safe_syscall2(int, tkill, int, tid, int, sig)
751 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
752 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
753 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
754 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
755 unsigned long, pos_l, unsigned long, pos_h)
756 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
757 unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
759 socklen_t, addrlen)
760 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
761 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
762 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
763 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
764 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
765 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
766 safe_syscall2(int, flock, int, fd, int, operation)
767 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
768 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
769 const struct timespec *, uts, size_t, sigsetsize)
770 #endif
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
772 int, flags)
773 #if defined(TARGET_NR_nanosleep)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775 struct timespec *, rem)
776 #endif
777 #if defined(TARGET_NR_clock_nanosleep) || \
778 defined(TARGET_NR_clock_nanosleep_time64)
779 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
780 const struct timespec *, req, struct timespec *, rem)
781 #endif
782 #ifdef __NR_ipc
783 #ifdef __s390x__
784 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
785 void *, ptr)
786 #else
787 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
788 void *, ptr, long, fifth)
789 #endif
790 #endif
791 #ifdef __NR_msgsnd
792 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
793 int, flags)
794 #endif
795 #ifdef __NR_msgrcv
796 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
797 long, msgtype, int, flags)
798 #endif
799 #ifdef __NR_semtimedop
800 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
801 unsigned, nsops, const struct timespec *, timeout)
802 #endif
803 #if defined(TARGET_NR_mq_timedsend) || \
804 defined(TARGET_NR_mq_timedsend_time64)
805 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
806 size_t, len, unsigned, prio, const struct timespec *, timeout)
807 #endif
808 #if defined(TARGET_NR_mq_timedreceive) || \
809 defined(TARGET_NR_mq_timedreceive_time64)
810 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
811 size_t, len, unsigned *, prio, const struct timespec *, timeout)
812 #endif
813 /* We do ioctl like this rather than via safe_syscall3 to preserve the
814 * "third argument might be integer or pointer or not present" behaviour of
815 * the libc function.
817 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
818 /* Similarly for fcntl. Note that callers must always:
819 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
820 * use the flock64 struct rather than unsuffixed flock
821 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
823 #ifdef __NR_fcntl64
824 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
825 #else
826 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
827 #endif
829 static inline int host_to_target_sock_type(int host_type)
831 int target_type;
833 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
834 case SOCK_DGRAM:
835 target_type = TARGET_SOCK_DGRAM;
836 break;
837 case SOCK_STREAM:
838 target_type = TARGET_SOCK_STREAM;
839 break;
840 default:
841 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
842 break;
845 #if defined(SOCK_CLOEXEC)
846 if (host_type & SOCK_CLOEXEC) {
847 target_type |= TARGET_SOCK_CLOEXEC;
849 #endif
851 #if defined(SOCK_NONBLOCK)
852 if (host_type & SOCK_NONBLOCK) {
853 target_type |= TARGET_SOCK_NONBLOCK;
855 #endif
857 return target_type;
860 static abi_ulong target_brk;
861 static abi_ulong target_original_brk;
862 static abi_ulong brk_page;
864 void target_set_brk(abi_ulong new_brk)
866 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
867 brk_page = HOST_PAGE_ALIGN(target_brk);
870 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
871 #define DEBUGF_BRK(message, args...)
873 /* do_brk() must return target values and target errnos. */
874 abi_long do_brk(abi_ulong new_brk)
876 abi_long mapped_addr;
877 abi_ulong new_alloc_size;
879 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
881 if (!new_brk) {
882 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
883 return target_brk;
885 if (new_brk < target_original_brk) {
886 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
887 target_brk);
888 return target_brk;
891 /* If the new brk is less than the highest page reserved to the
892 * target heap allocation, set it and we're almost done... */
893 if (new_brk <= brk_page) {
894 /* Heap contents are initialized to zero, as for anonymous
895 * mapped pages. */
896 if (new_brk > target_brk) {
897 memset(g2h(target_brk), 0, new_brk - target_brk);
899 target_brk = new_brk;
900 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
901 return target_brk;
904 /* We need to allocate more memory after the brk... Note that
905 * we don't use MAP_FIXED because that will map over the top of
906 * any existing mapping (like the one with the host libc or qemu
907 * itself); instead we treat "mapped but at wrong address" as
908 * a failure and unmap again.
910 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
911 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
912 PROT_READ|PROT_WRITE,
913 MAP_ANON|MAP_PRIVATE, 0, 0));
915 if (mapped_addr == brk_page) {
916 /* Heap contents are initialized to zero, as for anonymous
917 * mapped pages. Technically the new pages are already
918 * initialized to zero since they *are* anonymous mapped
919 * pages, however we have to take care with the contents that
920 * come from the remaining part of the previous page: it may
921 * contains garbage data due to a previous heap usage (grown
922 * then shrunken). */
923 memset(g2h(target_brk), 0, brk_page - target_brk);
925 target_brk = new_brk;
926 brk_page = HOST_PAGE_ALIGN(target_brk);
927 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
928 target_brk);
929 return target_brk;
930 } else if (mapped_addr != -1) {
931 /* Mapped but at wrong address, meaning there wasn't actually
932 * enough space for this brk.
934 target_munmap(mapped_addr, new_alloc_size);
935 mapped_addr = -1;
936 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
938 else {
939 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
942 #if defined(TARGET_ALPHA)
943 /* We (partially) emulate OSF/1 on Alpha, which requires we
944 return a proper errno, not an unchanged brk value. */
945 return -TARGET_ENOMEM;
946 #endif
947 /* For everything else, return the previous break. */
948 return target_brk;
951 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
952 defined(TARGET_NR_pselect6)
953 static inline abi_long copy_from_user_fdset(fd_set *fds,
954 abi_ulong target_fds_addr,
955 int n)
957 int i, nw, j, k;
958 abi_ulong b, *target_fds;
960 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
961 if (!(target_fds = lock_user(VERIFY_READ,
962 target_fds_addr,
963 sizeof(abi_ulong) * nw,
964 1)))
965 return -TARGET_EFAULT;
967 FD_ZERO(fds);
968 k = 0;
969 for (i = 0; i < nw; i++) {
970 /* grab the abi_ulong */
971 __get_user(b, &target_fds[i]);
972 for (j = 0; j < TARGET_ABI_BITS; j++) {
973 /* check the bit inside the abi_ulong */
974 if ((b >> j) & 1)
975 FD_SET(k, fds);
976 k++;
980 unlock_user(target_fds, target_fds_addr, 0);
982 return 0;
985 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
986 abi_ulong target_fds_addr,
987 int n)
989 if (target_fds_addr) {
990 if (copy_from_user_fdset(fds, target_fds_addr, n))
991 return -TARGET_EFAULT;
992 *fds_ptr = fds;
993 } else {
994 *fds_ptr = NULL;
996 return 0;
999 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1000 const fd_set *fds,
1001 int n)
1003 int i, nw, j, k;
1004 abi_long v;
1005 abi_ulong *target_fds;
1007 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1008 if (!(target_fds = lock_user(VERIFY_WRITE,
1009 target_fds_addr,
1010 sizeof(abi_ulong) * nw,
1011 0)))
1012 return -TARGET_EFAULT;
1014 k = 0;
1015 for (i = 0; i < nw; i++) {
1016 v = 0;
1017 for (j = 0; j < TARGET_ABI_BITS; j++) {
1018 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1019 k++;
1021 __put_user(v, &target_fds[i]);
1024 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1026 return 0;
1028 #endif
1030 #if defined(__alpha__)
1031 #define HOST_HZ 1024
1032 #else
1033 #define HOST_HZ 100
1034 #endif
1036 static inline abi_long host_to_target_clock_t(long ticks)
1038 #if HOST_HZ == TARGET_HZ
1039 return ticks;
1040 #else
1041 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1042 #endif
1045 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1046 const struct rusage *rusage)
1048 struct target_rusage *target_rusage;
1050 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1051 return -TARGET_EFAULT;
1052 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1053 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1054 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1055 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1056 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1057 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1058 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1059 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1060 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1061 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1062 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1063 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1064 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1065 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1066 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1067 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1068 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1069 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1070 unlock_user_struct(target_rusage, target_addr, 1);
1072 return 0;
1075 #ifdef TARGET_NR_setrlimit
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1078 abi_ulong target_rlim_swap;
1079 rlim_t result;
1081 target_rlim_swap = tswapal(target_rlim);
1082 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083 return RLIM_INFINITY;
1085 result = target_rlim_swap;
1086 if (target_rlim_swap != (rlim_t)result)
1087 return RLIM_INFINITY;
1089 return result;
1091 #endif
1093 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1094 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1096 abi_ulong target_rlim_swap;
1097 abi_ulong result;
1099 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1100 target_rlim_swap = TARGET_RLIM_INFINITY;
1101 else
1102 target_rlim_swap = rlim;
1103 result = tswapal(target_rlim_swap);
1105 return result;
1107 #endif
1109 static inline int target_to_host_resource(int code)
1111 switch (code) {
1112 case TARGET_RLIMIT_AS:
1113 return RLIMIT_AS;
1114 case TARGET_RLIMIT_CORE:
1115 return RLIMIT_CORE;
1116 case TARGET_RLIMIT_CPU:
1117 return RLIMIT_CPU;
1118 case TARGET_RLIMIT_DATA:
1119 return RLIMIT_DATA;
1120 case TARGET_RLIMIT_FSIZE:
1121 return RLIMIT_FSIZE;
1122 case TARGET_RLIMIT_LOCKS:
1123 return RLIMIT_LOCKS;
1124 case TARGET_RLIMIT_MEMLOCK:
1125 return RLIMIT_MEMLOCK;
1126 case TARGET_RLIMIT_MSGQUEUE:
1127 return RLIMIT_MSGQUEUE;
1128 case TARGET_RLIMIT_NICE:
1129 return RLIMIT_NICE;
1130 case TARGET_RLIMIT_NOFILE:
1131 return RLIMIT_NOFILE;
1132 case TARGET_RLIMIT_NPROC:
1133 return RLIMIT_NPROC;
1134 case TARGET_RLIMIT_RSS:
1135 return RLIMIT_RSS;
1136 case TARGET_RLIMIT_RTPRIO:
1137 return RLIMIT_RTPRIO;
1138 case TARGET_RLIMIT_SIGPENDING:
1139 return RLIMIT_SIGPENDING;
1140 case TARGET_RLIMIT_STACK:
1141 return RLIMIT_STACK;
1142 default:
1143 return code;
1147 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1148 abi_ulong target_tv_addr)
1150 struct target_timeval *target_tv;
1152 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1153 return -TARGET_EFAULT;
1156 __get_user(tv->tv_sec, &target_tv->tv_sec);
1157 __get_user(tv->tv_usec, &target_tv->tv_usec);
1159 unlock_user_struct(target_tv, target_tv_addr, 0);
1161 return 0;
1164 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1165 const struct timeval *tv)
1167 struct target_timeval *target_tv;
1169 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1170 return -TARGET_EFAULT;
1173 __put_user(tv->tv_sec, &target_tv->tv_sec);
1174 __put_user(tv->tv_usec, &target_tv->tv_usec);
1176 unlock_user_struct(target_tv, target_tv_addr, 1);
1178 return 0;
1181 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1182 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1183 abi_ulong target_tv_addr)
1185 struct target__kernel_sock_timeval *target_tv;
1187 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1188 return -TARGET_EFAULT;
1191 __get_user(tv->tv_sec, &target_tv->tv_sec);
1192 __get_user(tv->tv_usec, &target_tv->tv_usec);
1194 unlock_user_struct(target_tv, target_tv_addr, 0);
1196 return 0;
1198 #endif
1200 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1201 const struct timeval *tv)
1203 struct target__kernel_sock_timeval *target_tv;
1205 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1206 return -TARGET_EFAULT;
1209 __put_user(tv->tv_sec, &target_tv->tv_sec);
1210 __put_user(tv->tv_usec, &target_tv->tv_usec);
1212 unlock_user_struct(target_tv, target_tv_addr, 1);
1214 return 0;
1217 #if defined(TARGET_NR_futex) || \
1218 defined(TARGET_NR_rt_sigtimedwait) || \
1219 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1220 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1221 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1222 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1223 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1224 defined(TARGET_NR_timer_settime) || \
1225 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1226 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1227 abi_ulong target_addr)
1229 struct target_timespec *target_ts;
1231 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1232 return -TARGET_EFAULT;
1234 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1235 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236 unlock_user_struct(target_ts, target_addr, 0);
1237 return 0;
1239 #endif
1241 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1242 defined(TARGET_NR_timer_settime64) || \
1243 defined(TARGET_NR_mq_timedsend_time64) || \
1244 defined(TARGET_NR_mq_timedreceive_time64) || \
1245 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1246 defined(TARGET_NR_clock_nanosleep_time64) || \
1247 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1248 defined(TARGET_NR_utimensat) || \
1249 defined(TARGET_NR_utimensat_time64) || \
1250 defined(TARGET_NR_semtimedop_time64)
1251 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1252 abi_ulong target_addr)
1254 struct target__kernel_timespec *target_ts;
1256 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1257 return -TARGET_EFAULT;
1259 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1260 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1261 /* in 32bit mode, this drops the padding */
1262 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1263 unlock_user_struct(target_ts, target_addr, 0);
1264 return 0;
1266 #endif
1268 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1269 struct timespec *host_ts)
1271 struct target_timespec *target_ts;
1273 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1274 return -TARGET_EFAULT;
1276 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1277 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1278 unlock_user_struct(target_ts, target_addr, 1);
1279 return 0;
1282 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1283 struct timespec *host_ts)
1285 struct target__kernel_timespec *target_ts;
1287 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1288 return -TARGET_EFAULT;
1290 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1291 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1292 unlock_user_struct(target_ts, target_addr, 1);
1293 return 0;
1296 #if defined(TARGET_NR_gettimeofday)
1297 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1298 struct timezone *tz)
1300 struct target_timezone *target_tz;
1302 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1303 return -TARGET_EFAULT;
1306 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1307 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1309 unlock_user_struct(target_tz, target_tz_addr, 1);
1311 return 0;
1313 #endif
1315 #if defined(TARGET_NR_settimeofday)
1316 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1317 abi_ulong target_tz_addr)
1319 struct target_timezone *target_tz;
1321 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1322 return -TARGET_EFAULT;
1325 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1326 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1328 unlock_user_struct(target_tz, target_tz_addr, 0);
1330 return 0;
1332 #endif
1334 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1335 #include <mqueue.h>
1337 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1338 abi_ulong target_mq_attr_addr)
1340 struct target_mq_attr *target_mq_attr;
1342 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1343 target_mq_attr_addr, 1))
1344 return -TARGET_EFAULT;
1346 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1347 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1348 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1349 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1351 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1353 return 0;
1356 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1357 const struct mq_attr *attr)
1359 struct target_mq_attr *target_mq_attr;
1361 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1362 target_mq_attr_addr, 0))
1363 return -TARGET_EFAULT;
1365 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1370 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1372 return 0;
1374 #endif
1376 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1377 /* do_select() must return target values and target errnos. */
1378 static abi_long do_select(int n,
1379 abi_ulong rfd_addr, abi_ulong wfd_addr,
1380 abi_ulong efd_addr, abi_ulong target_tv_addr)
1382 fd_set rfds, wfds, efds;
1383 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1384 struct timeval tv;
1385 struct timespec ts, *ts_ptr;
1386 abi_long ret;
1388 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1389 if (ret) {
1390 return ret;
1392 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1393 if (ret) {
1394 return ret;
1396 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1397 if (ret) {
1398 return ret;
1401 if (target_tv_addr) {
1402 if (copy_from_user_timeval(&tv, target_tv_addr))
1403 return -TARGET_EFAULT;
1404 ts.tv_sec = tv.tv_sec;
1405 ts.tv_nsec = tv.tv_usec * 1000;
1406 ts_ptr = &ts;
1407 } else {
1408 ts_ptr = NULL;
1411 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1412 ts_ptr, NULL));
1414 if (!is_error(ret)) {
1415 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1416 return -TARGET_EFAULT;
1417 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1418 return -TARGET_EFAULT;
1419 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1420 return -TARGET_EFAULT;
1422 if (target_tv_addr) {
1423 tv.tv_sec = ts.tv_sec;
1424 tv.tv_usec = ts.tv_nsec / 1000;
1425 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1426 return -TARGET_EFAULT;
1431 return ret;
1434 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1435 static abi_long do_old_select(abi_ulong arg1)
1437 struct target_sel_arg_struct *sel;
1438 abi_ulong inp, outp, exp, tvp;
1439 long nsel;
1441 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1442 return -TARGET_EFAULT;
1445 nsel = tswapal(sel->n);
1446 inp = tswapal(sel->inp);
1447 outp = tswapal(sel->outp);
1448 exp = tswapal(sel->exp);
1449 tvp = tswapal(sel->tvp);
1451 unlock_user_struct(sel, arg1, 0);
1453 return do_select(nsel, inp, outp, exp, tvp);
1455 #endif
1456 #endif
1458 static abi_long do_pipe2(int host_pipe[], int flags)
1460 #ifdef CONFIG_PIPE2
1461 return pipe2(host_pipe, flags);
1462 #else
1463 return -ENOSYS;
1464 #endif
1467 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1468 int flags, int is_pipe2)
1470 int host_pipe[2];
1471 abi_long ret;
1472 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1474 if (is_error(ret))
1475 return get_errno(ret);
1477 /* Several targets have special calling conventions for the original
1478 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1479 if (!is_pipe2) {
1480 #if defined(TARGET_ALPHA)
1481 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1482 return host_pipe[0];
1483 #elif defined(TARGET_MIPS)
1484 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1485 return host_pipe[0];
1486 #elif defined(TARGET_SH4)
1487 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1488 return host_pipe[0];
1489 #elif defined(TARGET_SPARC)
1490 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1491 return host_pipe[0];
1492 #endif
1495 if (put_user_s32(host_pipe[0], pipedes)
1496 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1497 return -TARGET_EFAULT;
1498 return get_errno(ret);
1501 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1502 abi_ulong target_addr,
1503 socklen_t len)
1505 struct target_ip_mreqn *target_smreqn;
1507 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1508 if (!target_smreqn)
1509 return -TARGET_EFAULT;
1510 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1511 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1512 if (len == sizeof(struct target_ip_mreqn))
1513 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1514 unlock_user(target_smreqn, target_addr, 0);
1516 return 0;
1519 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1520 abi_ulong target_addr,
1521 socklen_t len)
1523 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1524 sa_family_t sa_family;
1525 struct target_sockaddr *target_saddr;
1527 if (fd_trans_target_to_host_addr(fd)) {
1528 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1531 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1532 if (!target_saddr)
1533 return -TARGET_EFAULT;
1535 sa_family = tswap16(target_saddr->sa_family);
1537 /* Oops. The caller might send a incomplete sun_path; sun_path
1538 * must be terminated by \0 (see the manual page), but
1539 * unfortunately it is quite common to specify sockaddr_un
1540 * length as "strlen(x->sun_path)" while it should be
1541 * "strlen(...) + 1". We'll fix that here if needed.
1542 * Linux kernel has a similar feature.
1545 if (sa_family == AF_UNIX) {
1546 if (len < unix_maxlen && len > 0) {
1547 char *cp = (char*)target_saddr;
1549 if ( cp[len-1] && !cp[len] )
1550 len++;
1552 if (len > unix_maxlen)
1553 len = unix_maxlen;
1556 memcpy(addr, target_saddr, len);
1557 addr->sa_family = sa_family;
1558 if (sa_family == AF_NETLINK) {
1559 struct sockaddr_nl *nladdr;
1561 nladdr = (struct sockaddr_nl *)addr;
1562 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1563 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1564 } else if (sa_family == AF_PACKET) {
1565 struct target_sockaddr_ll *lladdr;
1567 lladdr = (struct target_sockaddr_ll *)addr;
1568 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1569 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1571 unlock_user(target_saddr, target_addr, 0);
1573 return 0;
1576 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1577 struct sockaddr *addr,
1578 socklen_t len)
1580 struct target_sockaddr *target_saddr;
1582 if (len == 0) {
1583 return 0;
1585 assert(addr);
1587 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1588 if (!target_saddr)
1589 return -TARGET_EFAULT;
1590 memcpy(target_saddr, addr, len);
1591 if (len >= offsetof(struct target_sockaddr, sa_family) +
1592 sizeof(target_saddr->sa_family)) {
1593 target_saddr->sa_family = tswap16(addr->sa_family);
1595 if (addr->sa_family == AF_NETLINK &&
1596 len >= sizeof(struct target_sockaddr_nl)) {
1597 struct target_sockaddr_nl *target_nl =
1598 (struct target_sockaddr_nl *)target_saddr;
1599 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1600 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1601 } else if (addr->sa_family == AF_PACKET) {
1602 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1603 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1604 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1605 } else if (addr->sa_family == AF_INET6 &&
1606 len >= sizeof(struct target_sockaddr_in6)) {
1607 struct target_sockaddr_in6 *target_in6 =
1608 (struct target_sockaddr_in6 *)target_saddr;
1609 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1611 unlock_user(target_saddr, target_addr, len);
1613 return 0;
1616 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1617 struct target_msghdr *target_msgh)
1619 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1620 abi_long msg_controllen;
1621 abi_ulong target_cmsg_addr;
1622 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1623 socklen_t space = 0;
1625 msg_controllen = tswapal(target_msgh->msg_controllen);
1626 if (msg_controllen < sizeof (struct target_cmsghdr))
1627 goto the_end;
1628 target_cmsg_addr = tswapal(target_msgh->msg_control);
1629 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1630 target_cmsg_start = target_cmsg;
1631 if (!target_cmsg)
1632 return -TARGET_EFAULT;
1634 while (cmsg && target_cmsg) {
1635 void *data = CMSG_DATA(cmsg);
1636 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1638 int len = tswapal(target_cmsg->cmsg_len)
1639 - sizeof(struct target_cmsghdr);
1641 space += CMSG_SPACE(len);
1642 if (space > msgh->msg_controllen) {
1643 space -= CMSG_SPACE(len);
1644 /* This is a QEMU bug, since we allocated the payload
1645 * area ourselves (unlike overflow in host-to-target
1646 * conversion, which is just the guest giving us a buffer
1647 * that's too small). It can't happen for the payload types
1648 * we currently support; if it becomes an issue in future
1649 * we would need to improve our allocation strategy to
1650 * something more intelligent than "twice the size of the
1651 * target buffer we're reading from".
1653 qemu_log_mask(LOG_UNIMP,
1654 ("Unsupported ancillary data %d/%d: "
1655 "unhandled msg size\n"),
1656 tswap32(target_cmsg->cmsg_level),
1657 tswap32(target_cmsg->cmsg_type));
1658 break;
1661 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1662 cmsg->cmsg_level = SOL_SOCKET;
1663 } else {
1664 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1666 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1667 cmsg->cmsg_len = CMSG_LEN(len);
1669 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1670 int *fd = (int *)data;
1671 int *target_fd = (int *)target_data;
1672 int i, numfds = len / sizeof(int);
1674 for (i = 0; i < numfds; i++) {
1675 __get_user(fd[i], target_fd + i);
1677 } else if (cmsg->cmsg_level == SOL_SOCKET
1678 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1679 struct ucred *cred = (struct ucred *)data;
1680 struct target_ucred *target_cred =
1681 (struct target_ucred *)target_data;
1683 __get_user(cred->pid, &target_cred->pid);
1684 __get_user(cred->uid, &target_cred->uid);
1685 __get_user(cred->gid, &target_cred->gid);
1686 } else {
1687 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1688 cmsg->cmsg_level, cmsg->cmsg_type);
1689 memcpy(data, target_data, len);
1692 cmsg = CMSG_NXTHDR(msgh, cmsg);
1693 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1694 target_cmsg_start);
1696 unlock_user(target_cmsg, target_cmsg_addr, 0);
1697 the_end:
1698 msgh->msg_controllen = space;
1699 return 0;
1702 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1703 struct msghdr *msgh)
1705 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1706 abi_long msg_controllen;
1707 abi_ulong target_cmsg_addr;
1708 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1709 socklen_t space = 0;
1711 msg_controllen = tswapal(target_msgh->msg_controllen);
1712 if (msg_controllen < sizeof (struct target_cmsghdr))
1713 goto the_end;
1714 target_cmsg_addr = tswapal(target_msgh->msg_control);
1715 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1716 target_cmsg_start = target_cmsg;
1717 if (!target_cmsg)
1718 return -TARGET_EFAULT;
1720 while (cmsg && target_cmsg) {
1721 void *data = CMSG_DATA(cmsg);
1722 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1724 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1725 int tgt_len, tgt_space;
1727 /* We never copy a half-header but may copy half-data;
1728 * this is Linux's behaviour in put_cmsg(). Note that
1729 * truncation here is a guest problem (which we report
1730 * to the guest via the CTRUNC bit), unlike truncation
1731 * in target_to_host_cmsg, which is a QEMU bug.
1733 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1734 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1735 break;
1738 if (cmsg->cmsg_level == SOL_SOCKET) {
1739 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1740 } else {
1741 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1743 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1745 /* Payload types which need a different size of payload on
1746 * the target must adjust tgt_len here.
1748 tgt_len = len;
1749 switch (cmsg->cmsg_level) {
1750 case SOL_SOCKET:
1751 switch (cmsg->cmsg_type) {
1752 case SO_TIMESTAMP:
1753 tgt_len = sizeof(struct target_timeval);
1754 break;
1755 default:
1756 break;
1758 break;
1759 default:
1760 break;
1763 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1764 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1765 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1768 /* We must now copy-and-convert len bytes of payload
1769 * into tgt_len bytes of destination space. Bear in mind
1770 * that in both source and destination we may be dealing
1771 * with a truncated value!
1773 switch (cmsg->cmsg_level) {
1774 case SOL_SOCKET:
1775 switch (cmsg->cmsg_type) {
1776 case SCM_RIGHTS:
1778 int *fd = (int *)data;
1779 int *target_fd = (int *)target_data;
1780 int i, numfds = tgt_len / sizeof(int);
1782 for (i = 0; i < numfds; i++) {
1783 __put_user(fd[i], target_fd + i);
1785 break;
1787 case SO_TIMESTAMP:
1789 struct timeval *tv = (struct timeval *)data;
1790 struct target_timeval *target_tv =
1791 (struct target_timeval *)target_data;
1793 if (len != sizeof(struct timeval) ||
1794 tgt_len != sizeof(struct target_timeval)) {
1795 goto unimplemented;
1798 /* copy struct timeval to target */
1799 __put_user(tv->tv_sec, &target_tv->tv_sec);
1800 __put_user(tv->tv_usec, &target_tv->tv_usec);
1801 break;
1803 case SCM_CREDENTIALS:
1805 struct ucred *cred = (struct ucred *)data;
1806 struct target_ucred *target_cred =
1807 (struct target_ucred *)target_data;
1809 __put_user(cred->pid, &target_cred->pid);
1810 __put_user(cred->uid, &target_cred->uid);
1811 __put_user(cred->gid, &target_cred->gid);
1812 break;
1814 default:
1815 goto unimplemented;
1817 break;
1819 case SOL_IP:
1820 switch (cmsg->cmsg_type) {
1821 case IP_TTL:
1823 uint32_t *v = (uint32_t *)data;
1824 uint32_t *t_int = (uint32_t *)target_data;
1826 if (len != sizeof(uint32_t) ||
1827 tgt_len != sizeof(uint32_t)) {
1828 goto unimplemented;
1830 __put_user(*v, t_int);
1831 break;
1833 case IP_RECVERR:
1835 struct errhdr_t {
1836 struct sock_extended_err ee;
1837 struct sockaddr_in offender;
1839 struct errhdr_t *errh = (struct errhdr_t *)data;
1840 struct errhdr_t *target_errh =
1841 (struct errhdr_t *)target_data;
1843 if (len != sizeof(struct errhdr_t) ||
1844 tgt_len != sizeof(struct errhdr_t)) {
1845 goto unimplemented;
1847 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1848 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1849 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1850 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1851 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1852 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1853 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1854 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1855 (void *) &errh->offender, sizeof(errh->offender));
1856 break;
1858 default:
1859 goto unimplemented;
1861 break;
1863 case SOL_IPV6:
1864 switch (cmsg->cmsg_type) {
1865 case IPV6_HOPLIMIT:
1867 uint32_t *v = (uint32_t *)data;
1868 uint32_t *t_int = (uint32_t *)target_data;
1870 if (len != sizeof(uint32_t) ||
1871 tgt_len != sizeof(uint32_t)) {
1872 goto unimplemented;
1874 __put_user(*v, t_int);
1875 break;
1877 case IPV6_RECVERR:
1879 struct errhdr6_t {
1880 struct sock_extended_err ee;
1881 struct sockaddr_in6 offender;
1883 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1884 struct errhdr6_t *target_errh =
1885 (struct errhdr6_t *)target_data;
1887 if (len != sizeof(struct errhdr6_t) ||
1888 tgt_len != sizeof(struct errhdr6_t)) {
1889 goto unimplemented;
1891 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1892 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1893 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1894 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1895 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1896 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1897 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1898 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1899 (void *) &errh->offender, sizeof(errh->offender));
1900 break;
1902 default:
1903 goto unimplemented;
1905 break;
1907 default:
1908 unimplemented:
1909 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1910 cmsg->cmsg_level, cmsg->cmsg_type);
1911 memcpy(target_data, data, MIN(len, tgt_len));
1912 if (tgt_len > len) {
1913 memset(target_data + len, 0, tgt_len - len);
1917 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1918 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1919 if (msg_controllen < tgt_space) {
1920 tgt_space = msg_controllen;
1922 msg_controllen -= tgt_space;
1923 space += tgt_space;
1924 cmsg = CMSG_NXTHDR(msgh, cmsg);
1925 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1926 target_cmsg_start);
1928 unlock_user(target_cmsg, target_cmsg_addr, space);
1929 the_end:
1930 target_msgh->msg_controllen = tswapal(space);
1931 return 0;
1934 /* do_setsockopt() Must return target values and target errnos. */
1935 static abi_long do_setsockopt(int sockfd, int level, int optname,
1936 abi_ulong optval_addr, socklen_t optlen)
1938 abi_long ret;
1939 int val;
1940 struct ip_mreqn *ip_mreq;
1941 struct ip_mreq_source *ip_mreq_source;
1943 switch(level) {
1944 case SOL_TCP:
1945 /* TCP options all take an 'int' value. */
1946 if (optlen < sizeof(uint32_t))
1947 return -TARGET_EINVAL;
1949 if (get_user_u32(val, optval_addr))
1950 return -TARGET_EFAULT;
1951 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1952 break;
1953 case SOL_IP:
1954 switch(optname) {
1955 case IP_TOS:
1956 case IP_TTL:
1957 case IP_HDRINCL:
1958 case IP_ROUTER_ALERT:
1959 case IP_RECVOPTS:
1960 case IP_RETOPTS:
1961 case IP_PKTINFO:
1962 case IP_MTU_DISCOVER:
1963 case IP_RECVERR:
1964 case IP_RECVTTL:
1965 case IP_RECVTOS:
1966 #ifdef IP_FREEBIND
1967 case IP_FREEBIND:
1968 #endif
1969 case IP_MULTICAST_TTL:
1970 case IP_MULTICAST_LOOP:
1971 val = 0;
1972 if (optlen >= sizeof(uint32_t)) {
1973 if (get_user_u32(val, optval_addr))
1974 return -TARGET_EFAULT;
1975 } else if (optlen >= 1) {
1976 if (get_user_u8(val, optval_addr))
1977 return -TARGET_EFAULT;
1979 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1980 break;
1981 case IP_ADD_MEMBERSHIP:
1982 case IP_DROP_MEMBERSHIP:
1983 if (optlen < sizeof (struct target_ip_mreq) ||
1984 optlen > sizeof (struct target_ip_mreqn))
1985 return -TARGET_EINVAL;
1987 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1988 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1989 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1990 break;
1992 case IP_BLOCK_SOURCE:
1993 case IP_UNBLOCK_SOURCE:
1994 case IP_ADD_SOURCE_MEMBERSHIP:
1995 case IP_DROP_SOURCE_MEMBERSHIP:
1996 if (optlen != sizeof (struct target_ip_mreq_source))
1997 return -TARGET_EINVAL;
1999 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2000 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2001 unlock_user (ip_mreq_source, optval_addr, 0);
2002 break;
2004 default:
2005 goto unimplemented;
2007 break;
2008 case SOL_IPV6:
2009 switch (optname) {
2010 case IPV6_MTU_DISCOVER:
2011 case IPV6_MTU:
2012 case IPV6_V6ONLY:
2013 case IPV6_RECVPKTINFO:
2014 case IPV6_UNICAST_HOPS:
2015 case IPV6_MULTICAST_HOPS:
2016 case IPV6_MULTICAST_LOOP:
2017 case IPV6_RECVERR:
2018 case IPV6_RECVHOPLIMIT:
2019 case IPV6_2292HOPLIMIT:
2020 case IPV6_CHECKSUM:
2021 case IPV6_ADDRFORM:
2022 case IPV6_2292PKTINFO:
2023 case IPV6_RECVTCLASS:
2024 case IPV6_RECVRTHDR:
2025 case IPV6_2292RTHDR:
2026 case IPV6_RECVHOPOPTS:
2027 case IPV6_2292HOPOPTS:
2028 case IPV6_RECVDSTOPTS:
2029 case IPV6_2292DSTOPTS:
2030 case IPV6_TCLASS:
2031 #ifdef IPV6_RECVPATHMTU
2032 case IPV6_RECVPATHMTU:
2033 #endif
2034 #ifdef IPV6_TRANSPARENT
2035 case IPV6_TRANSPARENT:
2036 #endif
2037 #ifdef IPV6_FREEBIND
2038 case IPV6_FREEBIND:
2039 #endif
2040 #ifdef IPV6_RECVORIGDSTADDR
2041 case IPV6_RECVORIGDSTADDR:
2042 #endif
2043 val = 0;
2044 if (optlen < sizeof(uint32_t)) {
2045 return -TARGET_EINVAL;
2047 if (get_user_u32(val, optval_addr)) {
2048 return -TARGET_EFAULT;
2050 ret = get_errno(setsockopt(sockfd, level, optname,
2051 &val, sizeof(val)));
2052 break;
2053 case IPV6_PKTINFO:
2055 struct in6_pktinfo pki;
2057 if (optlen < sizeof(pki)) {
2058 return -TARGET_EINVAL;
2061 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2062 return -TARGET_EFAULT;
2065 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2067 ret = get_errno(setsockopt(sockfd, level, optname,
2068 &pki, sizeof(pki)));
2069 break;
2071 case IPV6_ADD_MEMBERSHIP:
2072 case IPV6_DROP_MEMBERSHIP:
2074 struct ipv6_mreq ipv6mreq;
2076 if (optlen < sizeof(ipv6mreq)) {
2077 return -TARGET_EINVAL;
2080 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2081 return -TARGET_EFAULT;
2084 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2086 ret = get_errno(setsockopt(sockfd, level, optname,
2087 &ipv6mreq, sizeof(ipv6mreq)));
2088 break;
2090 default:
2091 goto unimplemented;
2093 break;
2094 case SOL_ICMPV6:
2095 switch (optname) {
2096 case ICMPV6_FILTER:
2098 struct icmp6_filter icmp6f;
2100 if (optlen > sizeof(icmp6f)) {
2101 optlen = sizeof(icmp6f);
2104 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2105 return -TARGET_EFAULT;
2108 for (val = 0; val < 8; val++) {
2109 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2112 ret = get_errno(setsockopt(sockfd, level, optname,
2113 &icmp6f, optlen));
2114 break;
2116 default:
2117 goto unimplemented;
2119 break;
2120 case SOL_RAW:
2121 switch (optname) {
2122 case ICMP_FILTER:
2123 case IPV6_CHECKSUM:
2124 /* those take an u32 value */
2125 if (optlen < sizeof(uint32_t)) {
2126 return -TARGET_EINVAL;
2129 if (get_user_u32(val, optval_addr)) {
2130 return -TARGET_EFAULT;
2132 ret = get_errno(setsockopt(sockfd, level, optname,
2133 &val, sizeof(val)));
2134 break;
2136 default:
2137 goto unimplemented;
2139 break;
2140 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2141 case SOL_ALG:
2142 switch (optname) {
2143 case ALG_SET_KEY:
2145 char *alg_key = g_malloc(optlen);
2147 if (!alg_key) {
2148 return -TARGET_ENOMEM;
2150 if (copy_from_user(alg_key, optval_addr, optlen)) {
2151 g_free(alg_key);
2152 return -TARGET_EFAULT;
2154 ret = get_errno(setsockopt(sockfd, level, optname,
2155 alg_key, optlen));
2156 g_free(alg_key);
2157 break;
2159 case ALG_SET_AEAD_AUTHSIZE:
2161 ret = get_errno(setsockopt(sockfd, level, optname,
2162 NULL, optlen));
2163 break;
2165 default:
2166 goto unimplemented;
2168 break;
2169 #endif
2170 case TARGET_SOL_SOCKET:
2171 switch (optname) {
2172 case TARGET_SO_RCVTIMEO:
2174 struct timeval tv;
2176 optname = SO_RCVTIMEO;
2178 set_timeout:
2179 if (optlen != sizeof(struct target_timeval)) {
2180 return -TARGET_EINVAL;
2183 if (copy_from_user_timeval(&tv, optval_addr)) {
2184 return -TARGET_EFAULT;
2187 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2188 &tv, sizeof(tv)));
2189 return ret;
2191 case TARGET_SO_SNDTIMEO:
2192 optname = SO_SNDTIMEO;
2193 goto set_timeout;
2194 case TARGET_SO_ATTACH_FILTER:
2196 struct target_sock_fprog *tfprog;
2197 struct target_sock_filter *tfilter;
2198 struct sock_fprog fprog;
2199 struct sock_filter *filter;
2200 int i;
2202 if (optlen != sizeof(*tfprog)) {
2203 return -TARGET_EINVAL;
2205 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2206 return -TARGET_EFAULT;
2208 if (!lock_user_struct(VERIFY_READ, tfilter,
2209 tswapal(tfprog->filter), 0)) {
2210 unlock_user_struct(tfprog, optval_addr, 1);
2211 return -TARGET_EFAULT;
2214 fprog.len = tswap16(tfprog->len);
2215 filter = g_try_new(struct sock_filter, fprog.len);
2216 if (filter == NULL) {
2217 unlock_user_struct(tfilter, tfprog->filter, 1);
2218 unlock_user_struct(tfprog, optval_addr, 1);
2219 return -TARGET_ENOMEM;
2221 for (i = 0; i < fprog.len; i++) {
2222 filter[i].code = tswap16(tfilter[i].code);
2223 filter[i].jt = tfilter[i].jt;
2224 filter[i].jf = tfilter[i].jf;
2225 filter[i].k = tswap32(tfilter[i].k);
2227 fprog.filter = filter;
2229 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2230 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2231 g_free(filter);
2233 unlock_user_struct(tfilter, tfprog->filter, 1);
2234 unlock_user_struct(tfprog, optval_addr, 1);
2235 return ret;
2237 case TARGET_SO_BINDTODEVICE:
2239 char *dev_ifname, *addr_ifname;
2241 if (optlen > IFNAMSIZ - 1) {
2242 optlen = IFNAMSIZ - 1;
2244 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2245 if (!dev_ifname) {
2246 return -TARGET_EFAULT;
2248 optname = SO_BINDTODEVICE;
2249 addr_ifname = alloca(IFNAMSIZ);
2250 memcpy(addr_ifname, dev_ifname, optlen);
2251 addr_ifname[optlen] = 0;
2252 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2253 addr_ifname, optlen));
2254 unlock_user (dev_ifname, optval_addr, 0);
2255 return ret;
2257 case TARGET_SO_LINGER:
2259 struct linger lg;
2260 struct target_linger *tlg;
2262 if (optlen != sizeof(struct target_linger)) {
2263 return -TARGET_EINVAL;
2265 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2266 return -TARGET_EFAULT;
2268 __get_user(lg.l_onoff, &tlg->l_onoff);
2269 __get_user(lg.l_linger, &tlg->l_linger);
2270 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2271 &lg, sizeof(lg)));
2272 unlock_user_struct(tlg, optval_addr, 0);
2273 return ret;
2275 /* Options with 'int' argument. */
2276 case TARGET_SO_DEBUG:
2277 optname = SO_DEBUG;
2278 break;
2279 case TARGET_SO_REUSEADDR:
2280 optname = SO_REUSEADDR;
2281 break;
2282 #ifdef SO_REUSEPORT
2283 case TARGET_SO_REUSEPORT:
2284 optname = SO_REUSEPORT;
2285 break;
2286 #endif
2287 case TARGET_SO_TYPE:
2288 optname = SO_TYPE;
2289 break;
2290 case TARGET_SO_ERROR:
2291 optname = SO_ERROR;
2292 break;
2293 case TARGET_SO_DONTROUTE:
2294 optname = SO_DONTROUTE;
2295 break;
2296 case TARGET_SO_BROADCAST:
2297 optname = SO_BROADCAST;
2298 break;
2299 case TARGET_SO_SNDBUF:
2300 optname = SO_SNDBUF;
2301 break;
2302 case TARGET_SO_SNDBUFFORCE:
2303 optname = SO_SNDBUFFORCE;
2304 break;
2305 case TARGET_SO_RCVBUF:
2306 optname = SO_RCVBUF;
2307 break;
2308 case TARGET_SO_RCVBUFFORCE:
2309 optname = SO_RCVBUFFORCE;
2310 break;
2311 case TARGET_SO_KEEPALIVE:
2312 optname = SO_KEEPALIVE;
2313 break;
2314 case TARGET_SO_OOBINLINE:
2315 optname = SO_OOBINLINE;
2316 break;
2317 case TARGET_SO_NO_CHECK:
2318 optname = SO_NO_CHECK;
2319 break;
2320 case TARGET_SO_PRIORITY:
2321 optname = SO_PRIORITY;
2322 break;
2323 #ifdef SO_BSDCOMPAT
2324 case TARGET_SO_BSDCOMPAT:
2325 optname = SO_BSDCOMPAT;
2326 break;
2327 #endif
2328 case TARGET_SO_PASSCRED:
2329 optname = SO_PASSCRED;
2330 break;
2331 case TARGET_SO_PASSSEC:
2332 optname = SO_PASSSEC;
2333 break;
2334 case TARGET_SO_TIMESTAMP:
2335 optname = SO_TIMESTAMP;
2336 break;
2337 case TARGET_SO_RCVLOWAT:
2338 optname = SO_RCVLOWAT;
2339 break;
2340 default:
2341 goto unimplemented;
2343 if (optlen < sizeof(uint32_t))
2344 return -TARGET_EINVAL;
2346 if (get_user_u32(val, optval_addr))
2347 return -TARGET_EFAULT;
2348 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2349 break;
2350 #ifdef SOL_NETLINK
2351 case SOL_NETLINK:
2352 switch (optname) {
2353 case NETLINK_PKTINFO:
2354 case NETLINK_ADD_MEMBERSHIP:
2355 case NETLINK_DROP_MEMBERSHIP:
2356 case NETLINK_BROADCAST_ERROR:
2357 case NETLINK_NO_ENOBUFS:
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2359 case NETLINK_LISTEN_ALL_NSID:
2360 case NETLINK_CAP_ACK:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2363 case NETLINK_EXT_ACK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2366 case NETLINK_GET_STRICT_CHK:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2368 break;
2369 default:
2370 goto unimplemented;
2372 val = 0;
2373 if (optlen < sizeof(uint32_t)) {
2374 return -TARGET_EINVAL;
2376 if (get_user_u32(val, optval_addr)) {
2377 return -TARGET_EFAULT;
2379 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2380 sizeof(val)));
2381 break;
2382 #endif /* SOL_NETLINK */
2383 default:
2384 unimplemented:
2385 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2386 level, optname);
2387 ret = -TARGET_ENOPROTOOPT;
2389 return ret;
2392 /* do_getsockopt() Must return target values and target errnos. */
2393 static abi_long do_getsockopt(int sockfd, int level, int optname,
2394 abi_ulong optval_addr, abi_ulong optlen)
2396 abi_long ret;
2397 int len, val;
2398 socklen_t lv;
2400 switch(level) {
2401 case TARGET_SOL_SOCKET:
2402 level = SOL_SOCKET;
2403 switch (optname) {
2404 /* These don't just return a single integer */
2405 case TARGET_SO_PEERNAME:
2406 goto unimplemented;
2407 case TARGET_SO_RCVTIMEO: {
2408 struct timeval tv;
2409 socklen_t tvlen;
2411 optname = SO_RCVTIMEO;
2413 get_timeout:
2414 if (get_user_u32(len, optlen)) {
2415 return -TARGET_EFAULT;
2417 if (len < 0) {
2418 return -TARGET_EINVAL;
2421 tvlen = sizeof(tv);
2422 ret = get_errno(getsockopt(sockfd, level, optname,
2423 &tv, &tvlen));
2424 if (ret < 0) {
2425 return ret;
2427 if (len > sizeof(struct target_timeval)) {
2428 len = sizeof(struct target_timeval);
2430 if (copy_to_user_timeval(optval_addr, &tv)) {
2431 return -TARGET_EFAULT;
2433 if (put_user_u32(len, optlen)) {
2434 return -TARGET_EFAULT;
2436 break;
2438 case TARGET_SO_SNDTIMEO:
2439 optname = SO_SNDTIMEO;
2440 goto get_timeout;
2441 case TARGET_SO_PEERCRED: {
2442 struct ucred cr;
2443 socklen_t crlen;
2444 struct target_ucred *tcr;
2446 if (get_user_u32(len, optlen)) {
2447 return -TARGET_EFAULT;
2449 if (len < 0) {
2450 return -TARGET_EINVAL;
2453 crlen = sizeof(cr);
2454 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2455 &cr, &crlen));
2456 if (ret < 0) {
2457 return ret;
2459 if (len > crlen) {
2460 len = crlen;
2462 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2463 return -TARGET_EFAULT;
2465 __put_user(cr.pid, &tcr->pid);
2466 __put_user(cr.uid, &tcr->uid);
2467 __put_user(cr.gid, &tcr->gid);
2468 unlock_user_struct(tcr, optval_addr, 1);
2469 if (put_user_u32(len, optlen)) {
2470 return -TARGET_EFAULT;
2472 break;
2474 case TARGET_SO_PEERSEC: {
2475 char *name;
2477 if (get_user_u32(len, optlen)) {
2478 return -TARGET_EFAULT;
2480 if (len < 0) {
2481 return -TARGET_EINVAL;
2483 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2484 if (!name) {
2485 return -TARGET_EFAULT;
2487 lv = len;
2488 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2489 name, &lv));
2490 if (put_user_u32(lv, optlen)) {
2491 ret = -TARGET_EFAULT;
2493 unlock_user(name, optval_addr, lv);
2494 break;
2496 case TARGET_SO_LINGER:
2498 struct linger lg;
2499 socklen_t lglen;
2500 struct target_linger *tlg;
2502 if (get_user_u32(len, optlen)) {
2503 return -TARGET_EFAULT;
2505 if (len < 0) {
2506 return -TARGET_EINVAL;
2509 lglen = sizeof(lg);
2510 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2511 &lg, &lglen));
2512 if (ret < 0) {
2513 return ret;
2515 if (len > lglen) {
2516 len = lglen;
2518 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2519 return -TARGET_EFAULT;
2521 __put_user(lg.l_onoff, &tlg->l_onoff);
2522 __put_user(lg.l_linger, &tlg->l_linger);
2523 unlock_user_struct(tlg, optval_addr, 1);
2524 if (put_user_u32(len, optlen)) {
2525 return -TARGET_EFAULT;
2527 break;
2529 /* Options with 'int' argument. */
2530 case TARGET_SO_DEBUG:
2531 optname = SO_DEBUG;
2532 goto int_case;
2533 case TARGET_SO_REUSEADDR:
2534 optname = SO_REUSEADDR;
2535 goto int_case;
2536 #ifdef SO_REUSEPORT
2537 case TARGET_SO_REUSEPORT:
2538 optname = SO_REUSEPORT;
2539 goto int_case;
2540 #endif
2541 case TARGET_SO_TYPE:
2542 optname = SO_TYPE;
2543 goto int_case;
2544 case TARGET_SO_ERROR:
2545 optname = SO_ERROR;
2546 goto int_case;
2547 case TARGET_SO_DONTROUTE:
2548 optname = SO_DONTROUTE;
2549 goto int_case;
2550 case TARGET_SO_BROADCAST:
2551 optname = SO_BROADCAST;
2552 goto int_case;
2553 case TARGET_SO_SNDBUF:
2554 optname = SO_SNDBUF;
2555 goto int_case;
2556 case TARGET_SO_RCVBUF:
2557 optname = SO_RCVBUF;
2558 goto int_case;
2559 case TARGET_SO_KEEPALIVE:
2560 optname = SO_KEEPALIVE;
2561 goto int_case;
2562 case TARGET_SO_OOBINLINE:
2563 optname = SO_OOBINLINE;
2564 goto int_case;
2565 case TARGET_SO_NO_CHECK:
2566 optname = SO_NO_CHECK;
2567 goto int_case;
2568 case TARGET_SO_PRIORITY:
2569 optname = SO_PRIORITY;
2570 goto int_case;
2571 #ifdef SO_BSDCOMPAT
2572 case TARGET_SO_BSDCOMPAT:
2573 optname = SO_BSDCOMPAT;
2574 goto int_case;
2575 #endif
2576 case TARGET_SO_PASSCRED:
2577 optname = SO_PASSCRED;
2578 goto int_case;
2579 case TARGET_SO_TIMESTAMP:
2580 optname = SO_TIMESTAMP;
2581 goto int_case;
2582 case TARGET_SO_RCVLOWAT:
2583 optname = SO_RCVLOWAT;
2584 goto int_case;
2585 case TARGET_SO_ACCEPTCONN:
2586 optname = SO_ACCEPTCONN;
2587 goto int_case;
2588 default:
2589 goto int_case;
2591 break;
2592 case SOL_TCP:
2593 /* TCP options all take an 'int' value. */
2594 int_case:
2595 if (get_user_u32(len, optlen))
2596 return -TARGET_EFAULT;
2597 if (len < 0)
2598 return -TARGET_EINVAL;
2599 lv = sizeof(lv);
2600 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2601 if (ret < 0)
2602 return ret;
2603 if (optname == SO_TYPE) {
2604 val = host_to_target_sock_type(val);
2606 if (len > lv)
2607 len = lv;
2608 if (len == 4) {
2609 if (put_user_u32(val, optval_addr))
2610 return -TARGET_EFAULT;
2611 } else {
2612 if (put_user_u8(val, optval_addr))
2613 return -TARGET_EFAULT;
2615 if (put_user_u32(len, optlen))
2616 return -TARGET_EFAULT;
2617 break;
2618 case SOL_IP:
2619 switch(optname) {
2620 case IP_TOS:
2621 case IP_TTL:
2622 case IP_HDRINCL:
2623 case IP_ROUTER_ALERT:
2624 case IP_RECVOPTS:
2625 case IP_RETOPTS:
2626 case IP_PKTINFO:
2627 case IP_MTU_DISCOVER:
2628 case IP_RECVERR:
2629 case IP_RECVTOS:
2630 #ifdef IP_FREEBIND
2631 case IP_FREEBIND:
2632 #endif
2633 case IP_MULTICAST_TTL:
2634 case IP_MULTICAST_LOOP:
2635 if (get_user_u32(len, optlen))
2636 return -TARGET_EFAULT;
2637 if (len < 0)
2638 return -TARGET_EINVAL;
2639 lv = sizeof(lv);
2640 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2641 if (ret < 0)
2642 return ret;
2643 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2644 len = 1;
2645 if (put_user_u32(len, optlen)
2646 || put_user_u8(val, optval_addr))
2647 return -TARGET_EFAULT;
2648 } else {
2649 if (len > sizeof(int))
2650 len = sizeof(int);
2651 if (put_user_u32(len, optlen)
2652 || put_user_u32(val, optval_addr))
2653 return -TARGET_EFAULT;
2655 break;
2656 default:
2657 ret = -TARGET_ENOPROTOOPT;
2658 break;
2660 break;
2661 case SOL_IPV6:
2662 switch (optname) {
2663 case IPV6_MTU_DISCOVER:
2664 case IPV6_MTU:
2665 case IPV6_V6ONLY:
2666 case IPV6_RECVPKTINFO:
2667 case IPV6_UNICAST_HOPS:
2668 case IPV6_MULTICAST_HOPS:
2669 case IPV6_MULTICAST_LOOP:
2670 case IPV6_RECVERR:
2671 case IPV6_RECVHOPLIMIT:
2672 case IPV6_2292HOPLIMIT:
2673 case IPV6_CHECKSUM:
2674 case IPV6_ADDRFORM:
2675 case IPV6_2292PKTINFO:
2676 case IPV6_RECVTCLASS:
2677 case IPV6_RECVRTHDR:
2678 case IPV6_2292RTHDR:
2679 case IPV6_RECVHOPOPTS:
2680 case IPV6_2292HOPOPTS:
2681 case IPV6_RECVDSTOPTS:
2682 case IPV6_2292DSTOPTS:
2683 case IPV6_TCLASS:
2684 #ifdef IPV6_RECVPATHMTU
2685 case IPV6_RECVPATHMTU:
2686 #endif
2687 #ifdef IPV6_TRANSPARENT
2688 case IPV6_TRANSPARENT:
2689 #endif
2690 #ifdef IPV6_FREEBIND
2691 case IPV6_FREEBIND:
2692 #endif
2693 #ifdef IPV6_RECVORIGDSTADDR
2694 case IPV6_RECVORIGDSTADDR:
2695 #endif
2696 if (get_user_u32(len, optlen))
2697 return -TARGET_EFAULT;
2698 if (len < 0)
2699 return -TARGET_EINVAL;
2700 lv = sizeof(lv);
2701 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2702 if (ret < 0)
2703 return ret;
2704 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2705 len = 1;
2706 if (put_user_u32(len, optlen)
2707 || put_user_u8(val, optval_addr))
2708 return -TARGET_EFAULT;
2709 } else {
2710 if (len > sizeof(int))
2711 len = sizeof(int);
2712 if (put_user_u32(len, optlen)
2713 || put_user_u32(val, optval_addr))
2714 return -TARGET_EFAULT;
2716 break;
2717 default:
2718 ret = -TARGET_ENOPROTOOPT;
2719 break;
2721 break;
2722 #ifdef SOL_NETLINK
2723 case SOL_NETLINK:
2724 switch (optname) {
2725 case NETLINK_PKTINFO:
2726 case NETLINK_BROADCAST_ERROR:
2727 case NETLINK_NO_ENOBUFS:
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2729 case NETLINK_LISTEN_ALL_NSID:
2730 case NETLINK_CAP_ACK:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2733 case NETLINK_EXT_ACK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2736 case NETLINK_GET_STRICT_CHK:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2738 if (get_user_u32(len, optlen)) {
2739 return -TARGET_EFAULT;
2741 if (len != sizeof(val)) {
2742 return -TARGET_EINVAL;
2744 lv = len;
2745 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2746 if (ret < 0) {
2747 return ret;
2749 if (put_user_u32(lv, optlen)
2750 || put_user_u32(val, optval_addr)) {
2751 return -TARGET_EFAULT;
2753 break;
2754 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2755 case NETLINK_LIST_MEMBERSHIPS:
2757 uint32_t *results;
2758 int i;
2759 if (get_user_u32(len, optlen)) {
2760 return -TARGET_EFAULT;
2762 if (len < 0) {
2763 return -TARGET_EINVAL;
2765 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2766 if (!results) {
2767 return -TARGET_EFAULT;
2769 lv = len;
2770 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2771 if (ret < 0) {
2772 unlock_user(results, optval_addr, 0);
2773 return ret;
2775 /* swap host endianess to target endianess. */
2776 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2777 results[i] = tswap32(results[i]);
2779 if (put_user_u32(lv, optlen)) {
2780 return -TARGET_EFAULT;
2782 unlock_user(results, optval_addr, 0);
2783 break;
2785 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2786 default:
2787 goto unimplemented;
2789 break;
2790 #endif /* SOL_NETLINK */
2791 default:
2792 unimplemented:
2793 qemu_log_mask(LOG_UNIMP,
2794 "getsockopt level=%d optname=%d not yet supported\n",
2795 level, optname);
2796 ret = -TARGET_EOPNOTSUPP;
2797 break;
2799 return ret;
2802 /* Convert target low/high pair representing file offset into the host
2803 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2804 * as the kernel doesn't handle them either.
2806 static void target_to_host_low_high(abi_ulong tlow,
2807 abi_ulong thigh,
2808 unsigned long *hlow,
2809 unsigned long *hhigh)
2811 uint64_t off = tlow |
2812 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2813 TARGET_LONG_BITS / 2;
2815 *hlow = off;
2816 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2819 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2820 abi_ulong count, int copy)
2822 struct target_iovec *target_vec;
2823 struct iovec *vec;
2824 abi_ulong total_len, max_len;
2825 int i;
2826 int err = 0;
2827 bool bad_address = false;
2829 if (count == 0) {
2830 errno = 0;
2831 return NULL;
2833 if (count > IOV_MAX) {
2834 errno = EINVAL;
2835 return NULL;
2838 vec = g_try_new0(struct iovec, count);
2839 if (vec == NULL) {
2840 errno = ENOMEM;
2841 return NULL;
2844 target_vec = lock_user(VERIFY_READ, target_addr,
2845 count * sizeof(struct target_iovec), 1);
2846 if (target_vec == NULL) {
2847 err = EFAULT;
2848 goto fail2;
2851 /* ??? If host page size > target page size, this will result in a
2852 value larger than what we can actually support. */
2853 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2854 total_len = 0;
2856 for (i = 0; i < count; i++) {
2857 abi_ulong base = tswapal(target_vec[i].iov_base);
2858 abi_long len = tswapal(target_vec[i].iov_len);
2860 if (len < 0) {
2861 err = EINVAL;
2862 goto fail;
2863 } else if (len == 0) {
2864 /* Zero length pointer is ignored. */
2865 vec[i].iov_base = 0;
2866 } else {
2867 vec[i].iov_base = lock_user(type, base, len, copy);
2868 /* If the first buffer pointer is bad, this is a fault. But
2869 * subsequent bad buffers will result in a partial write; this
2870 * is realized by filling the vector with null pointers and
2871 * zero lengths. */
2872 if (!vec[i].iov_base) {
2873 if (i == 0) {
2874 err = EFAULT;
2875 goto fail;
2876 } else {
2877 bad_address = true;
2880 if (bad_address) {
2881 len = 0;
2883 if (len > max_len - total_len) {
2884 len = max_len - total_len;
2887 vec[i].iov_len = len;
2888 total_len += len;
2891 unlock_user(target_vec, target_addr, 0);
2892 return vec;
2894 fail:
2895 while (--i >= 0) {
2896 if (tswapal(target_vec[i].iov_len) > 0) {
2897 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2900 unlock_user(target_vec, target_addr, 0);
2901 fail2:
2902 g_free(vec);
2903 errno = err;
2904 return NULL;
2907 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2908 abi_ulong count, int copy)
2910 struct target_iovec *target_vec;
2911 int i;
2913 target_vec = lock_user(VERIFY_READ, target_addr,
2914 count * sizeof(struct target_iovec), 1);
2915 if (target_vec) {
2916 for (i = 0; i < count; i++) {
2917 abi_ulong base = tswapal(target_vec[i].iov_base);
2918 abi_long len = tswapal(target_vec[i].iov_len);
2919 if (len < 0) {
2920 break;
2922 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2924 unlock_user(target_vec, target_addr, 0);
2927 g_free(vec);
2930 static inline int target_to_host_sock_type(int *type)
2932 int host_type = 0;
2933 int target_type = *type;
2935 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2936 case TARGET_SOCK_DGRAM:
2937 host_type = SOCK_DGRAM;
2938 break;
2939 case TARGET_SOCK_STREAM:
2940 host_type = SOCK_STREAM;
2941 break;
2942 default:
2943 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2944 break;
2946 if (target_type & TARGET_SOCK_CLOEXEC) {
2947 #if defined(SOCK_CLOEXEC)
2948 host_type |= SOCK_CLOEXEC;
2949 #else
2950 return -TARGET_EINVAL;
2951 #endif
2953 if (target_type & TARGET_SOCK_NONBLOCK) {
2954 #if defined(SOCK_NONBLOCK)
2955 host_type |= SOCK_NONBLOCK;
2956 #elif !defined(O_NONBLOCK)
2957 return -TARGET_EINVAL;
2958 #endif
2960 *type = host_type;
2961 return 0;
2964 /* Try to emulate socket type flags after socket creation. */
2965 static int sock_flags_fixup(int fd, int target_type)
2967 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2968 if (target_type & TARGET_SOCK_NONBLOCK) {
2969 int flags = fcntl(fd, F_GETFL);
2970 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2971 close(fd);
2972 return -TARGET_EINVAL;
2975 #endif
2976 return fd;
2979 /* do_socket() Must return target values and target errnos. */
2980 static abi_long do_socket(int domain, int type, int protocol)
2982 int target_type = type;
2983 int ret;
2985 ret = target_to_host_sock_type(&type);
2986 if (ret) {
2987 return ret;
2990 if (domain == PF_NETLINK && !(
2991 #ifdef CONFIG_RTNETLINK
2992 protocol == NETLINK_ROUTE ||
2993 #endif
2994 protocol == NETLINK_KOBJECT_UEVENT ||
2995 protocol == NETLINK_AUDIT)) {
2996 return -TARGET_EPROTONOSUPPORT;
2999 if (domain == AF_PACKET ||
3000 (domain == AF_INET && type == SOCK_PACKET)) {
3001 protocol = tswap16(protocol);
3004 ret = get_errno(socket(domain, type, protocol));
3005 if (ret >= 0) {
3006 ret = sock_flags_fixup(ret, target_type);
3007 if (type == SOCK_PACKET) {
3008 /* Manage an obsolete case :
3009 * if socket type is SOCK_PACKET, bind by name
3011 fd_trans_register(ret, &target_packet_trans);
3012 } else if (domain == PF_NETLINK) {
3013 switch (protocol) {
3014 #ifdef CONFIG_RTNETLINK
3015 case NETLINK_ROUTE:
3016 fd_trans_register(ret, &target_netlink_route_trans);
3017 break;
3018 #endif
3019 case NETLINK_KOBJECT_UEVENT:
3020 /* nothing to do: messages are strings */
3021 break;
3022 case NETLINK_AUDIT:
3023 fd_trans_register(ret, &target_netlink_audit_trans);
3024 break;
3025 default:
3026 g_assert_not_reached();
3030 return ret;
3033 /* do_bind() Must return target values and target errnos. */
3034 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3035 socklen_t addrlen)
3037 void *addr;
3038 abi_long ret;
3040 if ((int)addrlen < 0) {
3041 return -TARGET_EINVAL;
3044 addr = alloca(addrlen+1);
3046 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3047 if (ret)
3048 return ret;
3050 return get_errno(bind(sockfd, addr, addrlen));
3053 /* do_connect() Must return target values and target errnos. */
3054 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3055 socklen_t addrlen)
3057 void *addr;
3058 abi_long ret;
3060 if ((int)addrlen < 0) {
3061 return -TARGET_EINVAL;
3064 addr = alloca(addrlen+1);
3066 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3067 if (ret)
3068 return ret;
3070 return get_errno(safe_connect(sockfd, addr, addrlen));
3073 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3074 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3075 int flags, int send)
3077 abi_long ret, len;
3078 struct msghdr msg;
3079 abi_ulong count;
3080 struct iovec *vec;
3081 abi_ulong target_vec;
3083 if (msgp->msg_name) {
3084 msg.msg_namelen = tswap32(msgp->msg_namelen);
3085 msg.msg_name = alloca(msg.msg_namelen+1);
3086 ret = target_to_host_sockaddr(fd, msg.msg_name,
3087 tswapal(msgp->msg_name),
3088 msg.msg_namelen);
3089 if (ret == -TARGET_EFAULT) {
3090 /* For connected sockets msg_name and msg_namelen must
3091 * be ignored, so returning EFAULT immediately is wrong.
3092 * Instead, pass a bad msg_name to the host kernel, and
3093 * let it decide whether to return EFAULT or not.
3095 msg.msg_name = (void *)-1;
3096 } else if (ret) {
3097 goto out2;
3099 } else {
3100 msg.msg_name = NULL;
3101 msg.msg_namelen = 0;
3103 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3104 msg.msg_control = alloca(msg.msg_controllen);
3105 memset(msg.msg_control, 0, msg.msg_controllen);
3107 msg.msg_flags = tswap32(msgp->msg_flags);
3109 count = tswapal(msgp->msg_iovlen);
3110 target_vec = tswapal(msgp->msg_iov);
3112 if (count > IOV_MAX) {
3113 /* sendrcvmsg returns a different errno for this condition than
3114 * readv/writev, so we must catch it here before lock_iovec() does.
3116 ret = -TARGET_EMSGSIZE;
3117 goto out2;
3120 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3121 target_vec, count, send);
3122 if (vec == NULL) {
3123 ret = -host_to_target_errno(errno);
3124 goto out2;
3126 msg.msg_iovlen = count;
3127 msg.msg_iov = vec;
3129 if (send) {
3130 if (fd_trans_target_to_host_data(fd)) {
3131 void *host_msg;
3133 host_msg = g_malloc(msg.msg_iov->iov_len);
3134 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3135 ret = fd_trans_target_to_host_data(fd)(host_msg,
3136 msg.msg_iov->iov_len);
3137 if (ret >= 0) {
3138 msg.msg_iov->iov_base = host_msg;
3139 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3141 g_free(host_msg);
3142 } else {
3143 ret = target_to_host_cmsg(&msg, msgp);
3144 if (ret == 0) {
3145 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3148 } else {
3149 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3150 if (!is_error(ret)) {
3151 len = ret;
3152 if (fd_trans_host_to_target_data(fd)) {
3153 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3154 MIN(msg.msg_iov->iov_len, len));
3155 } else {
3156 ret = host_to_target_cmsg(msgp, &msg);
3158 if (!is_error(ret)) {
3159 msgp->msg_namelen = tswap32(msg.msg_namelen);
3160 msgp->msg_flags = tswap32(msg.msg_flags);
3161 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3162 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3163 msg.msg_name, msg.msg_namelen);
3164 if (ret) {
3165 goto out;
3169 ret = len;
3174 out:
3175 unlock_iovec(vec, target_vec, count, !send);
3176 out2:
3177 return ret;
3180 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3181 int flags, int send)
3183 abi_long ret;
3184 struct target_msghdr *msgp;
3186 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3187 msgp,
3188 target_msg,
3189 send ? 1 : 0)) {
3190 return -TARGET_EFAULT;
3192 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3193 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3194 return ret;
3197 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3198 * so it might not have this *mmsg-specific flag either.
3200 #ifndef MSG_WAITFORONE
3201 #define MSG_WAITFORONE 0x10000
3202 #endif
3204 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3205 unsigned int vlen, unsigned int flags,
3206 int send)
3208 struct target_mmsghdr *mmsgp;
3209 abi_long ret = 0;
3210 int i;
3212 if (vlen > UIO_MAXIOV) {
3213 vlen = UIO_MAXIOV;
3216 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3217 if (!mmsgp) {
3218 return -TARGET_EFAULT;
3221 for (i = 0; i < vlen; i++) {
3222 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3223 if (is_error(ret)) {
3224 break;
3226 mmsgp[i].msg_len = tswap32(ret);
3227 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3228 if (flags & MSG_WAITFORONE) {
3229 flags |= MSG_DONTWAIT;
3233 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3235 /* Return number of datagrams sent if we sent any at all;
3236 * otherwise return the error.
3238 if (i) {
3239 return i;
3241 return ret;
3244 /* do_accept4() Must return target values and target errnos. */
3245 static abi_long do_accept4(int fd, abi_ulong target_addr,
3246 abi_ulong target_addrlen_addr, int flags)
3248 socklen_t addrlen, ret_addrlen;
3249 void *addr;
3250 abi_long ret;
3251 int host_flags;
3253 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3255 if (target_addr == 0) {
3256 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3259 /* linux returns EINVAL if addrlen pointer is invalid */
3260 if (get_user_u32(addrlen, target_addrlen_addr))
3261 return -TARGET_EINVAL;
3263 if ((int)addrlen < 0) {
3264 return -TARGET_EINVAL;
3267 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3268 return -TARGET_EINVAL;
3270 addr = alloca(addrlen);
3272 ret_addrlen = addrlen;
3273 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3274 if (!is_error(ret)) {
3275 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3276 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3277 ret = -TARGET_EFAULT;
3280 return ret;
3283 /* do_getpeername() Must return target values and target errnos. */
3284 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3285 abi_ulong target_addrlen_addr)
3287 socklen_t addrlen, ret_addrlen;
3288 void *addr;
3289 abi_long ret;
3291 if (get_user_u32(addrlen, target_addrlen_addr))
3292 return -TARGET_EFAULT;
3294 if ((int)addrlen < 0) {
3295 return -TARGET_EINVAL;
3298 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3299 return -TARGET_EFAULT;
3301 addr = alloca(addrlen);
3303 ret_addrlen = addrlen;
3304 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3305 if (!is_error(ret)) {
3306 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3307 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3308 ret = -TARGET_EFAULT;
3311 return ret;
3314 /* do_getsockname() Must return target values and target errnos. */
3315 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3316 abi_ulong target_addrlen_addr)
3318 socklen_t addrlen, ret_addrlen;
3319 void *addr;
3320 abi_long ret;
3322 if (get_user_u32(addrlen, target_addrlen_addr))
3323 return -TARGET_EFAULT;
3325 if ((int)addrlen < 0) {
3326 return -TARGET_EINVAL;
3329 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3330 return -TARGET_EFAULT;
3332 addr = alloca(addrlen);
3334 ret_addrlen = addrlen;
3335 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3336 if (!is_error(ret)) {
3337 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3338 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3339 ret = -TARGET_EFAULT;
3342 return ret;
3345 /* do_socketpair() Must return target values and target errnos. */
3346 static abi_long do_socketpair(int domain, int type, int protocol,
3347 abi_ulong target_tab_addr)
3349 int tab[2];
3350 abi_long ret;
3352 target_to_host_sock_type(&type);
3354 ret = get_errno(socketpair(domain, type, protocol, tab));
3355 if (!is_error(ret)) {
3356 if (put_user_s32(tab[0], target_tab_addr)
3357 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3358 ret = -TARGET_EFAULT;
3360 return ret;
3363 /* do_sendto() Must return target values and target errnos. */
3364 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3365 abi_ulong target_addr, socklen_t addrlen)
3367 void *addr;
3368 void *host_msg;
3369 void *copy_msg = NULL;
3370 abi_long ret;
3372 if ((int)addrlen < 0) {
3373 return -TARGET_EINVAL;
3376 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3377 if (!host_msg)
3378 return -TARGET_EFAULT;
3379 if (fd_trans_target_to_host_data(fd)) {
3380 copy_msg = host_msg;
3381 host_msg = g_malloc(len);
3382 memcpy(host_msg, copy_msg, len);
3383 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3384 if (ret < 0) {
3385 goto fail;
3388 if (target_addr) {
3389 addr = alloca(addrlen+1);
3390 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3391 if (ret) {
3392 goto fail;
3394 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3395 } else {
3396 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3398 fail:
3399 if (copy_msg) {
3400 g_free(host_msg);
3401 host_msg = copy_msg;
3403 unlock_user(host_msg, msg, 0);
3404 return ret;
3407 /* do_recvfrom() Must return target values and target errnos. */
3408 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3409 abi_ulong target_addr,
3410 abi_ulong target_addrlen)
3412 socklen_t addrlen, ret_addrlen;
3413 void *addr;
3414 void *host_msg;
3415 abi_long ret;
3417 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3418 if (!host_msg)
3419 return -TARGET_EFAULT;
3420 if (target_addr) {
3421 if (get_user_u32(addrlen, target_addrlen)) {
3422 ret = -TARGET_EFAULT;
3423 goto fail;
3425 if ((int)addrlen < 0) {
3426 ret = -TARGET_EINVAL;
3427 goto fail;
3429 addr = alloca(addrlen);
3430 ret_addrlen = addrlen;
3431 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3432 addr, &ret_addrlen));
3433 } else {
3434 addr = NULL; /* To keep compiler quiet. */
3435 addrlen = 0; /* To keep compiler quiet. */
3436 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3438 if (!is_error(ret)) {
3439 if (fd_trans_host_to_target_data(fd)) {
3440 abi_long trans;
3441 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3442 if (is_error(trans)) {
3443 ret = trans;
3444 goto fail;
3447 if (target_addr) {
3448 host_to_target_sockaddr(target_addr, addr,
3449 MIN(addrlen, ret_addrlen));
3450 if (put_user_u32(ret_addrlen, target_addrlen)) {
3451 ret = -TARGET_EFAULT;
3452 goto fail;
3455 unlock_user(host_msg, msg, len);
3456 } else {
3457 fail:
3458 unlock_user(host_msg, msg, 0);
3460 return ret;
3463 #ifdef TARGET_NR_socketcall
3464 /* do_socketcall() must return target values and target errnos. */
3465 static abi_long do_socketcall(int num, abi_ulong vptr)
3467 static const unsigned nargs[] = { /* number of arguments per operation */
3468 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3469 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3470 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3471 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3472 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3473 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3474 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3475 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3476 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3477 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3478 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3479 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3480 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3481 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3482 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3483 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3484 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3485 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3486 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3487 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3489 abi_long a[6]; /* max 6 args */
3490 unsigned i;
3492 /* check the range of the first argument num */
3493 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3494 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3495 return -TARGET_EINVAL;
3497 /* ensure we have space for args */
3498 if (nargs[num] > ARRAY_SIZE(a)) {
3499 return -TARGET_EINVAL;
3501 /* collect the arguments in a[] according to nargs[] */
3502 for (i = 0; i < nargs[num]; ++i) {
3503 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3504 return -TARGET_EFAULT;
3507 /* now when we have the args, invoke the appropriate underlying function */
3508 switch (num) {
3509 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3510 return do_socket(a[0], a[1], a[2]);
3511 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3512 return do_bind(a[0], a[1], a[2]);
3513 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3514 return do_connect(a[0], a[1], a[2]);
3515 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3516 return get_errno(listen(a[0], a[1]));
3517 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3518 return do_accept4(a[0], a[1], a[2], 0);
3519 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3520 return do_getsockname(a[0], a[1], a[2]);
3521 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3522 return do_getpeername(a[0], a[1], a[2]);
3523 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3524 return do_socketpair(a[0], a[1], a[2], a[3]);
3525 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3526 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3527 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3528 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3529 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3530 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3531 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3532 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3533 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3534 return get_errno(shutdown(a[0], a[1]));
3535 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3536 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3537 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3538 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3539 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3540 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3541 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3542 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3543 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3544 return do_accept4(a[0], a[1], a[2], a[3]);
3545 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3546 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3547 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3548 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3549 default:
3550 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3551 return -TARGET_EINVAL;
3554 #endif
3556 #define N_SHM_REGIONS 32
3558 static struct shm_region {
3559 abi_ulong start;
3560 abi_ulong size;
3561 bool in_use;
3562 } shm_regions[N_SHM_REGIONS];
3564 #ifndef TARGET_SEMID64_DS
3565 /* asm-generic version of this struct */
3566 struct target_semid64_ds
3568 struct target_ipc_perm sem_perm;
3569 abi_ulong sem_otime;
3570 #if TARGET_ABI_BITS == 32
3571 abi_ulong __unused1;
3572 #endif
3573 abi_ulong sem_ctime;
3574 #if TARGET_ABI_BITS == 32
3575 abi_ulong __unused2;
3576 #endif
3577 abi_ulong sem_nsems;
3578 abi_ulong __unused3;
3579 abi_ulong __unused4;
3581 #endif
3583 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3584 abi_ulong target_addr)
3586 struct target_ipc_perm *target_ip;
3587 struct target_semid64_ds *target_sd;
3589 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3590 return -TARGET_EFAULT;
3591 target_ip = &(target_sd->sem_perm);
3592 host_ip->__key = tswap32(target_ip->__key);
3593 host_ip->uid = tswap32(target_ip->uid);
3594 host_ip->gid = tswap32(target_ip->gid);
3595 host_ip->cuid = tswap32(target_ip->cuid);
3596 host_ip->cgid = tswap32(target_ip->cgid);
3597 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3598 host_ip->mode = tswap32(target_ip->mode);
3599 #else
3600 host_ip->mode = tswap16(target_ip->mode);
3601 #endif
3602 #if defined(TARGET_PPC)
3603 host_ip->__seq = tswap32(target_ip->__seq);
3604 #else
3605 host_ip->__seq = tswap16(target_ip->__seq);
3606 #endif
3607 unlock_user_struct(target_sd, target_addr, 0);
3608 return 0;
3611 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3612 struct ipc_perm *host_ip)
3614 struct target_ipc_perm *target_ip;
3615 struct target_semid64_ds *target_sd;
3617 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3618 return -TARGET_EFAULT;
3619 target_ip = &(target_sd->sem_perm);
3620 target_ip->__key = tswap32(host_ip->__key);
3621 target_ip->uid = tswap32(host_ip->uid);
3622 target_ip->gid = tswap32(host_ip->gid);
3623 target_ip->cuid = tswap32(host_ip->cuid);
3624 target_ip->cgid = tswap32(host_ip->cgid);
3625 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3626 target_ip->mode = tswap32(host_ip->mode);
3627 #else
3628 target_ip->mode = tswap16(host_ip->mode);
3629 #endif
3630 #if defined(TARGET_PPC)
3631 target_ip->__seq = tswap32(host_ip->__seq);
3632 #else
3633 target_ip->__seq = tswap16(host_ip->__seq);
3634 #endif
3635 unlock_user_struct(target_sd, target_addr, 1);
3636 return 0;
3639 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3640 abi_ulong target_addr)
3642 struct target_semid64_ds *target_sd;
3644 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3645 return -TARGET_EFAULT;
3646 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3647 return -TARGET_EFAULT;
3648 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3649 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3650 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3651 unlock_user_struct(target_sd, target_addr, 0);
3652 return 0;
3655 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3656 struct semid_ds *host_sd)
3658 struct target_semid64_ds *target_sd;
3660 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3661 return -TARGET_EFAULT;
3662 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3663 return -TARGET_EFAULT;
3664 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3665 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3666 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3667 unlock_user_struct(target_sd, target_addr, 1);
3668 return 0;
3671 struct target_seminfo {
3672 int semmap;
3673 int semmni;
3674 int semmns;
3675 int semmnu;
3676 int semmsl;
3677 int semopm;
3678 int semume;
3679 int semusz;
3680 int semvmx;
3681 int semaem;
3684 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3685 struct seminfo *host_seminfo)
3687 struct target_seminfo *target_seminfo;
3688 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3689 return -TARGET_EFAULT;
3690 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3691 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3692 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3693 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3694 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3695 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3696 __put_user(host_seminfo->semume, &target_seminfo->semume);
3697 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3698 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3699 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3700 unlock_user_struct(target_seminfo, target_addr, 1);
3701 return 0;
3704 union semun {
3705 int val;
3706 struct semid_ds *buf;
3707 unsigned short *array;
3708 struct seminfo *__buf;
3711 union target_semun {
3712 int val;
3713 abi_ulong buf;
3714 abi_ulong array;
3715 abi_ulong __buf;
3718 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3719 abi_ulong target_addr)
3721 int nsems;
3722 unsigned short *array;
3723 union semun semun;
3724 struct semid_ds semid_ds;
3725 int i, ret;
3727 semun.buf = &semid_ds;
3729 ret = semctl(semid, 0, IPC_STAT, semun);
3730 if (ret == -1)
3731 return get_errno(ret);
3733 nsems = semid_ds.sem_nsems;
3735 *host_array = g_try_new(unsigned short, nsems);
3736 if (!*host_array) {
3737 return -TARGET_ENOMEM;
3739 array = lock_user(VERIFY_READ, target_addr,
3740 nsems*sizeof(unsigned short), 1);
3741 if (!array) {
3742 g_free(*host_array);
3743 return -TARGET_EFAULT;
3746 for(i=0; i<nsems; i++) {
3747 __get_user((*host_array)[i], &array[i]);
3749 unlock_user(array, target_addr, 0);
3751 return 0;
3754 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3755 unsigned short **host_array)
3757 int nsems;
3758 unsigned short *array;
3759 union semun semun;
3760 struct semid_ds semid_ds;
3761 int i, ret;
3763 semun.buf = &semid_ds;
3765 ret = semctl(semid, 0, IPC_STAT, semun);
3766 if (ret == -1)
3767 return get_errno(ret);
3769 nsems = semid_ds.sem_nsems;
3771 array = lock_user(VERIFY_WRITE, target_addr,
3772 nsems*sizeof(unsigned short), 0);
3773 if (!array)
3774 return -TARGET_EFAULT;
3776 for(i=0; i<nsems; i++) {
3777 __put_user((*host_array)[i], &array[i]);
3779 g_free(*host_array);
3780 unlock_user(array, target_addr, 1);
3782 return 0;
3785 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3786 abi_ulong target_arg)
3788 union target_semun target_su = { .buf = target_arg };
3789 union semun arg;
3790 struct semid_ds dsarg;
3791 unsigned short *array = NULL;
3792 struct seminfo seminfo;
3793 abi_long ret = -TARGET_EINVAL;
3794 abi_long err;
3795 cmd &= 0xff;
3797 switch( cmd ) {
3798 case GETVAL:
3799 case SETVAL:
3800 /* In 64 bit cross-endian situations, we will erroneously pick up
3801 * the wrong half of the union for the "val" element. To rectify
3802 * this, the entire 8-byte structure is byteswapped, followed by
3803 * a swap of the 4 byte val field. In other cases, the data is
3804 * already in proper host byte order. */
3805 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3806 target_su.buf = tswapal(target_su.buf);
3807 arg.val = tswap32(target_su.val);
3808 } else {
3809 arg.val = target_su.val;
3811 ret = get_errno(semctl(semid, semnum, cmd, arg));
3812 break;
3813 case GETALL:
3814 case SETALL:
3815 err = target_to_host_semarray(semid, &array, target_su.array);
3816 if (err)
3817 return err;
3818 arg.array = array;
3819 ret = get_errno(semctl(semid, semnum, cmd, arg));
3820 err = host_to_target_semarray(semid, target_su.array, &array);
3821 if (err)
3822 return err;
3823 break;
3824 case IPC_STAT:
3825 case IPC_SET:
3826 case SEM_STAT:
3827 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3828 if (err)
3829 return err;
3830 arg.buf = &dsarg;
3831 ret = get_errno(semctl(semid, semnum, cmd, arg));
3832 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3833 if (err)
3834 return err;
3835 break;
3836 case IPC_INFO:
3837 case SEM_INFO:
3838 arg.__buf = &seminfo;
3839 ret = get_errno(semctl(semid, semnum, cmd, arg));
3840 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3841 if (err)
3842 return err;
3843 break;
3844 case IPC_RMID:
3845 case GETPID:
3846 case GETNCNT:
3847 case GETZCNT:
3848 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3849 break;
3852 return ret;
3855 struct target_sembuf {
3856 unsigned short sem_num;
3857 short sem_op;
3858 short sem_flg;
3861 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3862 abi_ulong target_addr,
3863 unsigned nsops)
3865 struct target_sembuf *target_sembuf;
3866 int i;
3868 target_sembuf = lock_user(VERIFY_READ, target_addr,
3869 nsops*sizeof(struct target_sembuf), 1);
3870 if (!target_sembuf)
3871 return -TARGET_EFAULT;
3873 for(i=0; i<nsops; i++) {
3874 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3875 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3876 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3879 unlock_user(target_sembuf, target_addr, 0);
3881 return 0;
3884 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3885 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
3888 * This macro is required to handle the s390 variants, which passes the
3889 * arguments in a different order than default.
3891 #ifdef __s390x__
3892 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3893 (__nsops), (__timeout), (__sops)
3894 #else
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896 (__nsops), 0, (__sops), (__timeout)
3897 #endif
3899 static inline abi_long do_semtimedop(int semid,
3900 abi_long ptr,
3901 unsigned nsops,
3902 abi_long timeout, bool time64)
3904 struct sembuf *sops;
3905 struct timespec ts, *pts = NULL;
3906 abi_long ret;
3908 if (timeout) {
3909 pts = &ts;
3910 if (time64) {
3911 if (target_to_host_timespec64(pts, timeout)) {
3912 return -TARGET_EFAULT;
3914 } else {
3915 if (target_to_host_timespec(pts, timeout)) {
3916 return -TARGET_EFAULT;
3921 if (nsops > TARGET_SEMOPM) {
3922 return -TARGET_E2BIG;
3925 sops = g_new(struct sembuf, nsops);
3927 if (target_to_host_sembuf(sops, ptr, nsops)) {
3928 g_free(sops);
3929 return -TARGET_EFAULT;
3932 ret = -TARGET_ENOSYS;
3933 #ifdef __NR_semtimedop
3934 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3935 #endif
3936 #ifdef __NR_ipc
3937 if (ret == -TARGET_ENOSYS) {
3938 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3939 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3941 #endif
3942 g_free(sops);
3943 return ret;
3945 #endif
3947 struct target_msqid_ds
3949 struct target_ipc_perm msg_perm;
3950 abi_ulong msg_stime;
3951 #if TARGET_ABI_BITS == 32
3952 abi_ulong __unused1;
3953 #endif
3954 abi_ulong msg_rtime;
3955 #if TARGET_ABI_BITS == 32
3956 abi_ulong __unused2;
3957 #endif
3958 abi_ulong msg_ctime;
3959 #if TARGET_ABI_BITS == 32
3960 abi_ulong __unused3;
3961 #endif
3962 abi_ulong __msg_cbytes;
3963 abi_ulong msg_qnum;
3964 abi_ulong msg_qbytes;
3965 abi_ulong msg_lspid;
3966 abi_ulong msg_lrpid;
3967 abi_ulong __unused4;
3968 abi_ulong __unused5;
3971 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3972 abi_ulong target_addr)
3974 struct target_msqid_ds *target_md;
3976 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3977 return -TARGET_EFAULT;
3978 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3979 return -TARGET_EFAULT;
3980 host_md->msg_stime = tswapal(target_md->msg_stime);
3981 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3982 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3983 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3984 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3985 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3986 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3987 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3988 unlock_user_struct(target_md, target_addr, 0);
3989 return 0;
3992 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3993 struct msqid_ds *host_md)
3995 struct target_msqid_ds *target_md;
3997 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3998 return -TARGET_EFAULT;
3999 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4000 return -TARGET_EFAULT;
4001 target_md->msg_stime = tswapal(host_md->msg_stime);
4002 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4003 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4004 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4005 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4006 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4007 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4008 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4009 unlock_user_struct(target_md, target_addr, 1);
4010 return 0;
4013 struct target_msginfo {
4014 int msgpool;
4015 int msgmap;
4016 int msgmax;
4017 int msgmnb;
4018 int msgmni;
4019 int msgssz;
4020 int msgtql;
4021 unsigned short int msgseg;
4024 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4025 struct msginfo *host_msginfo)
4027 struct target_msginfo *target_msginfo;
4028 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4029 return -TARGET_EFAULT;
4030 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4031 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4032 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4033 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4034 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4035 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4036 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4037 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4038 unlock_user_struct(target_msginfo, target_addr, 1);
4039 return 0;
4042 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4044 struct msqid_ds dsarg;
4045 struct msginfo msginfo;
4046 abi_long ret = -TARGET_EINVAL;
4048 cmd &= 0xff;
4050 switch (cmd) {
4051 case IPC_STAT:
4052 case IPC_SET:
4053 case MSG_STAT:
4054 if (target_to_host_msqid_ds(&dsarg,ptr))
4055 return -TARGET_EFAULT;
4056 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4057 if (host_to_target_msqid_ds(ptr,&dsarg))
4058 return -TARGET_EFAULT;
4059 break;
4060 case IPC_RMID:
4061 ret = get_errno(msgctl(msgid, cmd, NULL));
4062 break;
4063 case IPC_INFO:
4064 case MSG_INFO:
4065 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4066 if (host_to_target_msginfo(ptr, &msginfo))
4067 return -TARGET_EFAULT;
4068 break;
4071 return ret;
4074 struct target_msgbuf {
4075 abi_long mtype;
4076 char mtext[1];
4079 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4080 ssize_t msgsz, int msgflg)
4082 struct target_msgbuf *target_mb;
4083 struct msgbuf *host_mb;
4084 abi_long ret = 0;
4086 if (msgsz < 0) {
4087 return -TARGET_EINVAL;
4090 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4091 return -TARGET_EFAULT;
4092 host_mb = g_try_malloc(msgsz + sizeof(long));
4093 if (!host_mb) {
4094 unlock_user_struct(target_mb, msgp, 0);
4095 return -TARGET_ENOMEM;
4097 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4098 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4099 ret = -TARGET_ENOSYS;
4100 #ifdef __NR_msgsnd
4101 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4102 #endif
4103 #ifdef __NR_ipc
4104 if (ret == -TARGET_ENOSYS) {
4105 #ifdef __s390x__
4106 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4107 host_mb));
4108 #else
4109 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4110 host_mb, 0));
4111 #endif
4113 #endif
4114 g_free(host_mb);
4115 unlock_user_struct(target_mb, msgp, 0);
4117 return ret;
4120 #ifdef __NR_ipc
4121 #if defined(__sparc__)
4122 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4123 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4124 #elif defined(__s390x__)
4125 /* The s390 sys_ipc variant has only five parameters. */
4126 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4127 ((long int[]){(long int)__msgp, __msgtyp})
4128 #else
4129 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4130 ((long int[]){(long int)__msgp, __msgtyp}), 0
4131 #endif
4132 #endif
4134 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4135 ssize_t msgsz, abi_long msgtyp,
4136 int msgflg)
4138 struct target_msgbuf *target_mb;
4139 char *target_mtext;
4140 struct msgbuf *host_mb;
4141 abi_long ret = 0;
4143 if (msgsz < 0) {
4144 return -TARGET_EINVAL;
4147 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4148 return -TARGET_EFAULT;
4150 host_mb = g_try_malloc(msgsz + sizeof(long));
4151 if (!host_mb) {
4152 ret = -TARGET_ENOMEM;
4153 goto end;
4155 ret = -TARGET_ENOSYS;
4156 #ifdef __NR_msgrcv
4157 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4158 #endif
4159 #ifdef __NR_ipc
4160 if (ret == -TARGET_ENOSYS) {
4161 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4162 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4164 #endif
4166 if (ret > 0) {
4167 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4168 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4169 if (!target_mtext) {
4170 ret = -TARGET_EFAULT;
4171 goto end;
4173 memcpy(target_mb->mtext, host_mb->mtext, ret);
4174 unlock_user(target_mtext, target_mtext_addr, ret);
4177 target_mb->mtype = tswapal(host_mb->mtype);
4179 end:
4180 if (target_mb)
4181 unlock_user_struct(target_mb, msgp, 1);
4182 g_free(host_mb);
4183 return ret;
4186 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4187 abi_ulong target_addr)
4189 struct target_shmid_ds *target_sd;
4191 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4192 return -TARGET_EFAULT;
4193 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4194 return -TARGET_EFAULT;
4195 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4196 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4197 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4198 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4199 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4200 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4201 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4202 unlock_user_struct(target_sd, target_addr, 0);
4203 return 0;
4206 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4207 struct shmid_ds *host_sd)
4209 struct target_shmid_ds *target_sd;
4211 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4212 return -TARGET_EFAULT;
4213 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4214 return -TARGET_EFAULT;
4215 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4216 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4217 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4218 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4219 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4220 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4221 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4222 unlock_user_struct(target_sd, target_addr, 1);
4223 return 0;
4226 struct target_shminfo {
4227 abi_ulong shmmax;
4228 abi_ulong shmmin;
4229 abi_ulong shmmni;
4230 abi_ulong shmseg;
4231 abi_ulong shmall;
4234 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4235 struct shminfo *host_shminfo)
4237 struct target_shminfo *target_shminfo;
4238 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4239 return -TARGET_EFAULT;
4240 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4241 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4242 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4243 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4244 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4245 unlock_user_struct(target_shminfo, target_addr, 1);
4246 return 0;
4249 struct target_shm_info {
4250 int used_ids;
4251 abi_ulong shm_tot;
4252 abi_ulong shm_rss;
4253 abi_ulong shm_swp;
4254 abi_ulong swap_attempts;
4255 abi_ulong swap_successes;
4258 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4259 struct shm_info *host_shm_info)
4261 struct target_shm_info *target_shm_info;
4262 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4263 return -TARGET_EFAULT;
4264 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4265 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4266 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4267 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4268 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4269 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4270 unlock_user_struct(target_shm_info, target_addr, 1);
4271 return 0;
4274 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4276 struct shmid_ds dsarg;
4277 struct shminfo shminfo;
4278 struct shm_info shm_info;
4279 abi_long ret = -TARGET_EINVAL;
4281 cmd &= 0xff;
4283 switch(cmd) {
4284 case IPC_STAT:
4285 case IPC_SET:
4286 case SHM_STAT:
4287 if (target_to_host_shmid_ds(&dsarg, buf))
4288 return -TARGET_EFAULT;
4289 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4290 if (host_to_target_shmid_ds(buf, &dsarg))
4291 return -TARGET_EFAULT;
4292 break;
4293 case IPC_INFO:
4294 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4295 if (host_to_target_shminfo(buf, &shminfo))
4296 return -TARGET_EFAULT;
4297 break;
4298 case SHM_INFO:
4299 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4300 if (host_to_target_shm_info(buf, &shm_info))
4301 return -TARGET_EFAULT;
4302 break;
4303 case IPC_RMID:
4304 case SHM_LOCK:
4305 case SHM_UNLOCK:
4306 ret = get_errno(shmctl(shmid, cmd, NULL));
4307 break;
4310 return ret;
4313 #ifndef TARGET_FORCE_SHMLBA
4314 /* For most architectures, SHMLBA is the same as the page size;
4315 * some architectures have larger values, in which case they should
4316 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4317 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4318 * and defining its own value for SHMLBA.
4320 * The kernel also permits SHMLBA to be set by the architecture to a
4321 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4322 * this means that addresses are rounded to the large size if
4323 * SHM_RND is set but addresses not aligned to that size are not rejected
4324 * as long as they are at least page-aligned. Since the only architecture
4325 * which uses this is ia64 this code doesn't provide for that oddity.
4327 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4329 return TARGET_PAGE_SIZE;
4331 #endif
4333 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4334 int shmid, abi_ulong shmaddr, int shmflg)
4336 abi_long raddr;
4337 void *host_raddr;
4338 struct shmid_ds shm_info;
4339 int i,ret;
4340 abi_ulong shmlba;
4342 /* find out the length of the shared memory segment */
4343 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4344 if (is_error(ret)) {
4345 /* can't get length, bail out */
4346 return ret;
4349 shmlba = target_shmlba(cpu_env);
4351 if (shmaddr & (shmlba - 1)) {
4352 if (shmflg & SHM_RND) {
4353 shmaddr &= ~(shmlba - 1);
4354 } else {
4355 return -TARGET_EINVAL;
4358 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4359 return -TARGET_EINVAL;
4362 mmap_lock();
4364 if (shmaddr)
4365 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4366 else {
4367 abi_ulong mmap_start;
4369 /* In order to use the host shmat, we need to honor host SHMLBA. */
4370 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4372 if (mmap_start == -1) {
4373 errno = ENOMEM;
4374 host_raddr = (void *)-1;
4375 } else
4376 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4379 if (host_raddr == (void *)-1) {
4380 mmap_unlock();
4381 return get_errno((long)host_raddr);
4383 raddr=h2g((unsigned long)host_raddr);
4385 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4386 PAGE_VALID | PAGE_READ |
4387 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4389 for (i = 0; i < N_SHM_REGIONS; i++) {
4390 if (!shm_regions[i].in_use) {
4391 shm_regions[i].in_use = true;
4392 shm_regions[i].start = raddr;
4393 shm_regions[i].size = shm_info.shm_segsz;
4394 break;
4398 mmap_unlock();
4399 return raddr;
4403 static inline abi_long do_shmdt(abi_ulong shmaddr)
4405 int i;
4406 abi_long rv;
4408 mmap_lock();
4410 for (i = 0; i < N_SHM_REGIONS; ++i) {
4411 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4412 shm_regions[i].in_use = false;
4413 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4414 break;
4417 rv = get_errno(shmdt(g2h(shmaddr)));
4419 mmap_unlock();
4421 return rv;
4424 #ifdef TARGET_NR_ipc
4425 /* ??? This only works with linear mappings. */
4426 /* do_ipc() must return target values and target errnos. */
4427 static abi_long do_ipc(CPUArchState *cpu_env,
4428 unsigned int call, abi_long first,
4429 abi_long second, abi_long third,
4430 abi_long ptr, abi_long fifth)
4432 int version;
4433 abi_long ret = 0;
4435 version = call >> 16;
4436 call &= 0xffff;
4438 switch (call) {
4439 case IPCOP_semop:
4440 ret = do_semtimedop(first, ptr, second, 0, false);
4441 break;
4442 case IPCOP_semtimedop:
4444 * The s390 sys_ipc variant has only five parameters instead of six
4445 * (as for default variant) and the only difference is the handling of
4446 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4447 * to a struct timespec where the generic variant uses fifth parameter.
4449 #if defined(TARGET_S390X)
4450 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4451 #else
4452 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4453 #endif
4454 break;
4456 case IPCOP_semget:
4457 ret = get_errno(semget(first, second, third));
4458 break;
4460 case IPCOP_semctl: {
4461 /* The semun argument to semctl is passed by value, so dereference the
4462 * ptr argument. */
4463 abi_ulong atptr;
4464 get_user_ual(atptr, ptr);
4465 ret = do_semctl(first, second, third, atptr);
4466 break;
4469 case IPCOP_msgget:
4470 ret = get_errno(msgget(first, second));
4471 break;
4473 case IPCOP_msgsnd:
4474 ret = do_msgsnd(first, ptr, second, third);
4475 break;
4477 case IPCOP_msgctl:
4478 ret = do_msgctl(first, second, ptr);
4479 break;
4481 case IPCOP_msgrcv:
4482 switch (version) {
4483 case 0:
4485 struct target_ipc_kludge {
4486 abi_long msgp;
4487 abi_long msgtyp;
4488 } *tmp;
4490 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4491 ret = -TARGET_EFAULT;
4492 break;
4495 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4497 unlock_user_struct(tmp, ptr, 0);
4498 break;
4500 default:
4501 ret = do_msgrcv(first, ptr, second, fifth, third);
4503 break;
4505 case IPCOP_shmat:
4506 switch (version) {
4507 default:
4509 abi_ulong raddr;
4510 raddr = do_shmat(cpu_env, first, ptr, second);
4511 if (is_error(raddr))
4512 return get_errno(raddr);
4513 if (put_user_ual(raddr, third))
4514 return -TARGET_EFAULT;
4515 break;
4517 case 1:
4518 ret = -TARGET_EINVAL;
4519 break;
4521 break;
4522 case IPCOP_shmdt:
4523 ret = do_shmdt(ptr);
4524 break;
4526 case IPCOP_shmget:
4527 /* IPC_* flag values are the same on all linux platforms */
4528 ret = get_errno(shmget(first, second, third));
4529 break;
4531 /* IPC_* and SHM_* command values are the same on all linux platforms */
4532 case IPCOP_shmctl:
4533 ret = do_shmctl(first, second, ptr);
4534 break;
4535 default:
4536 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4537 call, version);
4538 ret = -TARGET_ENOSYS;
4539 break;
4541 return ret;
4543 #endif
4545 /* kernel structure types definitions */
4547 #define STRUCT(name, ...) STRUCT_ ## name,
4548 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4549 enum {
4550 #include "syscall_types.h"
4551 STRUCT_MAX
4553 #undef STRUCT
4554 #undef STRUCT_SPECIAL
4556 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4557 #define STRUCT_SPECIAL(name)
4558 #include "syscall_types.h"
4559 #undef STRUCT
4560 #undef STRUCT_SPECIAL
4562 #define MAX_STRUCT_SIZE 4096
4564 #ifdef CONFIG_FIEMAP
4565 /* So fiemap access checks don't overflow on 32 bit systems.
4566 * This is very slightly smaller than the limit imposed by
4567 * the underlying kernel.
4569 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4570 / sizeof(struct fiemap_extent))
4572 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4573 int fd, int cmd, abi_long arg)
4575 /* The parameter for this ioctl is a struct fiemap followed
4576 * by an array of struct fiemap_extent whose size is set
4577 * in fiemap->fm_extent_count. The array is filled in by the
4578 * ioctl.
4580 int target_size_in, target_size_out;
4581 struct fiemap *fm;
4582 const argtype *arg_type = ie->arg_type;
4583 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4584 void *argptr, *p;
4585 abi_long ret;
4586 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4587 uint32_t outbufsz;
4588 int free_fm = 0;
4590 assert(arg_type[0] == TYPE_PTR);
4591 assert(ie->access == IOC_RW);
4592 arg_type++;
4593 target_size_in = thunk_type_size(arg_type, 0);
4594 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4595 if (!argptr) {
4596 return -TARGET_EFAULT;
4598 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4599 unlock_user(argptr, arg, 0);
4600 fm = (struct fiemap *)buf_temp;
4601 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4602 return -TARGET_EINVAL;
4605 outbufsz = sizeof (*fm) +
4606 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4608 if (outbufsz > MAX_STRUCT_SIZE) {
4609 /* We can't fit all the extents into the fixed size buffer.
4610 * Allocate one that is large enough and use it instead.
4612 fm = g_try_malloc(outbufsz);
4613 if (!fm) {
4614 return -TARGET_ENOMEM;
4616 memcpy(fm, buf_temp, sizeof(struct fiemap));
4617 free_fm = 1;
4619 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4620 if (!is_error(ret)) {
4621 target_size_out = target_size_in;
4622 /* An extent_count of 0 means we were only counting the extents
4623 * so there are no structs to copy
4625 if (fm->fm_extent_count != 0) {
4626 target_size_out += fm->fm_mapped_extents * extent_size;
4628 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4629 if (!argptr) {
4630 ret = -TARGET_EFAULT;
4631 } else {
4632 /* Convert the struct fiemap */
4633 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4634 if (fm->fm_extent_count != 0) {
4635 p = argptr + target_size_in;
4636 /* ...and then all the struct fiemap_extents */
4637 for (i = 0; i < fm->fm_mapped_extents; i++) {
4638 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4639 THUNK_TARGET);
4640 p += extent_size;
4643 unlock_user(argptr, arg, target_size_out);
4646 if (free_fm) {
4647 g_free(fm);
4649 return ret;
4651 #endif
4653 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4654 int fd, int cmd, abi_long arg)
4656 const argtype *arg_type = ie->arg_type;
4657 int target_size;
4658 void *argptr;
4659 int ret;
4660 struct ifconf *host_ifconf;
4661 uint32_t outbufsz;
4662 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4663 int target_ifreq_size;
4664 int nb_ifreq;
4665 int free_buf = 0;
4666 int i;
4667 int target_ifc_len;
4668 abi_long target_ifc_buf;
4669 int host_ifc_len;
4670 char *host_ifc_buf;
4672 assert(arg_type[0] == TYPE_PTR);
4673 assert(ie->access == IOC_RW);
4675 arg_type++;
4676 target_size = thunk_type_size(arg_type, 0);
4678 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4679 if (!argptr)
4680 return -TARGET_EFAULT;
4681 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4682 unlock_user(argptr, arg, 0);
4684 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4685 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4686 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4688 if (target_ifc_buf != 0) {
4689 target_ifc_len = host_ifconf->ifc_len;
4690 nb_ifreq = target_ifc_len / target_ifreq_size;
4691 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4693 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4694 if (outbufsz > MAX_STRUCT_SIZE) {
4696 * We can't fit all the extents into the fixed size buffer.
4697 * Allocate one that is large enough and use it instead.
4699 host_ifconf = malloc(outbufsz);
4700 if (!host_ifconf) {
4701 return -TARGET_ENOMEM;
4703 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4704 free_buf = 1;
4706 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4708 host_ifconf->ifc_len = host_ifc_len;
4709 } else {
4710 host_ifc_buf = NULL;
4712 host_ifconf->ifc_buf = host_ifc_buf;
4714 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4715 if (!is_error(ret)) {
4716 /* convert host ifc_len to target ifc_len */
4718 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4719 target_ifc_len = nb_ifreq * target_ifreq_size;
4720 host_ifconf->ifc_len = target_ifc_len;
4722 /* restore target ifc_buf */
4724 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4726 /* copy struct ifconf to target user */
4728 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4729 if (!argptr)
4730 return -TARGET_EFAULT;
4731 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4732 unlock_user(argptr, arg, target_size);
4734 if (target_ifc_buf != 0) {
4735 /* copy ifreq[] to target user */
4736 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4737 for (i = 0; i < nb_ifreq ; i++) {
4738 thunk_convert(argptr + i * target_ifreq_size,
4739 host_ifc_buf + i * sizeof(struct ifreq),
4740 ifreq_arg_type, THUNK_TARGET);
4742 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4746 if (free_buf) {
4747 free(host_ifconf);
4750 return ret;
4753 #if defined(CONFIG_USBFS)
4754 #if HOST_LONG_BITS > 64
4755 #error USBDEVFS thunks do not support >64 bit hosts yet.
4756 #endif
4757 struct live_urb {
4758 uint64_t target_urb_adr;
4759 uint64_t target_buf_adr;
4760 char *target_buf_ptr;
4761 struct usbdevfs_urb host_urb;
4764 static GHashTable *usbdevfs_urb_hashtable(void)
4766 static GHashTable *urb_hashtable;
4768 if (!urb_hashtable) {
4769 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4771 return urb_hashtable;
4774 static void urb_hashtable_insert(struct live_urb *urb)
4776 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4777 g_hash_table_insert(urb_hashtable, urb, urb);
4780 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4782 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4783 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4786 static void urb_hashtable_remove(struct live_urb *urb)
4788 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4789 g_hash_table_remove(urb_hashtable, urb);
4792 static abi_long
4793 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4794 int fd, int cmd, abi_long arg)
4796 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4797 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4798 struct live_urb *lurb;
4799 void *argptr;
4800 uint64_t hurb;
4801 int target_size;
4802 uintptr_t target_urb_adr;
4803 abi_long ret;
4805 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4807 memset(buf_temp, 0, sizeof(uint64_t));
4808 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4809 if (is_error(ret)) {
4810 return ret;
4813 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4814 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4815 if (!lurb->target_urb_adr) {
4816 return -TARGET_EFAULT;
4818 urb_hashtable_remove(lurb);
4819 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4820 lurb->host_urb.buffer_length);
4821 lurb->target_buf_ptr = NULL;
4823 /* restore the guest buffer pointer */
4824 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4826 /* update the guest urb struct */
4827 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4828 if (!argptr) {
4829 g_free(lurb);
4830 return -TARGET_EFAULT;
4832 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4833 unlock_user(argptr, lurb->target_urb_adr, target_size);
4835 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4836 /* write back the urb handle */
4837 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4838 if (!argptr) {
4839 g_free(lurb);
4840 return -TARGET_EFAULT;
4843 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4844 target_urb_adr = lurb->target_urb_adr;
4845 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4846 unlock_user(argptr, arg, target_size);
4848 g_free(lurb);
4849 return ret;
4852 static abi_long
4853 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4854 uint8_t *buf_temp __attribute__((unused)),
4855 int fd, int cmd, abi_long arg)
4857 struct live_urb *lurb;
4859 /* map target address back to host URB with metadata. */
4860 lurb = urb_hashtable_lookup(arg);
4861 if (!lurb) {
4862 return -TARGET_EFAULT;
4864 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4867 static abi_long
4868 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4869 int fd, int cmd, abi_long arg)
4871 const argtype *arg_type = ie->arg_type;
4872 int target_size;
4873 abi_long ret;
4874 void *argptr;
4875 int rw_dir;
4876 struct live_urb *lurb;
4879 * each submitted URB needs to map to a unique ID for the
4880 * kernel, and that unique ID needs to be a pointer to
4881 * host memory. hence, we need to malloc for each URB.
4882 * isochronous transfers have a variable length struct.
4884 arg_type++;
4885 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4887 /* construct host copy of urb and metadata */
4888 lurb = g_try_malloc0(sizeof(struct live_urb));
4889 if (!lurb) {
4890 return -TARGET_ENOMEM;
4893 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4894 if (!argptr) {
4895 g_free(lurb);
4896 return -TARGET_EFAULT;
4898 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4899 unlock_user(argptr, arg, 0);
4901 lurb->target_urb_adr = arg;
4902 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4904 /* buffer space used depends on endpoint type so lock the entire buffer */
4905 /* control type urbs should check the buffer contents for true direction */
4906 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4907 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4908 lurb->host_urb.buffer_length, 1);
4909 if (lurb->target_buf_ptr == NULL) {
4910 g_free(lurb);
4911 return -TARGET_EFAULT;
4914 /* update buffer pointer in host copy */
4915 lurb->host_urb.buffer = lurb->target_buf_ptr;
4917 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4918 if (is_error(ret)) {
4919 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4920 g_free(lurb);
4921 } else {
4922 urb_hashtable_insert(lurb);
4925 return ret;
4927 #endif /* CONFIG_USBFS */
4929 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4930 int cmd, abi_long arg)
4932 void *argptr;
4933 struct dm_ioctl *host_dm;
4934 abi_long guest_data;
4935 uint32_t guest_data_size;
4936 int target_size;
4937 const argtype *arg_type = ie->arg_type;
4938 abi_long ret;
4939 void *big_buf = NULL;
4940 char *host_data;
4942 arg_type++;
4943 target_size = thunk_type_size(arg_type, 0);
4944 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4945 if (!argptr) {
4946 ret = -TARGET_EFAULT;
4947 goto out;
4949 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4950 unlock_user(argptr, arg, 0);
4952 /* buf_temp is too small, so fetch things into a bigger buffer */
4953 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4954 memcpy(big_buf, buf_temp, target_size);
4955 buf_temp = big_buf;
4956 host_dm = big_buf;
4958 guest_data = arg + host_dm->data_start;
4959 if ((guest_data - arg) < 0) {
4960 ret = -TARGET_EINVAL;
4961 goto out;
4963 guest_data_size = host_dm->data_size - host_dm->data_start;
4964 host_data = (char*)host_dm + host_dm->data_start;
4966 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4967 if (!argptr) {
4968 ret = -TARGET_EFAULT;
4969 goto out;
4972 switch (ie->host_cmd) {
4973 case DM_REMOVE_ALL:
4974 case DM_LIST_DEVICES:
4975 case DM_DEV_CREATE:
4976 case DM_DEV_REMOVE:
4977 case DM_DEV_SUSPEND:
4978 case DM_DEV_STATUS:
4979 case DM_DEV_WAIT:
4980 case DM_TABLE_STATUS:
4981 case DM_TABLE_CLEAR:
4982 case DM_TABLE_DEPS:
4983 case DM_LIST_VERSIONS:
4984 /* no input data */
4985 break;
4986 case DM_DEV_RENAME:
4987 case DM_DEV_SET_GEOMETRY:
4988 /* data contains only strings */
4989 memcpy(host_data, argptr, guest_data_size);
4990 break;
4991 case DM_TARGET_MSG:
4992 memcpy(host_data, argptr, guest_data_size);
4993 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4994 break;
4995 case DM_TABLE_LOAD:
4997 void *gspec = argptr;
4998 void *cur_data = host_data;
4999 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5000 int spec_size = thunk_type_size(arg_type, 0);
5001 int i;
5003 for (i = 0; i < host_dm->target_count; i++) {
5004 struct dm_target_spec *spec = cur_data;
5005 uint32_t next;
5006 int slen;
5008 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5009 slen = strlen((char*)gspec + spec_size) + 1;
5010 next = spec->next;
5011 spec->next = sizeof(*spec) + slen;
5012 strcpy((char*)&spec[1], gspec + spec_size);
5013 gspec += next;
5014 cur_data += spec->next;
5016 break;
5018 default:
5019 ret = -TARGET_EINVAL;
5020 unlock_user(argptr, guest_data, 0);
5021 goto out;
5023 unlock_user(argptr, guest_data, 0);
5025 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5026 if (!is_error(ret)) {
5027 guest_data = arg + host_dm->data_start;
5028 guest_data_size = host_dm->data_size - host_dm->data_start;
5029 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5030 switch (ie->host_cmd) {
5031 case DM_REMOVE_ALL:
5032 case DM_DEV_CREATE:
5033 case DM_DEV_REMOVE:
5034 case DM_DEV_RENAME:
5035 case DM_DEV_SUSPEND:
5036 case DM_DEV_STATUS:
5037 case DM_TABLE_LOAD:
5038 case DM_TABLE_CLEAR:
5039 case DM_TARGET_MSG:
5040 case DM_DEV_SET_GEOMETRY:
5041 /* no return data */
5042 break;
5043 case DM_LIST_DEVICES:
5045 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5046 uint32_t remaining_data = guest_data_size;
5047 void *cur_data = argptr;
5048 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5049 int nl_size = 12; /* can't use thunk_size due to alignment */
5051 while (1) {
5052 uint32_t next = nl->next;
5053 if (next) {
5054 nl->next = nl_size + (strlen(nl->name) + 1);
5056 if (remaining_data < nl->next) {
5057 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5058 break;
5060 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5061 strcpy(cur_data + nl_size, nl->name);
5062 cur_data += nl->next;
5063 remaining_data -= nl->next;
5064 if (!next) {
5065 break;
5067 nl = (void*)nl + next;
5069 break;
5071 case DM_DEV_WAIT:
5072 case DM_TABLE_STATUS:
5074 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5075 void *cur_data = argptr;
5076 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5077 int spec_size = thunk_type_size(arg_type, 0);
5078 int i;
5080 for (i = 0; i < host_dm->target_count; i++) {
5081 uint32_t next = spec->next;
5082 int slen = strlen((char*)&spec[1]) + 1;
5083 spec->next = (cur_data - argptr) + spec_size + slen;
5084 if (guest_data_size < spec->next) {
5085 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5086 break;
5088 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5089 strcpy(cur_data + spec_size, (char*)&spec[1]);
5090 cur_data = argptr + spec->next;
5091 spec = (void*)host_dm + host_dm->data_start + next;
5093 break;
5095 case DM_TABLE_DEPS:
5097 void *hdata = (void*)host_dm + host_dm->data_start;
5098 int count = *(uint32_t*)hdata;
5099 uint64_t *hdev = hdata + 8;
5100 uint64_t *gdev = argptr + 8;
5101 int i;
5103 *(uint32_t*)argptr = tswap32(count);
5104 for (i = 0; i < count; i++) {
5105 *gdev = tswap64(*hdev);
5106 gdev++;
5107 hdev++;
5109 break;
5111 case DM_LIST_VERSIONS:
5113 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5114 uint32_t remaining_data = guest_data_size;
5115 void *cur_data = argptr;
5116 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5117 int vers_size = thunk_type_size(arg_type, 0);
5119 while (1) {
5120 uint32_t next = vers->next;
5121 if (next) {
5122 vers->next = vers_size + (strlen(vers->name) + 1);
5124 if (remaining_data < vers->next) {
5125 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5126 break;
5128 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5129 strcpy(cur_data + vers_size, vers->name);
5130 cur_data += vers->next;
5131 remaining_data -= vers->next;
5132 if (!next) {
5133 break;
5135 vers = (void*)vers + next;
5137 break;
5139 default:
5140 unlock_user(argptr, guest_data, 0);
5141 ret = -TARGET_EINVAL;
5142 goto out;
5144 unlock_user(argptr, guest_data, guest_data_size);
5146 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5147 if (!argptr) {
5148 ret = -TARGET_EFAULT;
5149 goto out;
5151 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5152 unlock_user(argptr, arg, target_size);
5154 out:
5155 g_free(big_buf);
5156 return ret;
5159 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5160 int cmd, abi_long arg)
5162 void *argptr;
5163 int target_size;
5164 const argtype *arg_type = ie->arg_type;
5165 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5166 abi_long ret;
5168 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5169 struct blkpg_partition host_part;
5171 /* Read and convert blkpg */
5172 arg_type++;
5173 target_size = thunk_type_size(arg_type, 0);
5174 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5175 if (!argptr) {
5176 ret = -TARGET_EFAULT;
5177 goto out;
5179 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5180 unlock_user(argptr, arg, 0);
5182 switch (host_blkpg->op) {
5183 case BLKPG_ADD_PARTITION:
5184 case BLKPG_DEL_PARTITION:
5185 /* payload is struct blkpg_partition */
5186 break;
5187 default:
5188 /* Unknown opcode */
5189 ret = -TARGET_EINVAL;
5190 goto out;
5193 /* Read and convert blkpg->data */
5194 arg = (abi_long)(uintptr_t)host_blkpg->data;
5195 target_size = thunk_type_size(part_arg_type, 0);
5196 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5197 if (!argptr) {
5198 ret = -TARGET_EFAULT;
5199 goto out;
5201 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5202 unlock_user(argptr, arg, 0);
5204 /* Swizzle the data pointer to our local copy and call! */
5205 host_blkpg->data = &host_part;
5206 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5208 out:
5209 return ret;
5212 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5213 int fd, int cmd, abi_long arg)
5215 const argtype *arg_type = ie->arg_type;
5216 const StructEntry *se;
5217 const argtype *field_types;
5218 const int *dst_offsets, *src_offsets;
5219 int target_size;
5220 void *argptr;
5221 abi_ulong *target_rt_dev_ptr = NULL;
5222 unsigned long *host_rt_dev_ptr = NULL;
5223 abi_long ret;
5224 int i;
5226 assert(ie->access == IOC_W);
5227 assert(*arg_type == TYPE_PTR);
5228 arg_type++;
5229 assert(*arg_type == TYPE_STRUCT);
5230 target_size = thunk_type_size(arg_type, 0);
5231 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5232 if (!argptr) {
5233 return -TARGET_EFAULT;
5235 arg_type++;
5236 assert(*arg_type == (int)STRUCT_rtentry);
5237 se = struct_entries + *arg_type++;
5238 assert(se->convert[0] == NULL);
5239 /* convert struct here to be able to catch rt_dev string */
5240 field_types = se->field_types;
5241 dst_offsets = se->field_offsets[THUNK_HOST];
5242 src_offsets = se->field_offsets[THUNK_TARGET];
5243 for (i = 0; i < se->nb_fields; i++) {
5244 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5245 assert(*field_types == TYPE_PTRVOID);
5246 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5247 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5248 if (*target_rt_dev_ptr != 0) {
5249 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5250 tswapal(*target_rt_dev_ptr));
5251 if (!*host_rt_dev_ptr) {
5252 unlock_user(argptr, arg, 0);
5253 return -TARGET_EFAULT;
5255 } else {
5256 *host_rt_dev_ptr = 0;
5258 field_types++;
5259 continue;
5261 field_types = thunk_convert(buf_temp + dst_offsets[i],
5262 argptr + src_offsets[i],
5263 field_types, THUNK_HOST);
5265 unlock_user(argptr, arg, 0);
5267 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5269 assert(host_rt_dev_ptr != NULL);
5270 assert(target_rt_dev_ptr != NULL);
5271 if (*host_rt_dev_ptr != 0) {
5272 unlock_user((void *)*host_rt_dev_ptr,
5273 *target_rt_dev_ptr, 0);
5275 return ret;
5278 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5279 int fd, int cmd, abi_long arg)
5281 int sig = target_to_host_signal(arg);
5282 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5285 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5286 int fd, int cmd, abi_long arg)
5288 struct timeval tv;
5289 abi_long ret;
5291 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5292 if (is_error(ret)) {
5293 return ret;
5296 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5297 if (copy_to_user_timeval(arg, &tv)) {
5298 return -TARGET_EFAULT;
5300 } else {
5301 if (copy_to_user_timeval64(arg, &tv)) {
5302 return -TARGET_EFAULT;
5306 return ret;
5309 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5310 int fd, int cmd, abi_long arg)
5312 struct timespec ts;
5313 abi_long ret;
5315 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5316 if (is_error(ret)) {
5317 return ret;
5320 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5321 if (host_to_target_timespec(arg, &ts)) {
5322 return -TARGET_EFAULT;
5324 } else{
5325 if (host_to_target_timespec64(arg, &ts)) {
5326 return -TARGET_EFAULT;
5330 return ret;
5333 #ifdef TIOCGPTPEER
5334 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5335 int fd, int cmd, abi_long arg)
5337 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5338 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5340 #endif
5342 #ifdef HAVE_DRM_H
5344 static void unlock_drm_version(struct drm_version *host_ver,
5345 struct target_drm_version *target_ver,
5346 bool copy)
5348 unlock_user(host_ver->name, target_ver->name,
5349 copy ? host_ver->name_len : 0);
5350 unlock_user(host_ver->date, target_ver->date,
5351 copy ? host_ver->date_len : 0);
5352 unlock_user(host_ver->desc, target_ver->desc,
5353 copy ? host_ver->desc_len : 0);
5356 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5357 struct target_drm_version *target_ver)
5359 memset(host_ver, 0, sizeof(*host_ver));
5361 __get_user(host_ver->name_len, &target_ver->name_len);
5362 if (host_ver->name_len) {
5363 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5364 target_ver->name_len, 0);
5365 if (!host_ver->name) {
5366 return -EFAULT;
5370 __get_user(host_ver->date_len, &target_ver->date_len);
5371 if (host_ver->date_len) {
5372 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5373 target_ver->date_len, 0);
5374 if (!host_ver->date) {
5375 goto err;
5379 __get_user(host_ver->desc_len, &target_ver->desc_len);
5380 if (host_ver->desc_len) {
5381 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5382 target_ver->desc_len, 0);
5383 if (!host_ver->desc) {
5384 goto err;
5388 return 0;
5389 err:
5390 unlock_drm_version(host_ver, target_ver, false);
5391 return -EFAULT;
5394 static inline void host_to_target_drmversion(
5395 struct target_drm_version *target_ver,
5396 struct drm_version *host_ver)
5398 __put_user(host_ver->version_major, &target_ver->version_major);
5399 __put_user(host_ver->version_minor, &target_ver->version_minor);
5400 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5401 __put_user(host_ver->name_len, &target_ver->name_len);
5402 __put_user(host_ver->date_len, &target_ver->date_len);
5403 __put_user(host_ver->desc_len, &target_ver->desc_len);
5404 unlock_drm_version(host_ver, target_ver, true);
5407 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5408 int fd, int cmd, abi_long arg)
5410 struct drm_version *ver;
5411 struct target_drm_version *target_ver;
5412 abi_long ret;
5414 switch (ie->host_cmd) {
5415 case DRM_IOCTL_VERSION:
5416 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5417 return -TARGET_EFAULT;
5419 ver = (struct drm_version *)buf_temp;
5420 ret = target_to_host_drmversion(ver, target_ver);
5421 if (!is_error(ret)) {
5422 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5423 if (is_error(ret)) {
5424 unlock_drm_version(ver, target_ver, false);
5425 } else {
5426 host_to_target_drmversion(target_ver, ver);
5429 unlock_user_struct(target_ver, arg, 0);
5430 return ret;
5432 return -TARGET_ENOSYS;
5435 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5436 struct drm_i915_getparam *gparam,
5437 int fd, abi_long arg)
5439 abi_long ret;
5440 int value;
5441 struct target_drm_i915_getparam *target_gparam;
5443 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5444 return -TARGET_EFAULT;
5447 __get_user(gparam->param, &target_gparam->param);
5448 gparam->value = &value;
5449 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5450 put_user_s32(value, target_gparam->value);
5452 unlock_user_struct(target_gparam, arg, 0);
5453 return ret;
5456 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5457 int fd, int cmd, abi_long arg)
5459 switch (ie->host_cmd) {
5460 case DRM_IOCTL_I915_GETPARAM:
5461 return do_ioctl_drm_i915_getparam(ie,
5462 (struct drm_i915_getparam *)buf_temp,
5463 fd, arg);
5464 default:
5465 return -TARGET_ENOSYS;
5469 #endif
5471 IOCTLEntry ioctl_entries[] = {
5472 #define IOCTL(cmd, access, ...) \
5473 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5474 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5475 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5476 #define IOCTL_IGNORE(cmd) \
5477 { TARGET_ ## cmd, 0, #cmd },
5478 #include "ioctls.h"
5479 { 0, 0, },
5482 /* ??? Implement proper locking for ioctls. */
5483 /* do_ioctl() Must return target values and target errnos. */
5484 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5486 const IOCTLEntry *ie;
5487 const argtype *arg_type;
5488 abi_long ret;
5489 uint8_t buf_temp[MAX_STRUCT_SIZE];
5490 int target_size;
5491 void *argptr;
5493 ie = ioctl_entries;
5494 for(;;) {
5495 if (ie->target_cmd == 0) {
5496 qemu_log_mask(
5497 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5498 return -TARGET_ENOSYS;
5500 if (ie->target_cmd == cmd)
5501 break;
5502 ie++;
5504 arg_type = ie->arg_type;
5505 if (ie->do_ioctl) {
5506 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5507 } else if (!ie->host_cmd) {
5508 /* Some architectures define BSD ioctls in their headers
5509 that are not implemented in Linux. */
5510 return -TARGET_ENOSYS;
5513 switch(arg_type[0]) {
5514 case TYPE_NULL:
5515 /* no argument */
5516 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5517 break;
5518 case TYPE_PTRVOID:
5519 case TYPE_INT:
5520 case TYPE_LONG:
5521 case TYPE_ULONG:
5522 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5523 break;
5524 case TYPE_PTR:
5525 arg_type++;
5526 target_size = thunk_type_size(arg_type, 0);
5527 switch(ie->access) {
5528 case IOC_R:
5529 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5530 if (!is_error(ret)) {
5531 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5532 if (!argptr)
5533 return -TARGET_EFAULT;
5534 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5535 unlock_user(argptr, arg, target_size);
5537 break;
5538 case IOC_W:
5539 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5540 if (!argptr)
5541 return -TARGET_EFAULT;
5542 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5543 unlock_user(argptr, arg, 0);
5544 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5545 break;
5546 default:
5547 case IOC_RW:
5548 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5549 if (!argptr)
5550 return -TARGET_EFAULT;
5551 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5552 unlock_user(argptr, arg, 0);
5553 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5554 if (!is_error(ret)) {
5555 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5556 if (!argptr)
5557 return -TARGET_EFAULT;
5558 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5559 unlock_user(argptr, arg, target_size);
5561 break;
5563 break;
5564 default:
5565 qemu_log_mask(LOG_UNIMP,
5566 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5567 (long)cmd, arg_type[0]);
5568 ret = -TARGET_ENOSYS;
5569 break;
5571 return ret;
5574 static const bitmask_transtbl iflag_tbl[] = {
5575 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5576 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5577 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5578 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5579 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5580 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5581 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5582 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5583 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5584 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5585 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5586 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5587 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5588 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5589 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5590 { 0, 0, 0, 0 }
5593 static const bitmask_transtbl oflag_tbl[] = {
5594 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5595 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5596 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5597 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5598 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5599 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5600 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5601 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5602 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5603 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5604 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5605 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5606 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5607 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5608 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5609 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5610 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5611 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5612 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5613 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5614 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5615 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5616 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5617 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5618 { 0, 0, 0, 0 }
5621 static const bitmask_transtbl cflag_tbl[] = {
5622 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5623 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5624 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5625 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5626 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5627 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5628 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5629 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5630 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5631 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5632 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5633 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5634 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5635 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5636 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5637 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5638 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5639 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5640 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5641 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5642 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5643 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5644 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5645 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5646 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5647 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5648 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5649 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5650 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5651 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5652 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5653 { 0, 0, 0, 0 }
5656 static const bitmask_transtbl lflag_tbl[] = {
5657 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5658 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5659 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5660 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5661 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5662 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5663 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5664 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5665 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5666 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5667 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5668 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5669 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5670 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5671 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5672 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5673 { 0, 0, 0, 0 }
5676 static void target_to_host_termios (void *dst, const void *src)
5678 struct host_termios *host = dst;
5679 const struct target_termios *target = src;
5681 host->c_iflag =
5682 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5683 host->c_oflag =
5684 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5685 host->c_cflag =
5686 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5687 host->c_lflag =
5688 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5689 host->c_line = target->c_line;
5691 memset(host->c_cc, 0, sizeof(host->c_cc));
5692 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5693 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5694 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5695 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5696 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5697 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5698 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5699 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5700 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5701 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5702 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5703 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5704 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5705 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5706 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5707 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5708 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5711 static void host_to_target_termios (void *dst, const void *src)
5713 struct target_termios *target = dst;
5714 const struct host_termios *host = src;
5716 target->c_iflag =
5717 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5718 target->c_oflag =
5719 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5720 target->c_cflag =
5721 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5722 target->c_lflag =
5723 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5724 target->c_line = host->c_line;
5726 memset(target->c_cc, 0, sizeof(target->c_cc));
5727 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5728 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5729 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5730 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5731 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5732 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5733 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5734 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5735 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5736 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5737 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5738 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5739 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5740 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5741 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5742 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5743 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5746 static const StructEntry struct_termios_def = {
5747 .convert = { host_to_target_termios, target_to_host_termios },
5748 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5749 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5750 .print = print_termios,
5753 static bitmask_transtbl mmap_flags_tbl[] = {
5754 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5755 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5756 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5757 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5758 MAP_ANONYMOUS, MAP_ANONYMOUS },
5759 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5760 MAP_GROWSDOWN, MAP_GROWSDOWN },
5761 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5762 MAP_DENYWRITE, MAP_DENYWRITE },
5763 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5764 MAP_EXECUTABLE, MAP_EXECUTABLE },
5765 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5766 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5767 MAP_NORESERVE, MAP_NORESERVE },
5768 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5769 /* MAP_STACK had been ignored by the kernel for quite some time.
5770 Recognize it for the target insofar as we do not want to pass
5771 it through to the host. */
5772 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5773 { 0, 0, 0, 0 }
5777 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5778 * TARGET_I386 is defined if TARGET_X86_64 is defined
5780 #if defined(TARGET_I386)
5782 /* NOTE: there is really one LDT for all the threads */
5783 static uint8_t *ldt_table;
5785 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5787 int size;
5788 void *p;
5790 if (!ldt_table)
5791 return 0;
5792 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5793 if (size > bytecount)
5794 size = bytecount;
5795 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5796 if (!p)
5797 return -TARGET_EFAULT;
5798 /* ??? Should this by byteswapped? */
5799 memcpy(p, ldt_table, size);
5800 unlock_user(p, ptr, size);
5801 return size;
5804 /* XXX: add locking support */
5805 static abi_long write_ldt(CPUX86State *env,
5806 abi_ulong ptr, unsigned long bytecount, int oldmode)
5808 struct target_modify_ldt_ldt_s ldt_info;
5809 struct target_modify_ldt_ldt_s *target_ldt_info;
5810 int seg_32bit, contents, read_exec_only, limit_in_pages;
5811 int seg_not_present, useable, lm;
5812 uint32_t *lp, entry_1, entry_2;
5814 if (bytecount != sizeof(ldt_info))
5815 return -TARGET_EINVAL;
5816 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5817 return -TARGET_EFAULT;
5818 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5819 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5820 ldt_info.limit = tswap32(target_ldt_info->limit);
5821 ldt_info.flags = tswap32(target_ldt_info->flags);
5822 unlock_user_struct(target_ldt_info, ptr, 0);
5824 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5825 return -TARGET_EINVAL;
5826 seg_32bit = ldt_info.flags & 1;
5827 contents = (ldt_info.flags >> 1) & 3;
5828 read_exec_only = (ldt_info.flags >> 3) & 1;
5829 limit_in_pages = (ldt_info.flags >> 4) & 1;
5830 seg_not_present = (ldt_info.flags >> 5) & 1;
5831 useable = (ldt_info.flags >> 6) & 1;
5832 #ifdef TARGET_ABI32
5833 lm = 0;
5834 #else
5835 lm = (ldt_info.flags >> 7) & 1;
5836 #endif
5837 if (contents == 3) {
5838 if (oldmode)
5839 return -TARGET_EINVAL;
5840 if (seg_not_present == 0)
5841 return -TARGET_EINVAL;
5843 /* allocate the LDT */
5844 if (!ldt_table) {
5845 env->ldt.base = target_mmap(0,
5846 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5847 PROT_READ|PROT_WRITE,
5848 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5849 if (env->ldt.base == -1)
5850 return -TARGET_ENOMEM;
5851 memset(g2h(env->ldt.base), 0,
5852 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5853 env->ldt.limit = 0xffff;
5854 ldt_table = g2h(env->ldt.base);
5857 /* NOTE: same code as Linux kernel */
5858 /* Allow LDTs to be cleared by the user. */
5859 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5860 if (oldmode ||
5861 (contents == 0 &&
5862 read_exec_only == 1 &&
5863 seg_32bit == 0 &&
5864 limit_in_pages == 0 &&
5865 seg_not_present == 1 &&
5866 useable == 0 )) {
5867 entry_1 = 0;
5868 entry_2 = 0;
5869 goto install;
5873 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5874 (ldt_info.limit & 0x0ffff);
5875 entry_2 = (ldt_info.base_addr & 0xff000000) |
5876 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5877 (ldt_info.limit & 0xf0000) |
5878 ((read_exec_only ^ 1) << 9) |
5879 (contents << 10) |
5880 ((seg_not_present ^ 1) << 15) |
5881 (seg_32bit << 22) |
5882 (limit_in_pages << 23) |
5883 (lm << 21) |
5884 0x7000;
5885 if (!oldmode)
5886 entry_2 |= (useable << 20);
5888 /* Install the new entry ... */
5889 install:
5890 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5891 lp[0] = tswap32(entry_1);
5892 lp[1] = tswap32(entry_2);
5893 return 0;
5896 /* specific and weird i386 syscalls */
5897 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5898 unsigned long bytecount)
5900 abi_long ret;
5902 switch (func) {
5903 case 0:
5904 ret = read_ldt(ptr, bytecount);
5905 break;
5906 case 1:
5907 ret = write_ldt(env, ptr, bytecount, 1);
5908 break;
5909 case 0x11:
5910 ret = write_ldt(env, ptr, bytecount, 0);
5911 break;
5912 default:
5913 ret = -TARGET_ENOSYS;
5914 break;
5916 return ret;
5919 #if defined(TARGET_ABI32)
5920 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5922 uint64_t *gdt_table = g2h(env->gdt.base);
5923 struct target_modify_ldt_ldt_s ldt_info;
5924 struct target_modify_ldt_ldt_s *target_ldt_info;
5925 int seg_32bit, contents, read_exec_only, limit_in_pages;
5926 int seg_not_present, useable, lm;
5927 uint32_t *lp, entry_1, entry_2;
5928 int i;
5930 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5931 if (!target_ldt_info)
5932 return -TARGET_EFAULT;
5933 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5934 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5935 ldt_info.limit = tswap32(target_ldt_info->limit);
5936 ldt_info.flags = tswap32(target_ldt_info->flags);
5937 if (ldt_info.entry_number == -1) {
5938 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5939 if (gdt_table[i] == 0) {
5940 ldt_info.entry_number = i;
5941 target_ldt_info->entry_number = tswap32(i);
5942 break;
5946 unlock_user_struct(target_ldt_info, ptr, 1);
5948 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5949 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5950 return -TARGET_EINVAL;
5951 seg_32bit = ldt_info.flags & 1;
5952 contents = (ldt_info.flags >> 1) & 3;
5953 read_exec_only = (ldt_info.flags >> 3) & 1;
5954 limit_in_pages = (ldt_info.flags >> 4) & 1;
5955 seg_not_present = (ldt_info.flags >> 5) & 1;
5956 useable = (ldt_info.flags >> 6) & 1;
5957 #ifdef TARGET_ABI32
5958 lm = 0;
5959 #else
5960 lm = (ldt_info.flags >> 7) & 1;
5961 #endif
5963 if (contents == 3) {
5964 if (seg_not_present == 0)
5965 return -TARGET_EINVAL;
5968 /* NOTE: same code as Linux kernel */
5969 /* Allow LDTs to be cleared by the user. */
5970 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5971 if ((contents == 0 &&
5972 read_exec_only == 1 &&
5973 seg_32bit == 0 &&
5974 limit_in_pages == 0 &&
5975 seg_not_present == 1 &&
5976 useable == 0 )) {
5977 entry_1 = 0;
5978 entry_2 = 0;
5979 goto install;
5983 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5984 (ldt_info.limit & 0x0ffff);
5985 entry_2 = (ldt_info.base_addr & 0xff000000) |
5986 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5987 (ldt_info.limit & 0xf0000) |
5988 ((read_exec_only ^ 1) << 9) |
5989 (contents << 10) |
5990 ((seg_not_present ^ 1) << 15) |
5991 (seg_32bit << 22) |
5992 (limit_in_pages << 23) |
5993 (useable << 20) |
5994 (lm << 21) |
5995 0x7000;
5997 /* Install the new entry ... */
5998 install:
5999 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6000 lp[0] = tswap32(entry_1);
6001 lp[1] = tswap32(entry_2);
6002 return 0;
6005 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6007 struct target_modify_ldt_ldt_s *target_ldt_info;
6008 uint64_t *gdt_table = g2h(env->gdt.base);
6009 uint32_t base_addr, limit, flags;
6010 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6011 int seg_not_present, useable, lm;
6012 uint32_t *lp, entry_1, entry_2;
6014 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6015 if (!target_ldt_info)
6016 return -TARGET_EFAULT;
6017 idx = tswap32(target_ldt_info->entry_number);
6018 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6019 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6020 unlock_user_struct(target_ldt_info, ptr, 1);
6021 return -TARGET_EINVAL;
6023 lp = (uint32_t *)(gdt_table + idx);
6024 entry_1 = tswap32(lp[0]);
6025 entry_2 = tswap32(lp[1]);
6027 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6028 contents = (entry_2 >> 10) & 3;
6029 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6030 seg_32bit = (entry_2 >> 22) & 1;
6031 limit_in_pages = (entry_2 >> 23) & 1;
6032 useable = (entry_2 >> 20) & 1;
6033 #ifdef TARGET_ABI32
6034 lm = 0;
6035 #else
6036 lm = (entry_2 >> 21) & 1;
6037 #endif
6038 flags = (seg_32bit << 0) | (contents << 1) |
6039 (read_exec_only << 3) | (limit_in_pages << 4) |
6040 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6041 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6042 base_addr = (entry_1 >> 16) |
6043 (entry_2 & 0xff000000) |
6044 ((entry_2 & 0xff) << 16);
6045 target_ldt_info->base_addr = tswapal(base_addr);
6046 target_ldt_info->limit = tswap32(limit);
6047 target_ldt_info->flags = tswap32(flags);
6048 unlock_user_struct(target_ldt_info, ptr, 1);
6049 return 0;
6052 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6054 return -TARGET_ENOSYS;
6056 #else
6057 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6059 abi_long ret = 0;
6060 abi_ulong val;
6061 int idx;
6063 switch(code) {
6064 case TARGET_ARCH_SET_GS:
6065 case TARGET_ARCH_SET_FS:
6066 if (code == TARGET_ARCH_SET_GS)
6067 idx = R_GS;
6068 else
6069 idx = R_FS;
6070 cpu_x86_load_seg(env, idx, 0);
6071 env->segs[idx].base = addr;
6072 break;
6073 case TARGET_ARCH_GET_GS:
6074 case TARGET_ARCH_GET_FS:
6075 if (code == TARGET_ARCH_GET_GS)
6076 idx = R_GS;
6077 else
6078 idx = R_FS;
6079 val = env->segs[idx].base;
6080 if (put_user(val, addr, abi_ulong))
6081 ret = -TARGET_EFAULT;
6082 break;
6083 default:
6084 ret = -TARGET_EINVAL;
6085 break;
6087 return ret;
6089 #endif /* defined(TARGET_ABI32 */
6091 #endif /* defined(TARGET_I386) */
6093 #define NEW_STACK_SIZE 0x40000
6096 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6097 typedef struct {
6098 CPUArchState *env;
6099 pthread_mutex_t mutex;
6100 pthread_cond_t cond;
6101 pthread_t thread;
6102 uint32_t tid;
6103 abi_ulong child_tidptr;
6104 abi_ulong parent_tidptr;
6105 sigset_t sigmask;
6106 } new_thread_info;
6108 static void * QEMU_NORETURN clone_func(void *arg)
6110 new_thread_info *info = arg;
6111 CPUArchState *env;
6112 CPUState *cpu;
6113 TaskState *ts;
6115 rcu_register_thread();
6116 tcg_register_thread();
6117 env = info->env;
6118 cpu = env_cpu(env);
6119 thread_cpu = cpu;
6120 ts = (TaskState *)cpu->opaque;
6121 info->tid = sys_gettid();
6122 task_settid(ts);
6123 if (info->child_tidptr)
6124 put_user_u32(info->tid, info->child_tidptr);
6125 if (info->parent_tidptr)
6126 put_user_u32(info->tid, info->parent_tidptr);
6127 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6128 /* Enable signals. */
6129 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6130 /* Signal to the parent that we're ready. */
6131 pthread_mutex_lock(&info->mutex);
6132 pthread_cond_broadcast(&info->cond);
6133 pthread_mutex_unlock(&info->mutex);
6134 /* Wait until the parent has finished initializing the tls state. */
6135 pthread_mutex_lock(&clone_lock);
6136 pthread_mutex_unlock(&clone_lock);
6137 cpu_loop(env);
6138 /* never exits */
6141 /* do_fork() Must return host values and target errnos (unlike most
6142 do_*() functions). */
6143 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6144 abi_ulong parent_tidptr, target_ulong newtls,
6145 abi_ulong child_tidptr)
6147 CPUState *cpu = env_cpu(env);
6148 int ret;
6149 TaskState *ts;
6150 CPUState *new_cpu;
6151 CPUArchState *new_env;
6152 sigset_t sigmask;
6154 flags &= ~CLONE_IGNORED_FLAGS;
6156 /* Emulate vfork() with fork() */
6157 if (flags & CLONE_VFORK)
6158 flags &= ~(CLONE_VFORK | CLONE_VM);
6160 if (flags & CLONE_VM) {
6161 TaskState *parent_ts = (TaskState *)cpu->opaque;
6162 new_thread_info info;
6163 pthread_attr_t attr;
6165 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6166 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6167 return -TARGET_EINVAL;
6170 ts = g_new0(TaskState, 1);
6171 init_task_state(ts);
6173 /* Grab a mutex so that thread setup appears atomic. */
6174 pthread_mutex_lock(&clone_lock);
6176 /* we create a new CPU instance. */
6177 new_env = cpu_copy(env);
6178 /* Init regs that differ from the parent. */
6179 cpu_clone_regs_child(new_env, newsp, flags);
6180 cpu_clone_regs_parent(env, flags);
6181 new_cpu = env_cpu(new_env);
6182 new_cpu->opaque = ts;
6183 ts->bprm = parent_ts->bprm;
6184 ts->info = parent_ts->info;
6185 ts->signal_mask = parent_ts->signal_mask;
6187 if (flags & CLONE_CHILD_CLEARTID) {
6188 ts->child_tidptr = child_tidptr;
6191 if (flags & CLONE_SETTLS) {
6192 cpu_set_tls (new_env, newtls);
6195 memset(&info, 0, sizeof(info));
6196 pthread_mutex_init(&info.mutex, NULL);
6197 pthread_mutex_lock(&info.mutex);
6198 pthread_cond_init(&info.cond, NULL);
6199 info.env = new_env;
6200 if (flags & CLONE_CHILD_SETTID) {
6201 info.child_tidptr = child_tidptr;
6203 if (flags & CLONE_PARENT_SETTID) {
6204 info.parent_tidptr = parent_tidptr;
6207 ret = pthread_attr_init(&attr);
6208 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6209 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6210 /* It is not safe to deliver signals until the child has finished
6211 initializing, so temporarily block all signals. */
6212 sigfillset(&sigmask);
6213 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6214 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6216 /* If this is our first additional thread, we need to ensure we
6217 * generate code for parallel execution and flush old translations.
6219 if (!parallel_cpus) {
6220 parallel_cpus = true;
6221 tb_flush(cpu);
6224 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6225 /* TODO: Free new CPU state if thread creation failed. */
6227 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6228 pthread_attr_destroy(&attr);
6229 if (ret == 0) {
6230 /* Wait for the child to initialize. */
6231 pthread_cond_wait(&info.cond, &info.mutex);
6232 ret = info.tid;
6233 } else {
6234 ret = -1;
6236 pthread_mutex_unlock(&info.mutex);
6237 pthread_cond_destroy(&info.cond);
6238 pthread_mutex_destroy(&info.mutex);
6239 pthread_mutex_unlock(&clone_lock);
6240 } else {
6241 /* if no CLONE_VM, we consider it is a fork */
6242 if (flags & CLONE_INVALID_FORK_FLAGS) {
6243 return -TARGET_EINVAL;
6246 /* We can't support custom termination signals */
6247 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6248 return -TARGET_EINVAL;
6251 if (block_signals()) {
6252 return -TARGET_ERESTARTSYS;
6255 fork_start();
6256 ret = fork();
6257 if (ret == 0) {
6258 /* Child Process. */
6259 cpu_clone_regs_child(env, newsp, flags);
6260 fork_end(1);
6261 /* There is a race condition here. The parent process could
6262 theoretically read the TID in the child process before the child
6263 tid is set. This would require using either ptrace
6264 (not implemented) or having *_tidptr to point at a shared memory
6265 mapping. We can't repeat the spinlock hack used above because
6266 the child process gets its own copy of the lock. */
6267 if (flags & CLONE_CHILD_SETTID)
6268 put_user_u32(sys_gettid(), child_tidptr);
6269 if (flags & CLONE_PARENT_SETTID)
6270 put_user_u32(sys_gettid(), parent_tidptr);
6271 ts = (TaskState *)cpu->opaque;
6272 if (flags & CLONE_SETTLS)
6273 cpu_set_tls (env, newtls);
6274 if (flags & CLONE_CHILD_CLEARTID)
6275 ts->child_tidptr = child_tidptr;
6276 } else {
6277 cpu_clone_regs_parent(env, flags);
6278 fork_end(0);
6281 return ret;
6284 /* warning : doesn't handle linux specific flags... */
6285 static int target_to_host_fcntl_cmd(int cmd)
6287 int ret;
6289 switch(cmd) {
6290 case TARGET_F_DUPFD:
6291 case TARGET_F_GETFD:
6292 case TARGET_F_SETFD:
6293 case TARGET_F_GETFL:
6294 case TARGET_F_SETFL:
6295 case TARGET_F_OFD_GETLK:
6296 case TARGET_F_OFD_SETLK:
6297 case TARGET_F_OFD_SETLKW:
6298 ret = cmd;
6299 break;
6300 case TARGET_F_GETLK:
6301 ret = F_GETLK64;
6302 break;
6303 case TARGET_F_SETLK:
6304 ret = F_SETLK64;
6305 break;
6306 case TARGET_F_SETLKW:
6307 ret = F_SETLKW64;
6308 break;
6309 case TARGET_F_GETOWN:
6310 ret = F_GETOWN;
6311 break;
6312 case TARGET_F_SETOWN:
6313 ret = F_SETOWN;
6314 break;
6315 case TARGET_F_GETSIG:
6316 ret = F_GETSIG;
6317 break;
6318 case TARGET_F_SETSIG:
6319 ret = F_SETSIG;
6320 break;
6321 #if TARGET_ABI_BITS == 32
6322 case TARGET_F_GETLK64:
6323 ret = F_GETLK64;
6324 break;
6325 case TARGET_F_SETLK64:
6326 ret = F_SETLK64;
6327 break;
6328 case TARGET_F_SETLKW64:
6329 ret = F_SETLKW64;
6330 break;
6331 #endif
6332 case TARGET_F_SETLEASE:
6333 ret = F_SETLEASE;
6334 break;
6335 case TARGET_F_GETLEASE:
6336 ret = F_GETLEASE;
6337 break;
6338 #ifdef F_DUPFD_CLOEXEC
6339 case TARGET_F_DUPFD_CLOEXEC:
6340 ret = F_DUPFD_CLOEXEC;
6341 break;
6342 #endif
6343 case TARGET_F_NOTIFY:
6344 ret = F_NOTIFY;
6345 break;
6346 #ifdef F_GETOWN_EX
6347 case TARGET_F_GETOWN_EX:
6348 ret = F_GETOWN_EX;
6349 break;
6350 #endif
6351 #ifdef F_SETOWN_EX
6352 case TARGET_F_SETOWN_EX:
6353 ret = F_SETOWN_EX;
6354 break;
6355 #endif
6356 #ifdef F_SETPIPE_SZ
6357 case TARGET_F_SETPIPE_SZ:
6358 ret = F_SETPIPE_SZ;
6359 break;
6360 case TARGET_F_GETPIPE_SZ:
6361 ret = F_GETPIPE_SZ;
6362 break;
6363 #endif
6364 default:
6365 ret = -TARGET_EINVAL;
6366 break;
6369 #if defined(__powerpc64__)
6370 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6371 * is not supported by kernel. The glibc fcntl call actually adjusts
6372 * them to 5, 6 and 7 before making the syscall(). Since we make the
6373 * syscall directly, adjust to what is supported by the kernel.
6375 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6376 ret -= F_GETLK64 - 5;
6378 #endif
6380 return ret;
6383 #define FLOCK_TRANSTBL \
6384 switch (type) { \
6385 TRANSTBL_CONVERT(F_RDLCK); \
6386 TRANSTBL_CONVERT(F_WRLCK); \
6387 TRANSTBL_CONVERT(F_UNLCK); \
6388 TRANSTBL_CONVERT(F_EXLCK); \
6389 TRANSTBL_CONVERT(F_SHLCK); \
6392 static int target_to_host_flock(int type)
6394 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6395 FLOCK_TRANSTBL
6396 #undef TRANSTBL_CONVERT
6397 return -TARGET_EINVAL;
6400 static int host_to_target_flock(int type)
6402 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6403 FLOCK_TRANSTBL
6404 #undef TRANSTBL_CONVERT
6405 /* if we don't know how to convert the value coming
6406 * from the host we copy to the target field as-is
6408 return type;
6411 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6412 abi_ulong target_flock_addr)
6414 struct target_flock *target_fl;
6415 int l_type;
6417 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6418 return -TARGET_EFAULT;
6421 __get_user(l_type, &target_fl->l_type);
6422 l_type = target_to_host_flock(l_type);
6423 if (l_type < 0) {
6424 return l_type;
6426 fl->l_type = l_type;
6427 __get_user(fl->l_whence, &target_fl->l_whence);
6428 __get_user(fl->l_start, &target_fl->l_start);
6429 __get_user(fl->l_len, &target_fl->l_len);
6430 __get_user(fl->l_pid, &target_fl->l_pid);
6431 unlock_user_struct(target_fl, target_flock_addr, 0);
6432 return 0;
6435 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6436 const struct flock64 *fl)
6438 struct target_flock *target_fl;
6439 short l_type;
6441 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6442 return -TARGET_EFAULT;
6445 l_type = host_to_target_flock(fl->l_type);
6446 __put_user(l_type, &target_fl->l_type);
6447 __put_user(fl->l_whence, &target_fl->l_whence);
6448 __put_user(fl->l_start, &target_fl->l_start);
6449 __put_user(fl->l_len, &target_fl->l_len);
6450 __put_user(fl->l_pid, &target_fl->l_pid);
6451 unlock_user_struct(target_fl, target_flock_addr, 1);
6452 return 0;
6455 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6456 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6458 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6459 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6460 abi_ulong target_flock_addr)
6462 struct target_oabi_flock64 *target_fl;
6463 int l_type;
6465 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6466 return -TARGET_EFAULT;
6469 __get_user(l_type, &target_fl->l_type);
6470 l_type = target_to_host_flock(l_type);
6471 if (l_type < 0) {
6472 return l_type;
6474 fl->l_type = l_type;
6475 __get_user(fl->l_whence, &target_fl->l_whence);
6476 __get_user(fl->l_start, &target_fl->l_start);
6477 __get_user(fl->l_len, &target_fl->l_len);
6478 __get_user(fl->l_pid, &target_fl->l_pid);
6479 unlock_user_struct(target_fl, target_flock_addr, 0);
6480 return 0;
6483 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6484 const struct flock64 *fl)
6486 struct target_oabi_flock64 *target_fl;
6487 short l_type;
6489 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6490 return -TARGET_EFAULT;
6493 l_type = host_to_target_flock(fl->l_type);
6494 __put_user(l_type, &target_fl->l_type);
6495 __put_user(fl->l_whence, &target_fl->l_whence);
6496 __put_user(fl->l_start, &target_fl->l_start);
6497 __put_user(fl->l_len, &target_fl->l_len);
6498 __put_user(fl->l_pid, &target_fl->l_pid);
6499 unlock_user_struct(target_fl, target_flock_addr, 1);
6500 return 0;
6502 #endif
6504 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6505 abi_ulong target_flock_addr)
6507 struct target_flock64 *target_fl;
6508 int l_type;
6510 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6511 return -TARGET_EFAULT;
6514 __get_user(l_type, &target_fl->l_type);
6515 l_type = target_to_host_flock(l_type);
6516 if (l_type < 0) {
6517 return l_type;
6519 fl->l_type = l_type;
6520 __get_user(fl->l_whence, &target_fl->l_whence);
6521 __get_user(fl->l_start, &target_fl->l_start);
6522 __get_user(fl->l_len, &target_fl->l_len);
6523 __get_user(fl->l_pid, &target_fl->l_pid);
6524 unlock_user_struct(target_fl, target_flock_addr, 0);
6525 return 0;
6528 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6529 const struct flock64 *fl)
6531 struct target_flock64 *target_fl;
6532 short l_type;
6534 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6535 return -TARGET_EFAULT;
6538 l_type = host_to_target_flock(fl->l_type);
6539 __put_user(l_type, &target_fl->l_type);
6540 __put_user(fl->l_whence, &target_fl->l_whence);
6541 __put_user(fl->l_start, &target_fl->l_start);
6542 __put_user(fl->l_len, &target_fl->l_len);
6543 __put_user(fl->l_pid, &target_fl->l_pid);
6544 unlock_user_struct(target_fl, target_flock_addr, 1);
6545 return 0;
6548 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6550 struct flock64 fl64;
6551 #ifdef F_GETOWN_EX
6552 struct f_owner_ex fox;
6553 struct target_f_owner_ex *target_fox;
6554 #endif
6555 abi_long ret;
6556 int host_cmd = target_to_host_fcntl_cmd(cmd);
6558 if (host_cmd == -TARGET_EINVAL)
6559 return host_cmd;
6561 switch(cmd) {
6562 case TARGET_F_GETLK:
6563 ret = copy_from_user_flock(&fl64, arg);
6564 if (ret) {
6565 return ret;
6567 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6568 if (ret == 0) {
6569 ret = copy_to_user_flock(arg, &fl64);
6571 break;
6573 case TARGET_F_SETLK:
6574 case TARGET_F_SETLKW:
6575 ret = copy_from_user_flock(&fl64, arg);
6576 if (ret) {
6577 return ret;
6579 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6580 break;
6582 case TARGET_F_GETLK64:
6583 case TARGET_F_OFD_GETLK:
6584 ret = copy_from_user_flock64(&fl64, arg);
6585 if (ret) {
6586 return ret;
6588 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6589 if (ret == 0) {
6590 ret = copy_to_user_flock64(arg, &fl64);
6592 break;
6593 case TARGET_F_SETLK64:
6594 case TARGET_F_SETLKW64:
6595 case TARGET_F_OFD_SETLK:
6596 case TARGET_F_OFD_SETLKW:
6597 ret = copy_from_user_flock64(&fl64, arg);
6598 if (ret) {
6599 return ret;
6601 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6602 break;
6604 case TARGET_F_GETFL:
6605 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6606 if (ret >= 0) {
6607 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6609 break;
6611 case TARGET_F_SETFL:
6612 ret = get_errno(safe_fcntl(fd, host_cmd,
6613 target_to_host_bitmask(arg,
6614 fcntl_flags_tbl)));
6615 break;
6617 #ifdef F_GETOWN_EX
6618 case TARGET_F_GETOWN_EX:
6619 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6620 if (ret >= 0) {
6621 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6622 return -TARGET_EFAULT;
6623 target_fox->type = tswap32(fox.type);
6624 target_fox->pid = tswap32(fox.pid);
6625 unlock_user_struct(target_fox, arg, 1);
6627 break;
6628 #endif
6630 #ifdef F_SETOWN_EX
6631 case TARGET_F_SETOWN_EX:
6632 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6633 return -TARGET_EFAULT;
6634 fox.type = tswap32(target_fox->type);
6635 fox.pid = tswap32(target_fox->pid);
6636 unlock_user_struct(target_fox, arg, 0);
6637 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6638 break;
6639 #endif
6641 case TARGET_F_SETOWN:
6642 case TARGET_F_GETOWN:
6643 case TARGET_F_SETSIG:
6644 case TARGET_F_GETSIG:
6645 case TARGET_F_SETLEASE:
6646 case TARGET_F_GETLEASE:
6647 case TARGET_F_SETPIPE_SZ:
6648 case TARGET_F_GETPIPE_SZ:
6649 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6650 break;
6652 default:
6653 ret = get_errno(safe_fcntl(fd, cmd, arg));
6654 break;
6656 return ret;
6659 #ifdef USE_UID16
6661 static inline int high2lowuid(int uid)
6663 if (uid > 65535)
6664 return 65534;
6665 else
6666 return uid;
6669 static inline int high2lowgid(int gid)
6671 if (gid > 65535)
6672 return 65534;
6673 else
6674 return gid;
6677 static inline int low2highuid(int uid)
6679 if ((int16_t)uid == -1)
6680 return -1;
6681 else
6682 return uid;
6685 static inline int low2highgid(int gid)
6687 if ((int16_t)gid == -1)
6688 return -1;
6689 else
6690 return gid;
6692 static inline int tswapid(int id)
6694 return tswap16(id);
6697 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6699 #else /* !USE_UID16 */
6700 static inline int high2lowuid(int uid)
6702 return uid;
6704 static inline int high2lowgid(int gid)
6706 return gid;
6708 static inline int low2highuid(int uid)
6710 return uid;
6712 static inline int low2highgid(int gid)
6714 return gid;
6716 static inline int tswapid(int id)
6718 return tswap32(id);
6721 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6723 #endif /* USE_UID16 */
6725 /* We must do direct syscalls for setting UID/GID, because we want to
6726 * implement the Linux system call semantics of "change only for this thread",
6727 * not the libc/POSIX semantics of "change for all threads in process".
6728 * (See http://ewontfix.com/17/ for more details.)
6729 * We use the 32-bit version of the syscalls if present; if it is not
6730 * then either the host architecture supports 32-bit UIDs natively with
6731 * the standard syscall, or the 16-bit UID is the best we can do.
6733 #ifdef __NR_setuid32
6734 #define __NR_sys_setuid __NR_setuid32
6735 #else
6736 #define __NR_sys_setuid __NR_setuid
6737 #endif
6738 #ifdef __NR_setgid32
6739 #define __NR_sys_setgid __NR_setgid32
6740 #else
6741 #define __NR_sys_setgid __NR_setgid
6742 #endif
6743 #ifdef __NR_setresuid32
6744 #define __NR_sys_setresuid __NR_setresuid32
6745 #else
6746 #define __NR_sys_setresuid __NR_setresuid
6747 #endif
6748 #ifdef __NR_setresgid32
6749 #define __NR_sys_setresgid __NR_setresgid32
6750 #else
6751 #define __NR_sys_setresgid __NR_setresgid
6752 #endif
6754 _syscall1(int, sys_setuid, uid_t, uid)
6755 _syscall1(int, sys_setgid, gid_t, gid)
6756 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6757 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6759 void syscall_init(void)
6761 IOCTLEntry *ie;
6762 const argtype *arg_type;
6763 int size;
6764 int i;
6766 thunk_init(STRUCT_MAX);
6768 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6769 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6770 #include "syscall_types.h"
6771 #undef STRUCT
6772 #undef STRUCT_SPECIAL
6774 /* Build target_to_host_errno_table[] table from
6775 * host_to_target_errno_table[]. */
6776 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6777 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6780 /* we patch the ioctl size if necessary. We rely on the fact that
6781 no ioctl has all the bits at '1' in the size field */
6782 ie = ioctl_entries;
6783 while (ie->target_cmd != 0) {
6784 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6785 TARGET_IOC_SIZEMASK) {
6786 arg_type = ie->arg_type;
6787 if (arg_type[0] != TYPE_PTR) {
6788 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6789 ie->target_cmd);
6790 exit(1);
6792 arg_type++;
6793 size = thunk_type_size(arg_type, 0);
6794 ie->target_cmd = (ie->target_cmd &
6795 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6796 (size << TARGET_IOC_SIZESHIFT);
6799 /* automatic consistency check if same arch */
6800 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6801 (defined(__x86_64__) && defined(TARGET_X86_64))
6802 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6803 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6804 ie->name, ie->target_cmd, ie->host_cmd);
6806 #endif
6807 ie++;
6811 #ifdef TARGET_NR_truncate64
6812 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6813 abi_long arg2,
6814 abi_long arg3,
6815 abi_long arg4)
6817 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6818 arg2 = arg3;
6819 arg3 = arg4;
6821 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6823 #endif
6825 #ifdef TARGET_NR_ftruncate64
6826 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6827 abi_long arg2,
6828 abi_long arg3,
6829 abi_long arg4)
6831 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6832 arg2 = arg3;
6833 arg3 = arg4;
6835 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6837 #endif
6839 #if defined(TARGET_NR_timer_settime) || \
6840 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6841 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6842 abi_ulong target_addr)
6844 if (target_to_host_timespec(&host_its->it_interval, target_addr +
6845 offsetof(struct target_itimerspec,
6846 it_interval)) ||
6847 target_to_host_timespec(&host_its->it_value, target_addr +
6848 offsetof(struct target_itimerspec,
6849 it_value))) {
6850 return -TARGET_EFAULT;
6853 return 0;
6855 #endif
6857 #if defined(TARGET_NR_timer_settime64) || \
6858 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6859 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6860 abi_ulong target_addr)
6862 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6863 offsetof(struct target__kernel_itimerspec,
6864 it_interval)) ||
6865 target_to_host_timespec64(&host_its->it_value, target_addr +
6866 offsetof(struct target__kernel_itimerspec,
6867 it_value))) {
6868 return -TARGET_EFAULT;
6871 return 0;
6873 #endif
6875 #if ((defined(TARGET_NR_timerfd_gettime) || \
6876 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6877 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6878 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6879 struct itimerspec *host_its)
6881 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6882 it_interval),
6883 &host_its->it_interval) ||
6884 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6885 it_value),
6886 &host_its->it_value)) {
6887 return -TARGET_EFAULT;
6889 return 0;
6891 #endif
6893 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6894 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6895 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6896 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6897 struct itimerspec *host_its)
6899 if (host_to_target_timespec64(target_addr +
6900 offsetof(struct target__kernel_itimerspec,
6901 it_interval),
6902 &host_its->it_interval) ||
6903 host_to_target_timespec64(target_addr +
6904 offsetof(struct target__kernel_itimerspec,
6905 it_value),
6906 &host_its->it_value)) {
6907 return -TARGET_EFAULT;
6909 return 0;
6911 #endif
6913 #if defined(TARGET_NR_adjtimex) || \
6914 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6915 static inline abi_long target_to_host_timex(struct timex *host_tx,
6916 abi_long target_addr)
6918 struct target_timex *target_tx;
6920 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6921 return -TARGET_EFAULT;
6924 __get_user(host_tx->modes, &target_tx->modes);
6925 __get_user(host_tx->offset, &target_tx->offset);
6926 __get_user(host_tx->freq, &target_tx->freq);
6927 __get_user(host_tx->maxerror, &target_tx->maxerror);
6928 __get_user(host_tx->esterror, &target_tx->esterror);
6929 __get_user(host_tx->status, &target_tx->status);
6930 __get_user(host_tx->constant, &target_tx->constant);
6931 __get_user(host_tx->precision, &target_tx->precision);
6932 __get_user(host_tx->tolerance, &target_tx->tolerance);
6933 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6934 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6935 __get_user(host_tx->tick, &target_tx->tick);
6936 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6937 __get_user(host_tx->jitter, &target_tx->jitter);
6938 __get_user(host_tx->shift, &target_tx->shift);
6939 __get_user(host_tx->stabil, &target_tx->stabil);
6940 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6941 __get_user(host_tx->calcnt, &target_tx->calcnt);
6942 __get_user(host_tx->errcnt, &target_tx->errcnt);
6943 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6944 __get_user(host_tx->tai, &target_tx->tai);
6946 unlock_user_struct(target_tx, target_addr, 0);
6947 return 0;
6950 static inline abi_long host_to_target_timex(abi_long target_addr,
6951 struct timex *host_tx)
6953 struct target_timex *target_tx;
6955 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6956 return -TARGET_EFAULT;
6959 __put_user(host_tx->modes, &target_tx->modes);
6960 __put_user(host_tx->offset, &target_tx->offset);
6961 __put_user(host_tx->freq, &target_tx->freq);
6962 __put_user(host_tx->maxerror, &target_tx->maxerror);
6963 __put_user(host_tx->esterror, &target_tx->esterror);
6964 __put_user(host_tx->status, &target_tx->status);
6965 __put_user(host_tx->constant, &target_tx->constant);
6966 __put_user(host_tx->precision, &target_tx->precision);
6967 __put_user(host_tx->tolerance, &target_tx->tolerance);
6968 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6969 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6970 __put_user(host_tx->tick, &target_tx->tick);
6971 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6972 __put_user(host_tx->jitter, &target_tx->jitter);
6973 __put_user(host_tx->shift, &target_tx->shift);
6974 __put_user(host_tx->stabil, &target_tx->stabil);
6975 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6976 __put_user(host_tx->calcnt, &target_tx->calcnt);
6977 __put_user(host_tx->errcnt, &target_tx->errcnt);
6978 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6979 __put_user(host_tx->tai, &target_tx->tai);
6981 unlock_user_struct(target_tx, target_addr, 1);
6982 return 0;
6984 #endif
6987 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
6988 static inline abi_long target_to_host_timex64(struct timex *host_tx,
6989 abi_long target_addr)
6991 struct target__kernel_timex *target_tx;
6993 if (copy_from_user_timeval64(&host_tx->time, target_addr +
6994 offsetof(struct target__kernel_timex,
6995 time))) {
6996 return -TARGET_EFAULT;
6999 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7000 return -TARGET_EFAULT;
7003 __get_user(host_tx->modes, &target_tx->modes);
7004 __get_user(host_tx->offset, &target_tx->offset);
7005 __get_user(host_tx->freq, &target_tx->freq);
7006 __get_user(host_tx->maxerror, &target_tx->maxerror);
7007 __get_user(host_tx->esterror, &target_tx->esterror);
7008 __get_user(host_tx->status, &target_tx->status);
7009 __get_user(host_tx->constant, &target_tx->constant);
7010 __get_user(host_tx->precision, &target_tx->precision);
7011 __get_user(host_tx->tolerance, &target_tx->tolerance);
7012 __get_user(host_tx->tick, &target_tx->tick);
7013 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7014 __get_user(host_tx->jitter, &target_tx->jitter);
7015 __get_user(host_tx->shift, &target_tx->shift);
7016 __get_user(host_tx->stabil, &target_tx->stabil);
7017 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7018 __get_user(host_tx->calcnt, &target_tx->calcnt);
7019 __get_user(host_tx->errcnt, &target_tx->errcnt);
7020 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7021 __get_user(host_tx->tai, &target_tx->tai);
7023 unlock_user_struct(target_tx, target_addr, 0);
7024 return 0;
7027 static inline abi_long host_to_target_timex64(abi_long target_addr,
7028 struct timex *host_tx)
7030 struct target__kernel_timex *target_tx;
7032 if (copy_to_user_timeval64(target_addr +
7033 offsetof(struct target__kernel_timex, time),
7034 &host_tx->time)) {
7035 return -TARGET_EFAULT;
7038 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7039 return -TARGET_EFAULT;
7042 __put_user(host_tx->modes, &target_tx->modes);
7043 __put_user(host_tx->offset, &target_tx->offset);
7044 __put_user(host_tx->freq, &target_tx->freq);
7045 __put_user(host_tx->maxerror, &target_tx->maxerror);
7046 __put_user(host_tx->esterror, &target_tx->esterror);
7047 __put_user(host_tx->status, &target_tx->status);
7048 __put_user(host_tx->constant, &target_tx->constant);
7049 __put_user(host_tx->precision, &target_tx->precision);
7050 __put_user(host_tx->tolerance, &target_tx->tolerance);
7051 __put_user(host_tx->tick, &target_tx->tick);
7052 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7053 __put_user(host_tx->jitter, &target_tx->jitter);
7054 __put_user(host_tx->shift, &target_tx->shift);
7055 __put_user(host_tx->stabil, &target_tx->stabil);
7056 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7057 __put_user(host_tx->calcnt, &target_tx->calcnt);
7058 __put_user(host_tx->errcnt, &target_tx->errcnt);
7059 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7060 __put_user(host_tx->tai, &target_tx->tai);
7062 unlock_user_struct(target_tx, target_addr, 1);
7063 return 0;
7065 #endif
7067 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7068 abi_ulong target_addr)
7070 struct target_sigevent *target_sevp;
7072 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7073 return -TARGET_EFAULT;
7076 /* This union is awkward on 64 bit systems because it has a 32 bit
7077 * integer and a pointer in it; we follow the conversion approach
7078 * used for handling sigval types in signal.c so the guest should get
7079 * the correct value back even if we did a 64 bit byteswap and it's
7080 * using the 32 bit integer.
7082 host_sevp->sigev_value.sival_ptr =
7083 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7084 host_sevp->sigev_signo =
7085 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7086 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7087 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7089 unlock_user_struct(target_sevp, target_addr, 1);
7090 return 0;
7093 #if defined(TARGET_NR_mlockall)
7094 static inline int target_to_host_mlockall_arg(int arg)
7096 int result = 0;
7098 if (arg & TARGET_MCL_CURRENT) {
7099 result |= MCL_CURRENT;
7101 if (arg & TARGET_MCL_FUTURE) {
7102 result |= MCL_FUTURE;
7104 #ifdef MCL_ONFAULT
7105 if (arg & TARGET_MCL_ONFAULT) {
7106 result |= MCL_ONFAULT;
7108 #endif
7110 return result;
7112 #endif
7114 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7115 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7116 defined(TARGET_NR_newfstatat))
7117 static inline abi_long host_to_target_stat64(void *cpu_env,
7118 abi_ulong target_addr,
7119 struct stat *host_st)
7121 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7122 if (((CPUARMState *)cpu_env)->eabi) {
7123 struct target_eabi_stat64 *target_st;
7125 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7126 return -TARGET_EFAULT;
7127 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7128 __put_user(host_st->st_dev, &target_st->st_dev);
7129 __put_user(host_st->st_ino, &target_st->st_ino);
7130 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7131 __put_user(host_st->st_ino, &target_st->__st_ino);
7132 #endif
7133 __put_user(host_st->st_mode, &target_st->st_mode);
7134 __put_user(host_st->st_nlink, &target_st->st_nlink);
7135 __put_user(host_st->st_uid, &target_st->st_uid);
7136 __put_user(host_st->st_gid, &target_st->st_gid);
7137 __put_user(host_st->st_rdev, &target_st->st_rdev);
7138 __put_user(host_st->st_size, &target_st->st_size);
7139 __put_user(host_st->st_blksize, &target_st->st_blksize);
7140 __put_user(host_st->st_blocks, &target_st->st_blocks);
7141 __put_user(host_st->st_atime, &target_st->target_st_atime);
7142 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7143 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7144 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7145 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7146 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7147 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7148 #endif
7149 unlock_user_struct(target_st, target_addr, 1);
7150 } else
7151 #endif
7153 #if defined(TARGET_HAS_STRUCT_STAT64)
7154 struct target_stat64 *target_st;
7155 #else
7156 struct target_stat *target_st;
7157 #endif
7159 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7160 return -TARGET_EFAULT;
7161 memset(target_st, 0, sizeof(*target_st));
7162 __put_user(host_st->st_dev, &target_st->st_dev);
7163 __put_user(host_st->st_ino, &target_st->st_ino);
7164 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7165 __put_user(host_st->st_ino, &target_st->__st_ino);
7166 #endif
7167 __put_user(host_st->st_mode, &target_st->st_mode);
7168 __put_user(host_st->st_nlink, &target_st->st_nlink);
7169 __put_user(host_st->st_uid, &target_st->st_uid);
7170 __put_user(host_st->st_gid, &target_st->st_gid);
7171 __put_user(host_st->st_rdev, &target_st->st_rdev);
7172 /* XXX: better use of kernel struct */
7173 __put_user(host_st->st_size, &target_st->st_size);
7174 __put_user(host_st->st_blksize, &target_st->st_blksize);
7175 __put_user(host_st->st_blocks, &target_st->st_blocks);
7176 __put_user(host_st->st_atime, &target_st->target_st_atime);
7177 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7178 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7179 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7180 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7181 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7182 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7183 #endif
7184 unlock_user_struct(target_st, target_addr, 1);
7187 return 0;
7189 #endif
7191 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7192 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7193 abi_ulong target_addr)
7195 struct target_statx *target_stx;
7197 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7198 return -TARGET_EFAULT;
7200 memset(target_stx, 0, sizeof(*target_stx));
7202 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7203 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7204 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7205 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7206 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7207 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7208 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7209 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7210 __put_user(host_stx->stx_size, &target_stx->stx_size);
7211 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7212 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7213 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7214 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7215 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7216 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7217 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7218 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7219 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7220 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7221 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7222 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7223 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7224 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7226 unlock_user_struct(target_stx, target_addr, 1);
7228 return 0;
7230 #endif
7232 static int do_sys_futex(int *uaddr, int op, int val,
7233 const struct timespec *timeout, int *uaddr2,
7234 int val3)
7236 #if HOST_LONG_BITS == 64
7237 #if defined(__NR_futex)
7238 /* always a 64-bit time_t, it doesn't define _time64 version */
7239 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7241 #endif
7242 #else /* HOST_LONG_BITS == 64 */
7243 #if defined(__NR_futex_time64)
7244 if (sizeof(timeout->tv_sec) == 8) {
7245 /* _time64 function on 32bit arch */
7246 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7248 #endif
7249 #if defined(__NR_futex)
7250 /* old function on 32bit arch */
7251 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7252 #endif
7253 #endif /* HOST_LONG_BITS == 64 */
7254 g_assert_not_reached();
7257 static int do_safe_futex(int *uaddr, int op, int val,
7258 const struct timespec *timeout, int *uaddr2,
7259 int val3)
7261 #if HOST_LONG_BITS == 64
7262 #if defined(__NR_futex)
7263 /* always a 64-bit time_t, it doesn't define _time64 version */
7264 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7265 #endif
7266 #else /* HOST_LONG_BITS == 64 */
7267 #if defined(__NR_futex_time64)
7268 if (sizeof(timeout->tv_sec) == 8) {
7269 /* _time64 function on 32bit arch */
7270 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7271 val3));
7273 #endif
7274 #if defined(__NR_futex)
7275 /* old function on 32bit arch */
7276 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7277 #endif
7278 #endif /* HOST_LONG_BITS == 64 */
7279 return -TARGET_ENOSYS;
7282 /* ??? Using host futex calls even when target atomic operations
7283 are not really atomic probably breaks things. However implementing
7284 futexes locally would make futexes shared between multiple processes
7285 tricky. However they're probably useless because guest atomic
7286 operations won't work either. */
7287 #if defined(TARGET_NR_futex)
7288 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7289 target_ulong uaddr2, int val3)
7291 struct timespec ts, *pts;
7292 int base_op;
7294 /* ??? We assume FUTEX_* constants are the same on both host
7295 and target. */
7296 #ifdef FUTEX_CMD_MASK
7297 base_op = op & FUTEX_CMD_MASK;
7298 #else
7299 base_op = op;
7300 #endif
7301 switch (base_op) {
7302 case FUTEX_WAIT:
7303 case FUTEX_WAIT_BITSET:
7304 if (timeout) {
7305 pts = &ts;
7306 target_to_host_timespec(pts, timeout);
7307 } else {
7308 pts = NULL;
7310 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7311 case FUTEX_WAKE:
7312 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7313 case FUTEX_FD:
7314 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7315 case FUTEX_REQUEUE:
7316 case FUTEX_CMP_REQUEUE:
7317 case FUTEX_WAKE_OP:
7318 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7319 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7320 But the prototype takes a `struct timespec *'; insert casts
7321 to satisfy the compiler. We do not need to tswap TIMEOUT
7322 since it's not compared to guest memory. */
7323 pts = (struct timespec *)(uintptr_t) timeout;
7324 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7325 (base_op == FUTEX_CMP_REQUEUE
7326 ? tswap32(val3)
7327 : val3));
7328 default:
7329 return -TARGET_ENOSYS;
7332 #endif
7334 #if defined(TARGET_NR_futex_time64)
7335 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7336 target_ulong uaddr2, int val3)
7338 struct timespec ts, *pts;
7339 int base_op;
7341 /* ??? We assume FUTEX_* constants are the same on both host
7342 and target. */
7343 #ifdef FUTEX_CMD_MASK
7344 base_op = op & FUTEX_CMD_MASK;
7345 #else
7346 base_op = op;
7347 #endif
7348 switch (base_op) {
7349 case FUTEX_WAIT:
7350 case FUTEX_WAIT_BITSET:
7351 if (timeout) {
7352 pts = &ts;
7353 target_to_host_timespec64(pts, timeout);
7354 } else {
7355 pts = NULL;
7357 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7358 case FUTEX_WAKE:
7359 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7360 case FUTEX_FD:
7361 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7362 case FUTEX_REQUEUE:
7363 case FUTEX_CMP_REQUEUE:
7364 case FUTEX_WAKE_OP:
7365 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7366 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7367 But the prototype takes a `struct timespec *'; insert casts
7368 to satisfy the compiler. We do not need to tswap TIMEOUT
7369 since it's not compared to guest memory. */
7370 pts = (struct timespec *)(uintptr_t) timeout;
7371 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7372 (base_op == FUTEX_CMP_REQUEUE
7373 ? tswap32(val3)
7374 : val3));
7375 default:
7376 return -TARGET_ENOSYS;
7379 #endif
7381 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7382 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7383 abi_long handle, abi_long mount_id,
7384 abi_long flags)
7386 struct file_handle *target_fh;
7387 struct file_handle *fh;
7388 int mid = 0;
7389 abi_long ret;
7390 char *name;
7391 unsigned int size, total_size;
7393 if (get_user_s32(size, handle)) {
7394 return -TARGET_EFAULT;
7397 name = lock_user_string(pathname);
7398 if (!name) {
7399 return -TARGET_EFAULT;
7402 total_size = sizeof(struct file_handle) + size;
7403 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7404 if (!target_fh) {
7405 unlock_user(name, pathname, 0);
7406 return -TARGET_EFAULT;
7409 fh = g_malloc0(total_size);
7410 fh->handle_bytes = size;
7412 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7413 unlock_user(name, pathname, 0);
7415 /* man name_to_handle_at(2):
7416 * Other than the use of the handle_bytes field, the caller should treat
7417 * the file_handle structure as an opaque data type
7420 memcpy(target_fh, fh, total_size);
7421 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7422 target_fh->handle_type = tswap32(fh->handle_type);
7423 g_free(fh);
7424 unlock_user(target_fh, handle, total_size);
7426 if (put_user_s32(mid, mount_id)) {
7427 return -TARGET_EFAULT;
7430 return ret;
7433 #endif
7435 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7436 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7437 abi_long flags)
7439 struct file_handle *target_fh;
7440 struct file_handle *fh;
7441 unsigned int size, total_size;
7442 abi_long ret;
7444 if (get_user_s32(size, handle)) {
7445 return -TARGET_EFAULT;
7448 total_size = sizeof(struct file_handle) + size;
7449 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7450 if (!target_fh) {
7451 return -TARGET_EFAULT;
7454 fh = g_memdup(target_fh, total_size);
7455 fh->handle_bytes = size;
7456 fh->handle_type = tswap32(target_fh->handle_type);
7458 ret = get_errno(open_by_handle_at(mount_fd, fh,
7459 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7461 g_free(fh);
7463 unlock_user(target_fh, handle, total_size);
7465 return ret;
7467 #endif
7469 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7471 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7473 int host_flags;
7474 target_sigset_t *target_mask;
7475 sigset_t host_mask;
7476 abi_long ret;
7478 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7479 return -TARGET_EINVAL;
7481 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7482 return -TARGET_EFAULT;
7485 target_to_host_sigset(&host_mask, target_mask);
7487 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7489 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7490 if (ret >= 0) {
7491 fd_trans_register(ret, &target_signalfd_trans);
7494 unlock_user_struct(target_mask, mask, 0);
7496 return ret;
7498 #endif
7500 /* Map host to target signal numbers for the wait family of syscalls.
7501 Assume all other status bits are the same. */
7502 int host_to_target_waitstatus(int status)
7504 if (WIFSIGNALED(status)) {
7505 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7507 if (WIFSTOPPED(status)) {
7508 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7509 | (status & 0xff);
7511 return status;
7514 static int open_self_cmdline(void *cpu_env, int fd)
7516 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7517 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7518 int i;
7520 for (i = 0; i < bprm->argc; i++) {
7521 size_t len = strlen(bprm->argv[i]) + 1;
7523 if (write(fd, bprm->argv[i], len) != len) {
7524 return -1;
7528 return 0;
7531 static int open_self_maps(void *cpu_env, int fd)
7533 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7534 TaskState *ts = cpu->opaque;
7535 GSList *map_info = read_self_maps();
7536 GSList *s;
7537 int count;
7539 for (s = map_info; s; s = g_slist_next(s)) {
7540 MapInfo *e = (MapInfo *) s->data;
7542 if (h2g_valid(e->start)) {
7543 unsigned long min = e->start;
7544 unsigned long max = e->end;
7545 int flags = page_get_flags(h2g(min));
7546 const char *path;
7548 max = h2g_valid(max - 1) ?
7549 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7551 if (page_check_range(h2g(min), max - min, flags) == -1) {
7552 continue;
7555 if (h2g(min) == ts->info->stack_limit) {
7556 path = "[stack]";
7557 } else {
7558 path = e->path;
7561 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7562 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7563 h2g(min), h2g(max - 1) + 1,
7564 e->is_read ? 'r' : '-',
7565 e->is_write ? 'w' : '-',
7566 e->is_exec ? 'x' : '-',
7567 e->is_priv ? 'p' : '-',
7568 (uint64_t) e->offset, e->dev, e->inode);
7569 if (path) {
7570 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7571 } else {
7572 dprintf(fd, "\n");
7577 free_self_maps(map_info);
7579 #ifdef TARGET_VSYSCALL_PAGE
7581 * We only support execution from the vsyscall page.
7582 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7584 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7585 " --xp 00000000 00:00 0",
7586 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7587 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
7588 #endif
7590 return 0;
7593 static int open_self_stat(void *cpu_env, int fd)
7595 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7596 TaskState *ts = cpu->opaque;
7597 g_autoptr(GString) buf = g_string_new(NULL);
7598 int i;
7600 for (i = 0; i < 44; i++) {
7601 if (i == 0) {
7602 /* pid */
7603 g_string_printf(buf, FMT_pid " ", getpid());
7604 } else if (i == 1) {
7605 /* app name */
7606 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7607 bin = bin ? bin + 1 : ts->bprm->argv[0];
7608 g_string_printf(buf, "(%.15s) ", bin);
7609 } else if (i == 27) {
7610 /* stack bottom */
7611 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7612 } else {
7613 /* for the rest, there is MasterCard */
7614 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7617 if (write(fd, buf->str, buf->len) != buf->len) {
7618 return -1;
7622 return 0;
7625 static int open_self_auxv(void *cpu_env, int fd)
7627 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7628 TaskState *ts = cpu->opaque;
7629 abi_ulong auxv = ts->info->saved_auxv;
7630 abi_ulong len = ts->info->auxv_len;
7631 char *ptr;
7634 * Auxiliary vector is stored in target process stack.
7635 * read in whole auxv vector and copy it to file
7637 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7638 if (ptr != NULL) {
7639 while (len > 0) {
7640 ssize_t r;
7641 r = write(fd, ptr, len);
7642 if (r <= 0) {
7643 break;
7645 len -= r;
7646 ptr += r;
7648 lseek(fd, 0, SEEK_SET);
7649 unlock_user(ptr, auxv, len);
7652 return 0;
7655 static int is_proc_myself(const char *filename, const char *entry)
7657 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7658 filename += strlen("/proc/");
7659 if (!strncmp(filename, "self/", strlen("self/"))) {
7660 filename += strlen("self/");
7661 } else if (*filename >= '1' && *filename <= '9') {
7662 char myself[80];
7663 snprintf(myself, sizeof(myself), "%d/", getpid());
7664 if (!strncmp(filename, myself, strlen(myself))) {
7665 filename += strlen(myself);
7666 } else {
7667 return 0;
7669 } else {
7670 return 0;
7672 if (!strcmp(filename, entry)) {
7673 return 1;
7676 return 0;
7679 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7680 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7681 static int is_proc(const char *filename, const char *entry)
7683 return strcmp(filename, entry) == 0;
7685 #endif
7687 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7688 static int open_net_route(void *cpu_env, int fd)
7690 FILE *fp;
7691 char *line = NULL;
7692 size_t len = 0;
7693 ssize_t read;
7695 fp = fopen("/proc/net/route", "r");
7696 if (fp == NULL) {
7697 return -1;
7700 /* read header */
7702 read = getline(&line, &len, fp);
7703 dprintf(fd, "%s", line);
7705 /* read routes */
7707 while ((read = getline(&line, &len, fp)) != -1) {
7708 char iface[16];
7709 uint32_t dest, gw, mask;
7710 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7711 int fields;
7713 fields = sscanf(line,
7714 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7715 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7716 &mask, &mtu, &window, &irtt);
7717 if (fields != 11) {
7718 continue;
7720 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7721 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7722 metric, tswap32(mask), mtu, window, irtt);
7725 free(line);
7726 fclose(fp);
7728 return 0;
7730 #endif
7732 #if defined(TARGET_SPARC)
7733 static int open_cpuinfo(void *cpu_env, int fd)
7735 dprintf(fd, "type\t\t: sun4u\n");
7736 return 0;
7738 #endif
7740 #if defined(TARGET_HPPA)
7741 static int open_cpuinfo(void *cpu_env, int fd)
7743 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7744 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7745 dprintf(fd, "capabilities\t: os32\n");
7746 dprintf(fd, "model\t\t: 9000/778/B160L\n");
7747 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7748 return 0;
7750 #endif
7752 #if defined(TARGET_M68K)
7753 static int open_hardware(void *cpu_env, int fd)
7755 dprintf(fd, "Model:\t\tqemu-m68k\n");
7756 return 0;
7758 #endif
7760 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7762 struct fake_open {
7763 const char *filename;
7764 int (*fill)(void *cpu_env, int fd);
7765 int (*cmp)(const char *s1, const char *s2);
7767 const struct fake_open *fake_open;
7768 static const struct fake_open fakes[] = {
7769 { "maps", open_self_maps, is_proc_myself },
7770 { "stat", open_self_stat, is_proc_myself },
7771 { "auxv", open_self_auxv, is_proc_myself },
7772 { "cmdline", open_self_cmdline, is_proc_myself },
7773 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7774 { "/proc/net/route", open_net_route, is_proc },
7775 #endif
7776 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7777 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7778 #endif
7779 #if defined(TARGET_M68K)
7780 { "/proc/hardware", open_hardware, is_proc },
7781 #endif
7782 { NULL, NULL, NULL }
7785 if (is_proc_myself(pathname, "exe")) {
7786 int execfd = qemu_getauxval(AT_EXECFD);
7787 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7790 for (fake_open = fakes; fake_open->filename; fake_open++) {
7791 if (fake_open->cmp(pathname, fake_open->filename)) {
7792 break;
7796 if (fake_open->filename) {
7797 const char *tmpdir;
7798 char filename[PATH_MAX];
7799 int fd, r;
7801 /* create temporary file to map stat to */
7802 tmpdir = getenv("TMPDIR");
7803 if (!tmpdir)
7804 tmpdir = "/tmp";
7805 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7806 fd = mkstemp(filename);
7807 if (fd < 0) {
7808 return fd;
7810 unlink(filename);
7812 if ((r = fake_open->fill(cpu_env, fd))) {
7813 int e = errno;
7814 close(fd);
7815 errno = e;
7816 return r;
7818 lseek(fd, 0, SEEK_SET);
7820 return fd;
7823 return safe_openat(dirfd, path(pathname), flags, mode);
7826 #define TIMER_MAGIC 0x0caf0000
7827 #define TIMER_MAGIC_MASK 0xffff0000
7829 /* Convert QEMU provided timer ID back to internal 16bit index format */
7830 static target_timer_t get_timer_id(abi_long arg)
7832 target_timer_t timerid = arg;
7834 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7835 return -TARGET_EINVAL;
7838 timerid &= 0xffff;
7840 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7841 return -TARGET_EINVAL;
7844 return timerid;
7847 static int target_to_host_cpu_mask(unsigned long *host_mask,
7848 size_t host_size,
7849 abi_ulong target_addr,
7850 size_t target_size)
7852 unsigned target_bits = sizeof(abi_ulong) * 8;
7853 unsigned host_bits = sizeof(*host_mask) * 8;
7854 abi_ulong *target_mask;
7855 unsigned i, j;
7857 assert(host_size >= target_size);
7859 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7860 if (!target_mask) {
7861 return -TARGET_EFAULT;
7863 memset(host_mask, 0, host_size);
7865 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7866 unsigned bit = i * target_bits;
7867 abi_ulong val;
7869 __get_user(val, &target_mask[i]);
7870 for (j = 0; j < target_bits; j++, bit++) {
7871 if (val & (1UL << j)) {
7872 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7877 unlock_user(target_mask, target_addr, 0);
7878 return 0;
7881 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7882 size_t host_size,
7883 abi_ulong target_addr,
7884 size_t target_size)
7886 unsigned target_bits = sizeof(abi_ulong) * 8;
7887 unsigned host_bits = sizeof(*host_mask) * 8;
7888 abi_ulong *target_mask;
7889 unsigned i, j;
7891 assert(host_size >= target_size);
7893 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7894 if (!target_mask) {
7895 return -TARGET_EFAULT;
7898 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7899 unsigned bit = i * target_bits;
7900 abi_ulong val = 0;
7902 for (j = 0; j < target_bits; j++, bit++) {
7903 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7904 val |= 1UL << j;
7907 __put_user(val, &target_mask[i]);
7910 unlock_user(target_mask, target_addr, target_size);
7911 return 0;
7914 /* This is an internal helper for do_syscall so that it is easier
7915 * to have a single return point, so that actions, such as logging
7916 * of syscall results, can be performed.
7917 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7919 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7920 abi_long arg2, abi_long arg3, abi_long arg4,
7921 abi_long arg5, abi_long arg6, abi_long arg7,
7922 abi_long arg8)
7924 CPUState *cpu = env_cpu(cpu_env);
7925 abi_long ret;
7926 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7927 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7928 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7929 || defined(TARGET_NR_statx)
7930 struct stat st;
7931 #endif
7932 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7933 || defined(TARGET_NR_fstatfs)
7934 struct statfs stfs;
7935 #endif
7936 void *p;
7938 switch(num) {
7939 case TARGET_NR_exit:
7940 /* In old applications this may be used to implement _exit(2).
7941 However in threaded applictions it is used for thread termination,
7942 and _exit_group is used for application termination.
7943 Do thread termination if we have more then one thread. */
7945 if (block_signals()) {
7946 return -TARGET_ERESTARTSYS;
7949 pthread_mutex_lock(&clone_lock);
7951 if (CPU_NEXT(first_cpu)) {
7952 TaskState *ts = cpu->opaque;
7954 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7955 object_unref(OBJECT(cpu));
7957 * At this point the CPU should be unrealized and removed
7958 * from cpu lists. We can clean-up the rest of the thread
7959 * data without the lock held.
7962 pthread_mutex_unlock(&clone_lock);
7964 if (ts->child_tidptr) {
7965 put_user_u32(0, ts->child_tidptr);
7966 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7967 NULL, NULL, 0);
7969 thread_cpu = NULL;
7970 g_free(ts);
7971 rcu_unregister_thread();
7972 pthread_exit(NULL);
7975 pthread_mutex_unlock(&clone_lock);
7976 preexit_cleanup(cpu_env, arg1);
7977 _exit(arg1);
7978 return 0; /* avoid warning */
7979 case TARGET_NR_read:
7980 if (arg2 == 0 && arg3 == 0) {
7981 return get_errno(safe_read(arg1, 0, 0));
7982 } else {
7983 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7984 return -TARGET_EFAULT;
7985 ret = get_errno(safe_read(arg1, p, arg3));
7986 if (ret >= 0 &&
7987 fd_trans_host_to_target_data(arg1)) {
7988 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7990 unlock_user(p, arg2, ret);
7992 return ret;
7993 case TARGET_NR_write:
7994 if (arg2 == 0 && arg3 == 0) {
7995 return get_errno(safe_write(arg1, 0, 0));
7997 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7998 return -TARGET_EFAULT;
7999 if (fd_trans_target_to_host_data(arg1)) {
8000 void *copy = g_malloc(arg3);
8001 memcpy(copy, p, arg3);
8002 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8003 if (ret >= 0) {
8004 ret = get_errno(safe_write(arg1, copy, ret));
8006 g_free(copy);
8007 } else {
8008 ret = get_errno(safe_write(arg1, p, arg3));
8010 unlock_user(p, arg2, 0);
8011 return ret;
8013 #ifdef TARGET_NR_open
8014 case TARGET_NR_open:
8015 if (!(p = lock_user_string(arg1)))
8016 return -TARGET_EFAULT;
8017 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8018 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8019 arg3));
8020 fd_trans_unregister(ret);
8021 unlock_user(p, arg1, 0);
8022 return ret;
8023 #endif
8024 case TARGET_NR_openat:
8025 if (!(p = lock_user_string(arg2)))
8026 return -TARGET_EFAULT;
8027 ret = get_errno(do_openat(cpu_env, arg1, p,
8028 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8029 arg4));
8030 fd_trans_unregister(ret);
8031 unlock_user(p, arg2, 0);
8032 return ret;
8033 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8034 case TARGET_NR_name_to_handle_at:
8035 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8036 return ret;
8037 #endif
8038 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8039 case TARGET_NR_open_by_handle_at:
8040 ret = do_open_by_handle_at(arg1, arg2, arg3);
8041 fd_trans_unregister(ret);
8042 return ret;
8043 #endif
8044 case TARGET_NR_close:
8045 fd_trans_unregister(arg1);
8046 return get_errno(close(arg1));
8048 case TARGET_NR_brk:
8049 return do_brk(arg1);
8050 #ifdef TARGET_NR_fork
8051 case TARGET_NR_fork:
8052 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8053 #endif
8054 #ifdef TARGET_NR_waitpid
8055 case TARGET_NR_waitpid:
8057 int status;
8058 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8059 if (!is_error(ret) && arg2 && ret
8060 && put_user_s32(host_to_target_waitstatus(status), arg2))
8061 return -TARGET_EFAULT;
8063 return ret;
8064 #endif
8065 #ifdef TARGET_NR_waitid
8066 case TARGET_NR_waitid:
8068 siginfo_t info;
8069 info.si_pid = 0;
8070 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8071 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8072 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8073 return -TARGET_EFAULT;
8074 host_to_target_siginfo(p, &info);
8075 unlock_user(p, arg3, sizeof(target_siginfo_t));
8078 return ret;
8079 #endif
8080 #ifdef TARGET_NR_creat /* not on alpha */
8081 case TARGET_NR_creat:
8082 if (!(p = lock_user_string(arg1)))
8083 return -TARGET_EFAULT;
8084 ret = get_errno(creat(p, arg2));
8085 fd_trans_unregister(ret);
8086 unlock_user(p, arg1, 0);
8087 return ret;
8088 #endif
8089 #ifdef TARGET_NR_link
8090 case TARGET_NR_link:
8092 void * p2;
8093 p = lock_user_string(arg1);
8094 p2 = lock_user_string(arg2);
8095 if (!p || !p2)
8096 ret = -TARGET_EFAULT;
8097 else
8098 ret = get_errno(link(p, p2));
8099 unlock_user(p2, arg2, 0);
8100 unlock_user(p, arg1, 0);
8102 return ret;
8103 #endif
8104 #if defined(TARGET_NR_linkat)
8105 case TARGET_NR_linkat:
8107 void * p2 = NULL;
8108 if (!arg2 || !arg4)
8109 return -TARGET_EFAULT;
8110 p = lock_user_string(arg2);
8111 p2 = lock_user_string(arg4);
8112 if (!p || !p2)
8113 ret = -TARGET_EFAULT;
8114 else
8115 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8116 unlock_user(p, arg2, 0);
8117 unlock_user(p2, arg4, 0);
8119 return ret;
8120 #endif
8121 #ifdef TARGET_NR_unlink
8122 case TARGET_NR_unlink:
8123 if (!(p = lock_user_string(arg1)))
8124 return -TARGET_EFAULT;
8125 ret = get_errno(unlink(p));
8126 unlock_user(p, arg1, 0);
8127 return ret;
8128 #endif
8129 #if defined(TARGET_NR_unlinkat)
8130 case TARGET_NR_unlinkat:
8131 if (!(p = lock_user_string(arg2)))
8132 return -TARGET_EFAULT;
8133 ret = get_errno(unlinkat(arg1, p, arg3));
8134 unlock_user(p, arg2, 0);
8135 return ret;
8136 #endif
8137 case TARGET_NR_execve:
8139 char **argp, **envp;
8140 int argc, envc;
8141 abi_ulong gp;
8142 abi_ulong guest_argp;
8143 abi_ulong guest_envp;
8144 abi_ulong addr;
8145 char **q;
8146 int total_size = 0;
8148 argc = 0;
8149 guest_argp = arg2;
8150 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8151 if (get_user_ual(addr, gp))
8152 return -TARGET_EFAULT;
8153 if (!addr)
8154 break;
8155 argc++;
8157 envc = 0;
8158 guest_envp = arg3;
8159 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8160 if (get_user_ual(addr, gp))
8161 return -TARGET_EFAULT;
8162 if (!addr)
8163 break;
8164 envc++;
8167 argp = g_new0(char *, argc + 1);
8168 envp = g_new0(char *, envc + 1);
8170 for (gp = guest_argp, q = argp; gp;
8171 gp += sizeof(abi_ulong), q++) {
8172 if (get_user_ual(addr, gp))
8173 goto execve_efault;
8174 if (!addr)
8175 break;
8176 if (!(*q = lock_user_string(addr)))
8177 goto execve_efault;
8178 total_size += strlen(*q) + 1;
8180 *q = NULL;
8182 for (gp = guest_envp, q = envp; gp;
8183 gp += sizeof(abi_ulong), q++) {
8184 if (get_user_ual(addr, gp))
8185 goto execve_efault;
8186 if (!addr)
8187 break;
8188 if (!(*q = lock_user_string(addr)))
8189 goto execve_efault;
8190 total_size += strlen(*q) + 1;
8192 *q = NULL;
8194 if (!(p = lock_user_string(arg1)))
8195 goto execve_efault;
8196 /* Although execve() is not an interruptible syscall it is
8197 * a special case where we must use the safe_syscall wrapper:
8198 * if we allow a signal to happen before we make the host
8199 * syscall then we will 'lose' it, because at the point of
8200 * execve the process leaves QEMU's control. So we use the
8201 * safe syscall wrapper to ensure that we either take the
8202 * signal as a guest signal, or else it does not happen
8203 * before the execve completes and makes it the other
8204 * program's problem.
8206 ret = get_errno(safe_execve(p, argp, envp));
8207 unlock_user(p, arg1, 0);
8209 goto execve_end;
8211 execve_efault:
8212 ret = -TARGET_EFAULT;
8214 execve_end:
8215 for (gp = guest_argp, q = argp; *q;
8216 gp += sizeof(abi_ulong), q++) {
8217 if (get_user_ual(addr, gp)
8218 || !addr)
8219 break;
8220 unlock_user(*q, addr, 0);
8222 for (gp = guest_envp, q = envp; *q;
8223 gp += sizeof(abi_ulong), q++) {
8224 if (get_user_ual(addr, gp)
8225 || !addr)
8226 break;
8227 unlock_user(*q, addr, 0);
8230 g_free(argp);
8231 g_free(envp);
8233 return ret;
8234 case TARGET_NR_chdir:
8235 if (!(p = lock_user_string(arg1)))
8236 return -TARGET_EFAULT;
8237 ret = get_errno(chdir(p));
8238 unlock_user(p, arg1, 0);
8239 return ret;
8240 #ifdef TARGET_NR_time
8241 case TARGET_NR_time:
8243 time_t host_time;
8244 ret = get_errno(time(&host_time));
8245 if (!is_error(ret)
8246 && arg1
8247 && put_user_sal(host_time, arg1))
8248 return -TARGET_EFAULT;
8250 return ret;
8251 #endif
8252 #ifdef TARGET_NR_mknod
8253 case TARGET_NR_mknod:
8254 if (!(p = lock_user_string(arg1)))
8255 return -TARGET_EFAULT;
8256 ret = get_errno(mknod(p, arg2, arg3));
8257 unlock_user(p, arg1, 0);
8258 return ret;
8259 #endif
8260 #if defined(TARGET_NR_mknodat)
8261 case TARGET_NR_mknodat:
8262 if (!(p = lock_user_string(arg2)))
8263 return -TARGET_EFAULT;
8264 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8265 unlock_user(p, arg2, 0);
8266 return ret;
8267 #endif
8268 #ifdef TARGET_NR_chmod
8269 case TARGET_NR_chmod:
8270 if (!(p = lock_user_string(arg1)))
8271 return -TARGET_EFAULT;
8272 ret = get_errno(chmod(p, arg2));
8273 unlock_user(p, arg1, 0);
8274 return ret;
8275 #endif
8276 #ifdef TARGET_NR_lseek
8277 case TARGET_NR_lseek:
8278 return get_errno(lseek(arg1, arg2, arg3));
8279 #endif
8280 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8281 /* Alpha specific */
8282 case TARGET_NR_getxpid:
8283 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8284 return get_errno(getpid());
8285 #endif
8286 #ifdef TARGET_NR_getpid
8287 case TARGET_NR_getpid:
8288 return get_errno(getpid());
8289 #endif
8290 case TARGET_NR_mount:
8292 /* need to look at the data field */
8293 void *p2, *p3;
8295 if (arg1) {
8296 p = lock_user_string(arg1);
8297 if (!p) {
8298 return -TARGET_EFAULT;
8300 } else {
8301 p = NULL;
8304 p2 = lock_user_string(arg2);
8305 if (!p2) {
8306 if (arg1) {
8307 unlock_user(p, arg1, 0);
8309 return -TARGET_EFAULT;
8312 if (arg3) {
8313 p3 = lock_user_string(arg3);
8314 if (!p3) {
8315 if (arg1) {
8316 unlock_user(p, arg1, 0);
8318 unlock_user(p2, arg2, 0);
8319 return -TARGET_EFAULT;
8321 } else {
8322 p3 = NULL;
8325 /* FIXME - arg5 should be locked, but it isn't clear how to
8326 * do that since it's not guaranteed to be a NULL-terminated
8327 * string.
8329 if (!arg5) {
8330 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8331 } else {
8332 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8334 ret = get_errno(ret);
8336 if (arg1) {
8337 unlock_user(p, arg1, 0);
8339 unlock_user(p2, arg2, 0);
8340 if (arg3) {
8341 unlock_user(p3, arg3, 0);
8344 return ret;
8345 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8346 #if defined(TARGET_NR_umount)
8347 case TARGET_NR_umount:
8348 #endif
8349 #if defined(TARGET_NR_oldumount)
8350 case TARGET_NR_oldumount:
8351 #endif
8352 if (!(p = lock_user_string(arg1)))
8353 return -TARGET_EFAULT;
8354 ret = get_errno(umount(p));
8355 unlock_user(p, arg1, 0);
8356 return ret;
8357 #endif
8358 #ifdef TARGET_NR_stime /* not on alpha */
8359 case TARGET_NR_stime:
8361 struct timespec ts;
8362 ts.tv_nsec = 0;
8363 if (get_user_sal(ts.tv_sec, arg1)) {
8364 return -TARGET_EFAULT;
8366 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8368 #endif
8369 #ifdef TARGET_NR_alarm /* not on alpha */
8370 case TARGET_NR_alarm:
8371 return alarm(arg1);
8372 #endif
8373 #ifdef TARGET_NR_pause /* not on alpha */
8374 case TARGET_NR_pause:
8375 if (!block_signals()) {
8376 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8378 return -TARGET_EINTR;
8379 #endif
8380 #ifdef TARGET_NR_utime
8381 case TARGET_NR_utime:
8383 struct utimbuf tbuf, *host_tbuf;
8384 struct target_utimbuf *target_tbuf;
8385 if (arg2) {
8386 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8387 return -TARGET_EFAULT;
8388 tbuf.actime = tswapal(target_tbuf->actime);
8389 tbuf.modtime = tswapal(target_tbuf->modtime);
8390 unlock_user_struct(target_tbuf, arg2, 0);
8391 host_tbuf = &tbuf;
8392 } else {
8393 host_tbuf = NULL;
8395 if (!(p = lock_user_string(arg1)))
8396 return -TARGET_EFAULT;
8397 ret = get_errno(utime(p, host_tbuf));
8398 unlock_user(p, arg1, 0);
8400 return ret;
8401 #endif
8402 #ifdef TARGET_NR_utimes
8403 case TARGET_NR_utimes:
8405 struct timeval *tvp, tv[2];
8406 if (arg2) {
8407 if (copy_from_user_timeval(&tv[0], arg2)
8408 || copy_from_user_timeval(&tv[1],
8409 arg2 + sizeof(struct target_timeval)))
8410 return -TARGET_EFAULT;
8411 tvp = tv;
8412 } else {
8413 tvp = NULL;
8415 if (!(p = lock_user_string(arg1)))
8416 return -TARGET_EFAULT;
8417 ret = get_errno(utimes(p, tvp));
8418 unlock_user(p, arg1, 0);
8420 return ret;
8421 #endif
8422 #if defined(TARGET_NR_futimesat)
8423 case TARGET_NR_futimesat:
8425 struct timeval *tvp, tv[2];
8426 if (arg3) {
8427 if (copy_from_user_timeval(&tv[0], arg3)
8428 || copy_from_user_timeval(&tv[1],
8429 arg3 + sizeof(struct target_timeval)))
8430 return -TARGET_EFAULT;
8431 tvp = tv;
8432 } else {
8433 tvp = NULL;
8435 if (!(p = lock_user_string(arg2))) {
8436 return -TARGET_EFAULT;
8438 ret = get_errno(futimesat(arg1, path(p), tvp));
8439 unlock_user(p, arg2, 0);
8441 return ret;
8442 #endif
8443 #ifdef TARGET_NR_access
8444 case TARGET_NR_access:
8445 if (!(p = lock_user_string(arg1))) {
8446 return -TARGET_EFAULT;
8448 ret = get_errno(access(path(p), arg2));
8449 unlock_user(p, arg1, 0);
8450 return ret;
8451 #endif
8452 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8453 case TARGET_NR_faccessat:
8454 if (!(p = lock_user_string(arg2))) {
8455 return -TARGET_EFAULT;
8457 ret = get_errno(faccessat(arg1, p, arg3, 0));
8458 unlock_user(p, arg2, 0);
8459 return ret;
8460 #endif
8461 #ifdef TARGET_NR_nice /* not on alpha */
8462 case TARGET_NR_nice:
8463 return get_errno(nice(arg1));
8464 #endif
8465 case TARGET_NR_sync:
8466 sync();
8467 return 0;
8468 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8469 case TARGET_NR_syncfs:
8470 return get_errno(syncfs(arg1));
8471 #endif
8472 case TARGET_NR_kill:
8473 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8474 #ifdef TARGET_NR_rename
8475 case TARGET_NR_rename:
8477 void *p2;
8478 p = lock_user_string(arg1);
8479 p2 = lock_user_string(arg2);
8480 if (!p || !p2)
8481 ret = -TARGET_EFAULT;
8482 else
8483 ret = get_errno(rename(p, p2));
8484 unlock_user(p2, arg2, 0);
8485 unlock_user(p, arg1, 0);
8487 return ret;
8488 #endif
8489 #if defined(TARGET_NR_renameat)
8490 case TARGET_NR_renameat:
8492 void *p2;
8493 p = lock_user_string(arg2);
8494 p2 = lock_user_string(arg4);
8495 if (!p || !p2)
8496 ret = -TARGET_EFAULT;
8497 else
8498 ret = get_errno(renameat(arg1, p, arg3, p2));
8499 unlock_user(p2, arg4, 0);
8500 unlock_user(p, arg2, 0);
8502 return ret;
8503 #endif
8504 #if defined(TARGET_NR_renameat2)
8505 case TARGET_NR_renameat2:
8507 void *p2;
8508 p = lock_user_string(arg2);
8509 p2 = lock_user_string(arg4);
8510 if (!p || !p2) {
8511 ret = -TARGET_EFAULT;
8512 } else {
8513 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8515 unlock_user(p2, arg4, 0);
8516 unlock_user(p, arg2, 0);
8518 return ret;
8519 #endif
8520 #ifdef TARGET_NR_mkdir
8521 case TARGET_NR_mkdir:
8522 if (!(p = lock_user_string(arg1)))
8523 return -TARGET_EFAULT;
8524 ret = get_errno(mkdir(p, arg2));
8525 unlock_user(p, arg1, 0);
8526 return ret;
8527 #endif
8528 #if defined(TARGET_NR_mkdirat)
8529 case TARGET_NR_mkdirat:
8530 if (!(p = lock_user_string(arg2)))
8531 return -TARGET_EFAULT;
8532 ret = get_errno(mkdirat(arg1, p, arg3));
8533 unlock_user(p, arg2, 0);
8534 return ret;
8535 #endif
8536 #ifdef TARGET_NR_rmdir
8537 case TARGET_NR_rmdir:
8538 if (!(p = lock_user_string(arg1)))
8539 return -TARGET_EFAULT;
8540 ret = get_errno(rmdir(p));
8541 unlock_user(p, arg1, 0);
8542 return ret;
8543 #endif
8544 case TARGET_NR_dup:
8545 ret = get_errno(dup(arg1));
8546 if (ret >= 0) {
8547 fd_trans_dup(arg1, ret);
8549 return ret;
8550 #ifdef TARGET_NR_pipe
8551 case TARGET_NR_pipe:
8552 return do_pipe(cpu_env, arg1, 0, 0);
8553 #endif
8554 #ifdef TARGET_NR_pipe2
8555 case TARGET_NR_pipe2:
8556 return do_pipe(cpu_env, arg1,
8557 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8558 #endif
8559 case TARGET_NR_times:
8561 struct target_tms *tmsp;
8562 struct tms tms;
8563 ret = get_errno(times(&tms));
8564 if (arg1) {
8565 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8566 if (!tmsp)
8567 return -TARGET_EFAULT;
8568 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8569 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8570 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8571 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8573 if (!is_error(ret))
8574 ret = host_to_target_clock_t(ret);
8576 return ret;
8577 case TARGET_NR_acct:
8578 if (arg1 == 0) {
8579 ret = get_errno(acct(NULL));
8580 } else {
8581 if (!(p = lock_user_string(arg1))) {
8582 return -TARGET_EFAULT;
8584 ret = get_errno(acct(path(p)));
8585 unlock_user(p, arg1, 0);
8587 return ret;
8588 #ifdef TARGET_NR_umount2
8589 case TARGET_NR_umount2:
8590 if (!(p = lock_user_string(arg1)))
8591 return -TARGET_EFAULT;
8592 ret = get_errno(umount2(p, arg2));
8593 unlock_user(p, arg1, 0);
8594 return ret;
8595 #endif
8596 case TARGET_NR_ioctl:
8597 return do_ioctl(arg1, arg2, arg3);
8598 #ifdef TARGET_NR_fcntl
8599 case TARGET_NR_fcntl:
8600 return do_fcntl(arg1, arg2, arg3);
8601 #endif
8602 case TARGET_NR_setpgid:
8603 return get_errno(setpgid(arg1, arg2));
8604 case TARGET_NR_umask:
8605 return get_errno(umask(arg1));
8606 case TARGET_NR_chroot:
8607 if (!(p = lock_user_string(arg1)))
8608 return -TARGET_EFAULT;
8609 ret = get_errno(chroot(p));
8610 unlock_user(p, arg1, 0);
8611 return ret;
8612 #ifdef TARGET_NR_dup2
8613 case TARGET_NR_dup2:
8614 ret = get_errno(dup2(arg1, arg2));
8615 if (ret >= 0) {
8616 fd_trans_dup(arg1, arg2);
8618 return ret;
8619 #endif
8620 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8621 case TARGET_NR_dup3:
8623 int host_flags;
8625 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8626 return -EINVAL;
8628 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8629 ret = get_errno(dup3(arg1, arg2, host_flags));
8630 if (ret >= 0) {
8631 fd_trans_dup(arg1, arg2);
8633 return ret;
8635 #endif
8636 #ifdef TARGET_NR_getppid /* not on alpha */
8637 case TARGET_NR_getppid:
8638 return get_errno(getppid());
8639 #endif
8640 #ifdef TARGET_NR_getpgrp
8641 case TARGET_NR_getpgrp:
8642 return get_errno(getpgrp());
8643 #endif
8644 case TARGET_NR_setsid:
8645 return get_errno(setsid());
8646 #ifdef TARGET_NR_sigaction
8647 case TARGET_NR_sigaction:
8649 #if defined(TARGET_ALPHA)
8650 struct target_sigaction act, oact, *pact = 0;
8651 struct target_old_sigaction *old_act;
8652 if (arg2) {
8653 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8654 return -TARGET_EFAULT;
8655 act._sa_handler = old_act->_sa_handler;
8656 target_siginitset(&act.sa_mask, old_act->sa_mask);
8657 act.sa_flags = old_act->sa_flags;
8658 act.sa_restorer = 0;
8659 unlock_user_struct(old_act, arg2, 0);
8660 pact = &act;
8662 ret = get_errno(do_sigaction(arg1, pact, &oact));
8663 if (!is_error(ret) && arg3) {
8664 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8665 return -TARGET_EFAULT;
8666 old_act->_sa_handler = oact._sa_handler;
8667 old_act->sa_mask = oact.sa_mask.sig[0];
8668 old_act->sa_flags = oact.sa_flags;
8669 unlock_user_struct(old_act, arg3, 1);
8671 #elif defined(TARGET_MIPS)
8672 struct target_sigaction act, oact, *pact, *old_act;
8674 if (arg2) {
8675 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8676 return -TARGET_EFAULT;
8677 act._sa_handler = old_act->_sa_handler;
8678 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8679 act.sa_flags = old_act->sa_flags;
8680 unlock_user_struct(old_act, arg2, 0);
8681 pact = &act;
8682 } else {
8683 pact = NULL;
8686 ret = get_errno(do_sigaction(arg1, pact, &oact));
8688 if (!is_error(ret) && arg3) {
8689 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8690 return -TARGET_EFAULT;
8691 old_act->_sa_handler = oact._sa_handler;
8692 old_act->sa_flags = oact.sa_flags;
8693 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8694 old_act->sa_mask.sig[1] = 0;
8695 old_act->sa_mask.sig[2] = 0;
8696 old_act->sa_mask.sig[3] = 0;
8697 unlock_user_struct(old_act, arg3, 1);
8699 #else
8700 struct target_old_sigaction *old_act;
8701 struct target_sigaction act, oact, *pact;
8702 if (arg2) {
8703 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8704 return -TARGET_EFAULT;
8705 act._sa_handler = old_act->_sa_handler;
8706 target_siginitset(&act.sa_mask, old_act->sa_mask);
8707 act.sa_flags = old_act->sa_flags;
8708 act.sa_restorer = old_act->sa_restorer;
8709 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8710 act.ka_restorer = 0;
8711 #endif
8712 unlock_user_struct(old_act, arg2, 0);
8713 pact = &act;
8714 } else {
8715 pact = NULL;
8717 ret = get_errno(do_sigaction(arg1, pact, &oact));
8718 if (!is_error(ret) && arg3) {
8719 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8720 return -TARGET_EFAULT;
8721 old_act->_sa_handler = oact._sa_handler;
8722 old_act->sa_mask = oact.sa_mask.sig[0];
8723 old_act->sa_flags = oact.sa_flags;
8724 old_act->sa_restorer = oact.sa_restorer;
8725 unlock_user_struct(old_act, arg3, 1);
8727 #endif
8729 return ret;
8730 #endif
8731 case TARGET_NR_rt_sigaction:
8733 #if defined(TARGET_ALPHA)
8734 /* For Alpha and SPARC this is a 5 argument syscall, with
8735 * a 'restorer' parameter which must be copied into the
8736 * sa_restorer field of the sigaction struct.
8737 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8738 * and arg5 is the sigsetsize.
8739 * Alpha also has a separate rt_sigaction struct that it uses
8740 * here; SPARC uses the usual sigaction struct.
8742 struct target_rt_sigaction *rt_act;
8743 struct target_sigaction act, oact, *pact = 0;
8745 if (arg4 != sizeof(target_sigset_t)) {
8746 return -TARGET_EINVAL;
8748 if (arg2) {
8749 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8750 return -TARGET_EFAULT;
8751 act._sa_handler = rt_act->_sa_handler;
8752 act.sa_mask = rt_act->sa_mask;
8753 act.sa_flags = rt_act->sa_flags;
8754 act.sa_restorer = arg5;
8755 unlock_user_struct(rt_act, arg2, 0);
8756 pact = &act;
8758 ret = get_errno(do_sigaction(arg1, pact, &oact));
8759 if (!is_error(ret) && arg3) {
8760 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8761 return -TARGET_EFAULT;
8762 rt_act->_sa_handler = oact._sa_handler;
8763 rt_act->sa_mask = oact.sa_mask;
8764 rt_act->sa_flags = oact.sa_flags;
8765 unlock_user_struct(rt_act, arg3, 1);
8767 #else
8768 #ifdef TARGET_SPARC
8769 target_ulong restorer = arg4;
8770 target_ulong sigsetsize = arg5;
8771 #else
8772 target_ulong sigsetsize = arg4;
8773 #endif
8774 struct target_sigaction *act;
8775 struct target_sigaction *oact;
8777 if (sigsetsize != sizeof(target_sigset_t)) {
8778 return -TARGET_EINVAL;
8780 if (arg2) {
8781 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8782 return -TARGET_EFAULT;
8784 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8785 act->ka_restorer = restorer;
8786 #endif
8787 } else {
8788 act = NULL;
8790 if (arg3) {
8791 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8792 ret = -TARGET_EFAULT;
8793 goto rt_sigaction_fail;
8795 } else
8796 oact = NULL;
8797 ret = get_errno(do_sigaction(arg1, act, oact));
8798 rt_sigaction_fail:
8799 if (act)
8800 unlock_user_struct(act, arg2, 0);
8801 if (oact)
8802 unlock_user_struct(oact, arg3, 1);
8803 #endif
8805 return ret;
8806 #ifdef TARGET_NR_sgetmask /* not on alpha */
8807 case TARGET_NR_sgetmask:
8809 sigset_t cur_set;
8810 abi_ulong target_set;
8811 ret = do_sigprocmask(0, NULL, &cur_set);
8812 if (!ret) {
8813 host_to_target_old_sigset(&target_set, &cur_set);
8814 ret = target_set;
8817 return ret;
8818 #endif
8819 #ifdef TARGET_NR_ssetmask /* not on alpha */
8820 case TARGET_NR_ssetmask:
8822 sigset_t set, oset;
8823 abi_ulong target_set = arg1;
8824 target_to_host_old_sigset(&set, &target_set);
8825 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8826 if (!ret) {
8827 host_to_target_old_sigset(&target_set, &oset);
8828 ret = target_set;
8831 return ret;
8832 #endif
8833 #ifdef TARGET_NR_sigprocmask
8834 case TARGET_NR_sigprocmask:
8836 #if defined(TARGET_ALPHA)
8837 sigset_t set, oldset;
8838 abi_ulong mask;
8839 int how;
8841 switch (arg1) {
8842 case TARGET_SIG_BLOCK:
8843 how = SIG_BLOCK;
8844 break;
8845 case TARGET_SIG_UNBLOCK:
8846 how = SIG_UNBLOCK;
8847 break;
8848 case TARGET_SIG_SETMASK:
8849 how = SIG_SETMASK;
8850 break;
8851 default:
8852 return -TARGET_EINVAL;
8854 mask = arg2;
8855 target_to_host_old_sigset(&set, &mask);
8857 ret = do_sigprocmask(how, &set, &oldset);
8858 if (!is_error(ret)) {
8859 host_to_target_old_sigset(&mask, &oldset);
8860 ret = mask;
8861 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8863 #else
8864 sigset_t set, oldset, *set_ptr;
8865 int how;
8867 if (arg2) {
8868 switch (arg1) {
8869 case TARGET_SIG_BLOCK:
8870 how = SIG_BLOCK;
8871 break;
8872 case TARGET_SIG_UNBLOCK:
8873 how = SIG_UNBLOCK;
8874 break;
8875 case TARGET_SIG_SETMASK:
8876 how = SIG_SETMASK;
8877 break;
8878 default:
8879 return -TARGET_EINVAL;
8881 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8882 return -TARGET_EFAULT;
8883 target_to_host_old_sigset(&set, p);
8884 unlock_user(p, arg2, 0);
8885 set_ptr = &set;
8886 } else {
8887 how = 0;
8888 set_ptr = NULL;
8890 ret = do_sigprocmask(how, set_ptr, &oldset);
8891 if (!is_error(ret) && arg3) {
8892 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8893 return -TARGET_EFAULT;
8894 host_to_target_old_sigset(p, &oldset);
8895 unlock_user(p, arg3, sizeof(target_sigset_t));
8897 #endif
8899 return ret;
8900 #endif
8901 case TARGET_NR_rt_sigprocmask:
8903 int how = arg1;
8904 sigset_t set, oldset, *set_ptr;
8906 if (arg4 != sizeof(target_sigset_t)) {
8907 return -TARGET_EINVAL;
8910 if (arg2) {
8911 switch(how) {
8912 case TARGET_SIG_BLOCK:
8913 how = SIG_BLOCK;
8914 break;
8915 case TARGET_SIG_UNBLOCK:
8916 how = SIG_UNBLOCK;
8917 break;
8918 case TARGET_SIG_SETMASK:
8919 how = SIG_SETMASK;
8920 break;
8921 default:
8922 return -TARGET_EINVAL;
8924 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8925 return -TARGET_EFAULT;
8926 target_to_host_sigset(&set, p);
8927 unlock_user(p, arg2, 0);
8928 set_ptr = &set;
8929 } else {
8930 how = 0;
8931 set_ptr = NULL;
8933 ret = do_sigprocmask(how, set_ptr, &oldset);
8934 if (!is_error(ret) && arg3) {
8935 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8936 return -TARGET_EFAULT;
8937 host_to_target_sigset(p, &oldset);
8938 unlock_user(p, arg3, sizeof(target_sigset_t));
8941 return ret;
8942 #ifdef TARGET_NR_sigpending
8943 case TARGET_NR_sigpending:
8945 sigset_t set;
8946 ret = get_errno(sigpending(&set));
8947 if (!is_error(ret)) {
8948 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8949 return -TARGET_EFAULT;
8950 host_to_target_old_sigset(p, &set);
8951 unlock_user(p, arg1, sizeof(target_sigset_t));
8954 return ret;
8955 #endif
8956 case TARGET_NR_rt_sigpending:
8958 sigset_t set;
8960 /* Yes, this check is >, not != like most. We follow the kernel's
8961 * logic and it does it like this because it implements
8962 * NR_sigpending through the same code path, and in that case
8963 * the old_sigset_t is smaller in size.
8965 if (arg2 > sizeof(target_sigset_t)) {
8966 return -TARGET_EINVAL;
8969 ret = get_errno(sigpending(&set));
8970 if (!is_error(ret)) {
8971 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8972 return -TARGET_EFAULT;
8973 host_to_target_sigset(p, &set);
8974 unlock_user(p, arg1, sizeof(target_sigset_t));
8977 return ret;
8978 #ifdef TARGET_NR_sigsuspend
8979 case TARGET_NR_sigsuspend:
8981 TaskState *ts = cpu->opaque;
8982 #if defined(TARGET_ALPHA)
8983 abi_ulong mask = arg1;
8984 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8985 #else
8986 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8987 return -TARGET_EFAULT;
8988 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8989 unlock_user(p, arg1, 0);
8990 #endif
8991 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8992 SIGSET_T_SIZE));
8993 if (ret != -TARGET_ERESTARTSYS) {
8994 ts->in_sigsuspend = 1;
8997 return ret;
8998 #endif
8999 case TARGET_NR_rt_sigsuspend:
9001 TaskState *ts = cpu->opaque;
9003 if (arg2 != sizeof(target_sigset_t)) {
9004 return -TARGET_EINVAL;
9006 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9007 return -TARGET_EFAULT;
9008 target_to_host_sigset(&ts->sigsuspend_mask, p);
9009 unlock_user(p, arg1, 0);
9010 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9011 SIGSET_T_SIZE));
9012 if (ret != -TARGET_ERESTARTSYS) {
9013 ts->in_sigsuspend = 1;
9016 return ret;
9017 #ifdef TARGET_NR_rt_sigtimedwait
9018 case TARGET_NR_rt_sigtimedwait:
9020 sigset_t set;
9021 struct timespec uts, *puts;
9022 siginfo_t uinfo;
9024 if (arg4 != sizeof(target_sigset_t)) {
9025 return -TARGET_EINVAL;
9028 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9029 return -TARGET_EFAULT;
9030 target_to_host_sigset(&set, p);
9031 unlock_user(p, arg1, 0);
9032 if (arg3) {
9033 puts = &uts;
9034 if (target_to_host_timespec(puts, arg3)) {
9035 return -TARGET_EFAULT;
9037 } else {
9038 puts = NULL;
9040 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9041 SIGSET_T_SIZE));
9042 if (!is_error(ret)) {
9043 if (arg2) {
9044 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9046 if (!p) {
9047 return -TARGET_EFAULT;
9049 host_to_target_siginfo(p, &uinfo);
9050 unlock_user(p, arg2, sizeof(target_siginfo_t));
9052 ret = host_to_target_signal(ret);
9055 return ret;
9056 #endif
9057 #ifdef TARGET_NR_rt_sigtimedwait_time64
9058 case TARGET_NR_rt_sigtimedwait_time64:
9060 sigset_t set;
9061 struct timespec uts, *puts;
9062 siginfo_t uinfo;
9064 if (arg4 != sizeof(target_sigset_t)) {
9065 return -TARGET_EINVAL;
9068 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9069 if (!p) {
9070 return -TARGET_EFAULT;
9072 target_to_host_sigset(&set, p);
9073 unlock_user(p, arg1, 0);
9074 if (arg3) {
9075 puts = &uts;
9076 if (target_to_host_timespec64(puts, arg3)) {
9077 return -TARGET_EFAULT;
9079 } else {
9080 puts = NULL;
9082 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9083 SIGSET_T_SIZE));
9084 if (!is_error(ret)) {
9085 if (arg2) {
9086 p = lock_user(VERIFY_WRITE, arg2,
9087 sizeof(target_siginfo_t), 0);
9088 if (!p) {
9089 return -TARGET_EFAULT;
9091 host_to_target_siginfo(p, &uinfo);
9092 unlock_user(p, arg2, sizeof(target_siginfo_t));
9094 ret = host_to_target_signal(ret);
9097 return ret;
9098 #endif
9099 case TARGET_NR_rt_sigqueueinfo:
9101 siginfo_t uinfo;
9103 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9104 if (!p) {
9105 return -TARGET_EFAULT;
9107 target_to_host_siginfo(&uinfo, p);
9108 unlock_user(p, arg3, 0);
9109 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9111 return ret;
9112 case TARGET_NR_rt_tgsigqueueinfo:
9114 siginfo_t uinfo;
9116 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9117 if (!p) {
9118 return -TARGET_EFAULT;
9120 target_to_host_siginfo(&uinfo, p);
9121 unlock_user(p, arg4, 0);
9122 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9124 return ret;
9125 #ifdef TARGET_NR_sigreturn
9126 case TARGET_NR_sigreturn:
9127 if (block_signals()) {
9128 return -TARGET_ERESTARTSYS;
9130 return do_sigreturn(cpu_env);
9131 #endif
9132 case TARGET_NR_rt_sigreturn:
9133 if (block_signals()) {
9134 return -TARGET_ERESTARTSYS;
9136 return do_rt_sigreturn(cpu_env);
9137 case TARGET_NR_sethostname:
9138 if (!(p = lock_user_string(arg1)))
9139 return -TARGET_EFAULT;
9140 ret = get_errno(sethostname(p, arg2));
9141 unlock_user(p, arg1, 0);
9142 return ret;
9143 #ifdef TARGET_NR_setrlimit
9144 case TARGET_NR_setrlimit:
9146 int resource = target_to_host_resource(arg1);
9147 struct target_rlimit *target_rlim;
9148 struct rlimit rlim;
9149 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9150 return -TARGET_EFAULT;
9151 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9152 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9153 unlock_user_struct(target_rlim, arg2, 0);
9155 * If we just passed through resource limit settings for memory then
9156 * they would also apply to QEMU's own allocations, and QEMU will
9157 * crash or hang or die if its allocations fail. Ideally we would
9158 * track the guest allocations in QEMU and apply the limits ourselves.
9159 * For now, just tell the guest the call succeeded but don't actually
9160 * limit anything.
9162 if (resource != RLIMIT_AS &&
9163 resource != RLIMIT_DATA &&
9164 resource != RLIMIT_STACK) {
9165 return get_errno(setrlimit(resource, &rlim));
9166 } else {
9167 return 0;
9170 #endif
9171 #ifdef TARGET_NR_getrlimit
9172 case TARGET_NR_getrlimit:
9174 int resource = target_to_host_resource(arg1);
9175 struct target_rlimit *target_rlim;
9176 struct rlimit rlim;
9178 ret = get_errno(getrlimit(resource, &rlim));
9179 if (!is_error(ret)) {
9180 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9181 return -TARGET_EFAULT;
9182 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9183 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9184 unlock_user_struct(target_rlim, arg2, 1);
9187 return ret;
9188 #endif
9189 case TARGET_NR_getrusage:
9191 struct rusage rusage;
9192 ret = get_errno(getrusage(arg1, &rusage));
9193 if (!is_error(ret)) {
9194 ret = host_to_target_rusage(arg2, &rusage);
9197 return ret;
9198 #if defined(TARGET_NR_gettimeofday)
9199 case TARGET_NR_gettimeofday:
9201 struct timeval tv;
9202 struct timezone tz;
9204 ret = get_errno(gettimeofday(&tv, &tz));
9205 if (!is_error(ret)) {
9206 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9207 return -TARGET_EFAULT;
9209 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9210 return -TARGET_EFAULT;
9214 return ret;
9215 #endif
9216 #if defined(TARGET_NR_settimeofday)
9217 case TARGET_NR_settimeofday:
9219 struct timeval tv, *ptv = NULL;
9220 struct timezone tz, *ptz = NULL;
9222 if (arg1) {
9223 if (copy_from_user_timeval(&tv, arg1)) {
9224 return -TARGET_EFAULT;
9226 ptv = &tv;
9229 if (arg2) {
9230 if (copy_from_user_timezone(&tz, arg2)) {
9231 return -TARGET_EFAULT;
9233 ptz = &tz;
9236 return get_errno(settimeofday(ptv, ptz));
9238 #endif
9239 #if defined(TARGET_NR_select)
9240 case TARGET_NR_select:
9241 #if defined(TARGET_WANT_NI_OLD_SELECT)
9242 /* some architectures used to have old_select here
9243 * but now ENOSYS it.
9245 ret = -TARGET_ENOSYS;
9246 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9247 ret = do_old_select(arg1);
9248 #else
9249 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9250 #endif
9251 return ret;
9252 #endif
9253 #ifdef TARGET_NR_pselect6
9254 case TARGET_NR_pselect6:
9256 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9257 fd_set rfds, wfds, efds;
9258 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9259 struct timespec ts, *ts_ptr;
9262 * The 6th arg is actually two args smashed together,
9263 * so we cannot use the C library.
9265 sigset_t set;
9266 struct {
9267 sigset_t *set;
9268 size_t size;
9269 } sig, *sig_ptr;
9271 abi_ulong arg_sigset, arg_sigsize, *arg7;
9272 target_sigset_t *target_sigset;
9274 n = arg1;
9275 rfd_addr = arg2;
9276 wfd_addr = arg3;
9277 efd_addr = arg4;
9278 ts_addr = arg5;
9280 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9281 if (ret) {
9282 return ret;
9284 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9285 if (ret) {
9286 return ret;
9288 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9289 if (ret) {
9290 return ret;
9294 * This takes a timespec, and not a timeval, so we cannot
9295 * use the do_select() helper ...
9297 if (ts_addr) {
9298 if (target_to_host_timespec(&ts, ts_addr)) {
9299 return -TARGET_EFAULT;
9301 ts_ptr = &ts;
9302 } else {
9303 ts_ptr = NULL;
9306 /* Extract the two packed args for the sigset */
9307 if (arg6) {
9308 sig_ptr = &sig;
9309 sig.size = SIGSET_T_SIZE;
9311 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9312 if (!arg7) {
9313 return -TARGET_EFAULT;
9315 arg_sigset = tswapal(arg7[0]);
9316 arg_sigsize = tswapal(arg7[1]);
9317 unlock_user(arg7, arg6, 0);
9319 if (arg_sigset) {
9320 sig.set = &set;
9321 if (arg_sigsize != sizeof(*target_sigset)) {
9322 /* Like the kernel, we enforce correct size sigsets */
9323 return -TARGET_EINVAL;
9325 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9326 sizeof(*target_sigset), 1);
9327 if (!target_sigset) {
9328 return -TARGET_EFAULT;
9330 target_to_host_sigset(&set, target_sigset);
9331 unlock_user(target_sigset, arg_sigset, 0);
9332 } else {
9333 sig.set = NULL;
9335 } else {
9336 sig_ptr = NULL;
9339 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9340 ts_ptr, sig_ptr));
9342 if (!is_error(ret)) {
9343 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9344 return -TARGET_EFAULT;
9345 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9346 return -TARGET_EFAULT;
9347 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9348 return -TARGET_EFAULT;
9350 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9351 return -TARGET_EFAULT;
9354 return ret;
9355 #endif
9356 #ifdef TARGET_NR_symlink
9357 case TARGET_NR_symlink:
9359 void *p2;
9360 p = lock_user_string(arg1);
9361 p2 = lock_user_string(arg2);
9362 if (!p || !p2)
9363 ret = -TARGET_EFAULT;
9364 else
9365 ret = get_errno(symlink(p, p2));
9366 unlock_user(p2, arg2, 0);
9367 unlock_user(p, arg1, 0);
9369 return ret;
9370 #endif
9371 #if defined(TARGET_NR_symlinkat)
9372 case TARGET_NR_symlinkat:
9374 void *p2;
9375 p = lock_user_string(arg1);
9376 p2 = lock_user_string(arg3);
9377 if (!p || !p2)
9378 ret = -TARGET_EFAULT;
9379 else
9380 ret = get_errno(symlinkat(p, arg2, p2));
9381 unlock_user(p2, arg3, 0);
9382 unlock_user(p, arg1, 0);
9384 return ret;
9385 #endif
9386 #ifdef TARGET_NR_readlink
9387 case TARGET_NR_readlink:
9389 void *p2;
9390 p = lock_user_string(arg1);
9391 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9392 if (!p || !p2) {
9393 ret = -TARGET_EFAULT;
9394 } else if (!arg3) {
9395 /* Short circuit this for the magic exe check. */
9396 ret = -TARGET_EINVAL;
9397 } else if (is_proc_myself((const char *)p, "exe")) {
9398 char real[PATH_MAX], *temp;
9399 temp = realpath(exec_path, real);
9400 /* Return value is # of bytes that we wrote to the buffer. */
9401 if (temp == NULL) {
9402 ret = get_errno(-1);
9403 } else {
9404 /* Don't worry about sign mismatch as earlier mapping
9405 * logic would have thrown a bad address error. */
9406 ret = MIN(strlen(real), arg3);
9407 /* We cannot NUL terminate the string. */
9408 memcpy(p2, real, ret);
9410 } else {
9411 ret = get_errno(readlink(path(p), p2, arg3));
9413 unlock_user(p2, arg2, ret);
9414 unlock_user(p, arg1, 0);
9416 return ret;
9417 #endif
9418 #if defined(TARGET_NR_readlinkat)
9419 case TARGET_NR_readlinkat:
9421 void *p2;
9422 p = lock_user_string(arg2);
9423 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9424 if (!p || !p2) {
9425 ret = -TARGET_EFAULT;
9426 } else if (is_proc_myself((const char *)p, "exe")) {
9427 char real[PATH_MAX], *temp;
9428 temp = realpath(exec_path, real);
9429 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9430 snprintf((char *)p2, arg4, "%s", real);
9431 } else {
9432 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9434 unlock_user(p2, arg3, ret);
9435 unlock_user(p, arg2, 0);
9437 return ret;
9438 #endif
9439 #ifdef TARGET_NR_swapon
9440 case TARGET_NR_swapon:
9441 if (!(p = lock_user_string(arg1)))
9442 return -TARGET_EFAULT;
9443 ret = get_errno(swapon(p, arg2));
9444 unlock_user(p, arg1, 0);
9445 return ret;
9446 #endif
9447 case TARGET_NR_reboot:
9448 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9449 /* arg4 must be ignored in all other cases */
9450 p = lock_user_string(arg4);
9451 if (!p) {
9452 return -TARGET_EFAULT;
9454 ret = get_errno(reboot(arg1, arg2, arg3, p));
9455 unlock_user(p, arg4, 0);
9456 } else {
9457 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9459 return ret;
9460 #ifdef TARGET_NR_mmap
9461 case TARGET_NR_mmap:
9462 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9463 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9464 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9465 || defined(TARGET_S390X)
9467 abi_ulong *v;
9468 abi_ulong v1, v2, v3, v4, v5, v6;
9469 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9470 return -TARGET_EFAULT;
9471 v1 = tswapal(v[0]);
9472 v2 = tswapal(v[1]);
9473 v3 = tswapal(v[2]);
9474 v4 = tswapal(v[3]);
9475 v5 = tswapal(v[4]);
9476 v6 = tswapal(v[5]);
9477 unlock_user(v, arg1, 0);
9478 ret = get_errno(target_mmap(v1, v2, v3,
9479 target_to_host_bitmask(v4, mmap_flags_tbl),
9480 v5, v6));
9482 #else
9483 ret = get_errno(target_mmap(arg1, arg2, arg3,
9484 target_to_host_bitmask(arg4, mmap_flags_tbl),
9485 arg5,
9486 arg6));
9487 #endif
9488 return ret;
9489 #endif
9490 #ifdef TARGET_NR_mmap2
9491 case TARGET_NR_mmap2:
9492 #ifndef MMAP_SHIFT
9493 #define MMAP_SHIFT 12
9494 #endif
9495 ret = target_mmap(arg1, arg2, arg3,
9496 target_to_host_bitmask(arg4, mmap_flags_tbl),
9497 arg5, arg6 << MMAP_SHIFT);
9498 return get_errno(ret);
9499 #endif
9500 case TARGET_NR_munmap:
9501 return get_errno(target_munmap(arg1, arg2));
9502 case TARGET_NR_mprotect:
9504 TaskState *ts = cpu->opaque;
9505 /* Special hack to detect libc making the stack executable. */
9506 if ((arg3 & PROT_GROWSDOWN)
9507 && arg1 >= ts->info->stack_limit
9508 && arg1 <= ts->info->start_stack) {
9509 arg3 &= ~PROT_GROWSDOWN;
9510 arg2 = arg2 + arg1 - ts->info->stack_limit;
9511 arg1 = ts->info->stack_limit;
9514 return get_errno(target_mprotect(arg1, arg2, arg3));
9515 #ifdef TARGET_NR_mremap
9516 case TARGET_NR_mremap:
9517 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9518 #endif
9519 /* ??? msync/mlock/munlock are broken for softmmu. */
9520 #ifdef TARGET_NR_msync
9521 case TARGET_NR_msync:
9522 return get_errno(msync(g2h(arg1), arg2, arg3));
9523 #endif
9524 #ifdef TARGET_NR_mlock
9525 case TARGET_NR_mlock:
9526 return get_errno(mlock(g2h(arg1), arg2));
9527 #endif
9528 #ifdef TARGET_NR_munlock
9529 case TARGET_NR_munlock:
9530 return get_errno(munlock(g2h(arg1), arg2));
9531 #endif
9532 #ifdef TARGET_NR_mlockall
9533 case TARGET_NR_mlockall:
9534 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9535 #endif
9536 #ifdef TARGET_NR_munlockall
9537 case TARGET_NR_munlockall:
9538 return get_errno(munlockall());
9539 #endif
9540 #ifdef TARGET_NR_truncate
9541 case TARGET_NR_truncate:
9542 if (!(p = lock_user_string(arg1)))
9543 return -TARGET_EFAULT;
9544 ret = get_errno(truncate(p, arg2));
9545 unlock_user(p, arg1, 0);
9546 return ret;
9547 #endif
9548 #ifdef TARGET_NR_ftruncate
9549 case TARGET_NR_ftruncate:
9550 return get_errno(ftruncate(arg1, arg2));
9551 #endif
9552 case TARGET_NR_fchmod:
9553 return get_errno(fchmod(arg1, arg2));
9554 #if defined(TARGET_NR_fchmodat)
9555 case TARGET_NR_fchmodat:
9556 if (!(p = lock_user_string(arg2)))
9557 return -TARGET_EFAULT;
9558 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9559 unlock_user(p, arg2, 0);
9560 return ret;
9561 #endif
9562 case TARGET_NR_getpriority:
9563 /* Note that negative values are valid for getpriority, so we must
9564 differentiate based on errno settings. */
9565 errno = 0;
9566 ret = getpriority(arg1, arg2);
9567 if (ret == -1 && errno != 0) {
9568 return -host_to_target_errno(errno);
9570 #ifdef TARGET_ALPHA
9571 /* Return value is the unbiased priority. Signal no error. */
9572 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9573 #else
9574 /* Return value is a biased priority to avoid negative numbers. */
9575 ret = 20 - ret;
9576 #endif
9577 return ret;
9578 case TARGET_NR_setpriority:
9579 return get_errno(setpriority(arg1, arg2, arg3));
9580 #ifdef TARGET_NR_statfs
9581 case TARGET_NR_statfs:
9582 if (!(p = lock_user_string(arg1))) {
9583 return -TARGET_EFAULT;
9585 ret = get_errno(statfs(path(p), &stfs));
9586 unlock_user(p, arg1, 0);
9587 convert_statfs:
9588 if (!is_error(ret)) {
9589 struct target_statfs *target_stfs;
9591 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9592 return -TARGET_EFAULT;
9593 __put_user(stfs.f_type, &target_stfs->f_type);
9594 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9595 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9596 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9597 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9598 __put_user(stfs.f_files, &target_stfs->f_files);
9599 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9600 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9601 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9602 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9603 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9604 #ifdef _STATFS_F_FLAGS
9605 __put_user(stfs.f_flags, &target_stfs->f_flags);
9606 #else
9607 __put_user(0, &target_stfs->f_flags);
9608 #endif
9609 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9610 unlock_user_struct(target_stfs, arg2, 1);
9612 return ret;
9613 #endif
9614 #ifdef TARGET_NR_fstatfs
9615 case TARGET_NR_fstatfs:
9616 ret = get_errno(fstatfs(arg1, &stfs));
9617 goto convert_statfs;
9618 #endif
9619 #ifdef TARGET_NR_statfs64
9620 case TARGET_NR_statfs64:
9621 if (!(p = lock_user_string(arg1))) {
9622 return -TARGET_EFAULT;
9624 ret = get_errno(statfs(path(p), &stfs));
9625 unlock_user(p, arg1, 0);
9626 convert_statfs64:
9627 if (!is_error(ret)) {
9628 struct target_statfs64 *target_stfs;
9630 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9631 return -TARGET_EFAULT;
9632 __put_user(stfs.f_type, &target_stfs->f_type);
9633 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9634 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9635 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9636 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9637 __put_user(stfs.f_files, &target_stfs->f_files);
9638 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9639 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9640 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9641 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9642 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9643 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9644 unlock_user_struct(target_stfs, arg3, 1);
9646 return ret;
9647 case TARGET_NR_fstatfs64:
9648 ret = get_errno(fstatfs(arg1, &stfs));
9649 goto convert_statfs64;
9650 #endif
9651 #ifdef TARGET_NR_socketcall
9652 case TARGET_NR_socketcall:
9653 return do_socketcall(arg1, arg2);
9654 #endif
9655 #ifdef TARGET_NR_accept
9656 case TARGET_NR_accept:
9657 return do_accept4(arg1, arg2, arg3, 0);
9658 #endif
9659 #ifdef TARGET_NR_accept4
9660 case TARGET_NR_accept4:
9661 return do_accept4(arg1, arg2, arg3, arg4);
9662 #endif
9663 #ifdef TARGET_NR_bind
9664 case TARGET_NR_bind:
9665 return do_bind(arg1, arg2, arg3);
9666 #endif
9667 #ifdef TARGET_NR_connect
9668 case TARGET_NR_connect:
9669 return do_connect(arg1, arg2, arg3);
9670 #endif
9671 #ifdef TARGET_NR_getpeername
9672 case TARGET_NR_getpeername:
9673 return do_getpeername(arg1, arg2, arg3);
9674 #endif
9675 #ifdef TARGET_NR_getsockname
9676 case TARGET_NR_getsockname:
9677 return do_getsockname(arg1, arg2, arg3);
9678 #endif
9679 #ifdef TARGET_NR_getsockopt
9680 case TARGET_NR_getsockopt:
9681 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9682 #endif
9683 #ifdef TARGET_NR_listen
9684 case TARGET_NR_listen:
9685 return get_errno(listen(arg1, arg2));
9686 #endif
9687 #ifdef TARGET_NR_recv
9688 case TARGET_NR_recv:
9689 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9690 #endif
9691 #ifdef TARGET_NR_recvfrom
9692 case TARGET_NR_recvfrom:
9693 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9694 #endif
9695 #ifdef TARGET_NR_recvmsg
9696 case TARGET_NR_recvmsg:
9697 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9698 #endif
9699 #ifdef TARGET_NR_send
9700 case TARGET_NR_send:
9701 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9702 #endif
9703 #ifdef TARGET_NR_sendmsg
9704 case TARGET_NR_sendmsg:
9705 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9706 #endif
9707 #ifdef TARGET_NR_sendmmsg
9708 case TARGET_NR_sendmmsg:
9709 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9710 #endif
9711 #ifdef TARGET_NR_recvmmsg
9712 case TARGET_NR_recvmmsg:
9713 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9714 #endif
9715 #ifdef TARGET_NR_sendto
9716 case TARGET_NR_sendto:
9717 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9718 #endif
9719 #ifdef TARGET_NR_shutdown
9720 case TARGET_NR_shutdown:
9721 return get_errno(shutdown(arg1, arg2));
9722 #endif
9723 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9724 case TARGET_NR_getrandom:
9725 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9726 if (!p) {
9727 return -TARGET_EFAULT;
9729 ret = get_errno(getrandom(p, arg2, arg3));
9730 unlock_user(p, arg1, ret);
9731 return ret;
9732 #endif
9733 #ifdef TARGET_NR_socket
9734 case TARGET_NR_socket:
9735 return do_socket(arg1, arg2, arg3);
9736 #endif
9737 #ifdef TARGET_NR_socketpair
9738 case TARGET_NR_socketpair:
9739 return do_socketpair(arg1, arg2, arg3, arg4);
9740 #endif
9741 #ifdef TARGET_NR_setsockopt
9742 case TARGET_NR_setsockopt:
9743 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9744 #endif
9745 #if defined(TARGET_NR_syslog)
9746 case TARGET_NR_syslog:
9748 int len = arg2;
9750 switch (arg1) {
9751 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9752 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9753 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9754 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9755 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9756 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9757 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9758 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9759 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9760 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9761 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9762 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9764 if (len < 0) {
9765 return -TARGET_EINVAL;
9767 if (len == 0) {
9768 return 0;
9770 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9771 if (!p) {
9772 return -TARGET_EFAULT;
9774 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9775 unlock_user(p, arg2, arg3);
9777 return ret;
9778 default:
9779 return -TARGET_EINVAL;
9782 break;
9783 #endif
9784 case TARGET_NR_setitimer:
9786 struct itimerval value, ovalue, *pvalue;
9788 if (arg2) {
9789 pvalue = &value;
9790 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9791 || copy_from_user_timeval(&pvalue->it_value,
9792 arg2 + sizeof(struct target_timeval)))
9793 return -TARGET_EFAULT;
9794 } else {
9795 pvalue = NULL;
9797 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9798 if (!is_error(ret) && arg3) {
9799 if (copy_to_user_timeval(arg3,
9800 &ovalue.it_interval)
9801 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9802 &ovalue.it_value))
9803 return -TARGET_EFAULT;
9806 return ret;
9807 case TARGET_NR_getitimer:
9809 struct itimerval value;
9811 ret = get_errno(getitimer(arg1, &value));
9812 if (!is_error(ret) && arg2) {
9813 if (copy_to_user_timeval(arg2,
9814 &value.it_interval)
9815 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9816 &value.it_value))
9817 return -TARGET_EFAULT;
9820 return ret;
9821 #ifdef TARGET_NR_stat
9822 case TARGET_NR_stat:
9823 if (!(p = lock_user_string(arg1))) {
9824 return -TARGET_EFAULT;
9826 ret = get_errno(stat(path(p), &st));
9827 unlock_user(p, arg1, 0);
9828 goto do_stat;
9829 #endif
9830 #ifdef TARGET_NR_lstat
9831 case TARGET_NR_lstat:
9832 if (!(p = lock_user_string(arg1))) {
9833 return -TARGET_EFAULT;
9835 ret = get_errno(lstat(path(p), &st));
9836 unlock_user(p, arg1, 0);
9837 goto do_stat;
9838 #endif
9839 #ifdef TARGET_NR_fstat
9840 case TARGET_NR_fstat:
9842 ret = get_errno(fstat(arg1, &st));
9843 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9844 do_stat:
9845 #endif
9846 if (!is_error(ret)) {
9847 struct target_stat *target_st;
9849 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9850 return -TARGET_EFAULT;
9851 memset(target_st, 0, sizeof(*target_st));
9852 __put_user(st.st_dev, &target_st->st_dev);
9853 __put_user(st.st_ino, &target_st->st_ino);
9854 __put_user(st.st_mode, &target_st->st_mode);
9855 __put_user(st.st_uid, &target_st->st_uid);
9856 __put_user(st.st_gid, &target_st->st_gid);
9857 __put_user(st.st_nlink, &target_st->st_nlink);
9858 __put_user(st.st_rdev, &target_st->st_rdev);
9859 __put_user(st.st_size, &target_st->st_size);
9860 __put_user(st.st_blksize, &target_st->st_blksize);
9861 __put_user(st.st_blocks, &target_st->st_blocks);
9862 __put_user(st.st_atime, &target_st->target_st_atime);
9863 __put_user(st.st_mtime, &target_st->target_st_mtime);
9864 __put_user(st.st_ctime, &target_st->target_st_ctime);
9865 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9866 defined(TARGET_STAT_HAVE_NSEC)
9867 __put_user(st.st_atim.tv_nsec,
9868 &target_st->target_st_atime_nsec);
9869 __put_user(st.st_mtim.tv_nsec,
9870 &target_st->target_st_mtime_nsec);
9871 __put_user(st.st_ctim.tv_nsec,
9872 &target_st->target_st_ctime_nsec);
9873 #endif
9874 unlock_user_struct(target_st, arg2, 1);
9877 return ret;
9878 #endif
9879 case TARGET_NR_vhangup:
9880 return get_errno(vhangup());
9881 #ifdef TARGET_NR_syscall
9882 case TARGET_NR_syscall:
9883 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9884 arg6, arg7, arg8, 0);
9885 #endif
9886 #if defined(TARGET_NR_wait4)
9887 case TARGET_NR_wait4:
9889 int status;
9890 abi_long status_ptr = arg2;
9891 struct rusage rusage, *rusage_ptr;
9892 abi_ulong target_rusage = arg4;
9893 abi_long rusage_err;
9894 if (target_rusage)
9895 rusage_ptr = &rusage;
9896 else
9897 rusage_ptr = NULL;
9898 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9899 if (!is_error(ret)) {
9900 if (status_ptr && ret) {
9901 status = host_to_target_waitstatus(status);
9902 if (put_user_s32(status, status_ptr))
9903 return -TARGET_EFAULT;
9905 if (target_rusage) {
9906 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9907 if (rusage_err) {
9908 ret = rusage_err;
9913 return ret;
9914 #endif
9915 #ifdef TARGET_NR_swapoff
9916 case TARGET_NR_swapoff:
9917 if (!(p = lock_user_string(arg1)))
9918 return -TARGET_EFAULT;
9919 ret = get_errno(swapoff(p));
9920 unlock_user(p, arg1, 0);
9921 return ret;
9922 #endif
9923 case TARGET_NR_sysinfo:
9925 struct target_sysinfo *target_value;
9926 struct sysinfo value;
9927 ret = get_errno(sysinfo(&value));
9928 if (!is_error(ret) && arg1)
9930 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9931 return -TARGET_EFAULT;
9932 __put_user(value.uptime, &target_value->uptime);
9933 __put_user(value.loads[0], &target_value->loads[0]);
9934 __put_user(value.loads[1], &target_value->loads[1]);
9935 __put_user(value.loads[2], &target_value->loads[2]);
9936 __put_user(value.totalram, &target_value->totalram);
9937 __put_user(value.freeram, &target_value->freeram);
9938 __put_user(value.sharedram, &target_value->sharedram);
9939 __put_user(value.bufferram, &target_value->bufferram);
9940 __put_user(value.totalswap, &target_value->totalswap);
9941 __put_user(value.freeswap, &target_value->freeswap);
9942 __put_user(value.procs, &target_value->procs);
9943 __put_user(value.totalhigh, &target_value->totalhigh);
9944 __put_user(value.freehigh, &target_value->freehigh);
9945 __put_user(value.mem_unit, &target_value->mem_unit);
9946 unlock_user_struct(target_value, arg1, 1);
9949 return ret;
9950 #ifdef TARGET_NR_ipc
9951 case TARGET_NR_ipc:
9952 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9953 #endif
9954 #ifdef TARGET_NR_semget
9955 case TARGET_NR_semget:
9956 return get_errno(semget(arg1, arg2, arg3));
9957 #endif
9958 #ifdef TARGET_NR_semop
9959 case TARGET_NR_semop:
9960 return do_semtimedop(arg1, arg2, arg3, 0, false);
9961 #endif
9962 #ifdef TARGET_NR_semtimedop
9963 case TARGET_NR_semtimedop:
9964 return do_semtimedop(arg1, arg2, arg3, arg4, false);
9965 #endif
9966 #ifdef TARGET_NR_semtimedop_time64
9967 case TARGET_NR_semtimedop_time64:
9968 return do_semtimedop(arg1, arg2, arg3, arg4, true);
9969 #endif
9970 #ifdef TARGET_NR_semctl
9971 case TARGET_NR_semctl:
9972 return do_semctl(arg1, arg2, arg3, arg4);
9973 #endif
9974 #ifdef TARGET_NR_msgctl
9975 case TARGET_NR_msgctl:
9976 return do_msgctl(arg1, arg2, arg3);
9977 #endif
9978 #ifdef TARGET_NR_msgget
9979 case TARGET_NR_msgget:
9980 return get_errno(msgget(arg1, arg2));
9981 #endif
9982 #ifdef TARGET_NR_msgrcv
9983 case TARGET_NR_msgrcv:
9984 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9985 #endif
9986 #ifdef TARGET_NR_msgsnd
9987 case TARGET_NR_msgsnd:
9988 return do_msgsnd(arg1, arg2, arg3, arg4);
9989 #endif
9990 #ifdef TARGET_NR_shmget
9991 case TARGET_NR_shmget:
9992 return get_errno(shmget(arg1, arg2, arg3));
9993 #endif
9994 #ifdef TARGET_NR_shmctl
9995 case TARGET_NR_shmctl:
9996 return do_shmctl(arg1, arg2, arg3);
9997 #endif
9998 #ifdef TARGET_NR_shmat
9999 case TARGET_NR_shmat:
10000 return do_shmat(cpu_env, arg1, arg2, arg3);
10001 #endif
10002 #ifdef TARGET_NR_shmdt
10003 case TARGET_NR_shmdt:
10004 return do_shmdt(arg1);
10005 #endif
10006 case TARGET_NR_fsync:
10007 return get_errno(fsync(arg1));
10008 case TARGET_NR_clone:
10009 /* Linux manages to have three different orderings for its
10010 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10011 * match the kernel's CONFIG_CLONE_* settings.
10012 * Microblaze is further special in that it uses a sixth
10013 * implicit argument to clone for the TLS pointer.
10015 #if defined(TARGET_MICROBLAZE)
10016 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10017 #elif defined(TARGET_CLONE_BACKWARDS)
10018 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10019 #elif defined(TARGET_CLONE_BACKWARDS2)
10020 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10021 #else
10022 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10023 #endif
10024 return ret;
10025 #ifdef __NR_exit_group
10026 /* new thread calls */
10027 case TARGET_NR_exit_group:
10028 preexit_cleanup(cpu_env, arg1);
10029 return get_errno(exit_group(arg1));
10030 #endif
10031 case TARGET_NR_setdomainname:
10032 if (!(p = lock_user_string(arg1)))
10033 return -TARGET_EFAULT;
10034 ret = get_errno(setdomainname(p, arg2));
10035 unlock_user(p, arg1, 0);
10036 return ret;
10037 case TARGET_NR_uname:
10038 /* no need to transcode because we use the linux syscall */
10040 struct new_utsname * buf;
10042 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10043 return -TARGET_EFAULT;
10044 ret = get_errno(sys_uname(buf));
10045 if (!is_error(ret)) {
10046 /* Overwrite the native machine name with whatever is being
10047 emulated. */
10048 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10049 sizeof(buf->machine));
10050 /* Allow the user to override the reported release. */
10051 if (qemu_uname_release && *qemu_uname_release) {
10052 g_strlcpy(buf->release, qemu_uname_release,
10053 sizeof(buf->release));
10056 unlock_user_struct(buf, arg1, 1);
10058 return ret;
10059 #ifdef TARGET_I386
10060 case TARGET_NR_modify_ldt:
10061 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10062 #if !defined(TARGET_X86_64)
10063 case TARGET_NR_vm86:
10064 return do_vm86(cpu_env, arg1, arg2);
10065 #endif
10066 #endif
10067 #if defined(TARGET_NR_adjtimex)
10068 case TARGET_NR_adjtimex:
10070 struct timex host_buf;
10072 if (target_to_host_timex(&host_buf, arg1) != 0) {
10073 return -TARGET_EFAULT;
10075 ret = get_errno(adjtimex(&host_buf));
10076 if (!is_error(ret)) {
10077 if (host_to_target_timex(arg1, &host_buf) != 0) {
10078 return -TARGET_EFAULT;
10082 return ret;
10083 #endif
10084 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10085 case TARGET_NR_clock_adjtime:
10087 struct timex htx, *phtx = &htx;
10089 if (target_to_host_timex(phtx, arg2) != 0) {
10090 return -TARGET_EFAULT;
10092 ret = get_errno(clock_adjtime(arg1, phtx));
10093 if (!is_error(ret) && phtx) {
10094 if (host_to_target_timex(arg2, phtx) != 0) {
10095 return -TARGET_EFAULT;
10099 return ret;
10100 #endif
10101 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10102 case TARGET_NR_clock_adjtime64:
10104 struct timex htx;
10106 if (target_to_host_timex64(&htx, arg2) != 0) {
10107 return -TARGET_EFAULT;
10109 ret = get_errno(clock_adjtime(arg1, &htx));
10110 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10111 return -TARGET_EFAULT;
10114 return ret;
10115 #endif
10116 case TARGET_NR_getpgid:
10117 return get_errno(getpgid(arg1));
10118 case TARGET_NR_fchdir:
10119 return get_errno(fchdir(arg1));
10120 case TARGET_NR_personality:
10121 return get_errno(personality(arg1));
10122 #ifdef TARGET_NR__llseek /* Not on alpha */
10123 case TARGET_NR__llseek:
10125 int64_t res;
10126 #if !defined(__NR_llseek)
10127 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10128 if (res == -1) {
10129 ret = get_errno(res);
10130 } else {
10131 ret = 0;
10133 #else
10134 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10135 #endif
10136 if ((ret == 0) && put_user_s64(res, arg4)) {
10137 return -TARGET_EFAULT;
10140 return ret;
10141 #endif
10142 #ifdef TARGET_NR_getdents
10143 case TARGET_NR_getdents:
10144 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10145 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10147 struct target_dirent *target_dirp;
10148 struct linux_dirent *dirp;
10149 abi_long count = arg3;
10151 dirp = g_try_malloc(count);
10152 if (!dirp) {
10153 return -TARGET_ENOMEM;
10156 ret = get_errno(sys_getdents(arg1, dirp, count));
10157 if (!is_error(ret)) {
10158 struct linux_dirent *de;
10159 struct target_dirent *tde;
10160 int len = ret;
10161 int reclen, treclen;
10162 int count1, tnamelen;
10164 count1 = 0;
10165 de = dirp;
10166 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10167 return -TARGET_EFAULT;
10168 tde = target_dirp;
10169 while (len > 0) {
10170 reclen = de->d_reclen;
10171 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10172 assert(tnamelen >= 0);
10173 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10174 assert(count1 + treclen <= count);
10175 tde->d_reclen = tswap16(treclen);
10176 tde->d_ino = tswapal(de->d_ino);
10177 tde->d_off = tswapal(de->d_off);
10178 memcpy(tde->d_name, de->d_name, tnamelen);
10179 de = (struct linux_dirent *)((char *)de + reclen);
10180 len -= reclen;
10181 tde = (struct target_dirent *)((char *)tde + treclen);
10182 count1 += treclen;
10184 ret = count1;
10185 unlock_user(target_dirp, arg2, ret);
10187 g_free(dirp);
10189 #else
10191 struct linux_dirent *dirp;
10192 abi_long count = arg3;
10194 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10195 return -TARGET_EFAULT;
10196 ret = get_errno(sys_getdents(arg1, dirp, count));
10197 if (!is_error(ret)) {
10198 struct linux_dirent *de;
10199 int len = ret;
10200 int reclen;
10201 de = dirp;
10202 while (len > 0) {
10203 reclen = de->d_reclen;
10204 if (reclen > len)
10205 break;
10206 de->d_reclen = tswap16(reclen);
10207 tswapls(&de->d_ino);
10208 tswapls(&de->d_off);
10209 de = (struct linux_dirent *)((char *)de + reclen);
10210 len -= reclen;
10213 unlock_user(dirp, arg2, ret);
10215 #endif
10216 #else
10217 /* Implement getdents in terms of getdents64 */
10219 struct linux_dirent64 *dirp;
10220 abi_long count = arg3;
10222 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10223 if (!dirp) {
10224 return -TARGET_EFAULT;
10226 ret = get_errno(sys_getdents64(arg1, dirp, count));
10227 if (!is_error(ret)) {
10228 /* Convert the dirent64 structs to target dirent. We do this
10229 * in-place, since we can guarantee that a target_dirent is no
10230 * larger than a dirent64; however this means we have to be
10231 * careful to read everything before writing in the new format.
10233 struct linux_dirent64 *de;
10234 struct target_dirent *tde;
10235 int len = ret;
10236 int tlen = 0;
10238 de = dirp;
10239 tde = (struct target_dirent *)dirp;
10240 while (len > 0) {
10241 int namelen, treclen;
10242 int reclen = de->d_reclen;
10243 uint64_t ino = de->d_ino;
10244 int64_t off = de->d_off;
10245 uint8_t type = de->d_type;
10247 namelen = strlen(de->d_name);
10248 treclen = offsetof(struct target_dirent, d_name)
10249 + namelen + 2;
10250 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10252 memmove(tde->d_name, de->d_name, namelen + 1);
10253 tde->d_ino = tswapal(ino);
10254 tde->d_off = tswapal(off);
10255 tde->d_reclen = tswap16(treclen);
10256 /* The target_dirent type is in what was formerly a padding
10257 * byte at the end of the structure:
10259 *(((char *)tde) + treclen - 1) = type;
10261 de = (struct linux_dirent64 *)((char *)de + reclen);
10262 tde = (struct target_dirent *)((char *)tde + treclen);
10263 len -= reclen;
10264 tlen += treclen;
10266 ret = tlen;
10268 unlock_user(dirp, arg2, ret);
10270 #endif
10271 return ret;
10272 #endif /* TARGET_NR_getdents */
10273 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10274 case TARGET_NR_getdents64:
10276 struct linux_dirent64 *dirp;
10277 abi_long count = arg3;
10278 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10279 return -TARGET_EFAULT;
10280 ret = get_errno(sys_getdents64(arg1, dirp, count));
10281 if (!is_error(ret)) {
10282 struct linux_dirent64 *de;
10283 int len = ret;
10284 int reclen;
10285 de = dirp;
10286 while (len > 0) {
10287 reclen = de->d_reclen;
10288 if (reclen > len)
10289 break;
10290 de->d_reclen = tswap16(reclen);
10291 tswap64s((uint64_t *)&de->d_ino);
10292 tswap64s((uint64_t *)&de->d_off);
10293 de = (struct linux_dirent64 *)((char *)de + reclen);
10294 len -= reclen;
10297 unlock_user(dirp, arg2, ret);
10299 return ret;
10300 #endif /* TARGET_NR_getdents64 */
10301 #if defined(TARGET_NR__newselect)
10302 case TARGET_NR__newselect:
10303 return do_select(arg1, arg2, arg3, arg4, arg5);
10304 #endif
10305 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10306 # ifdef TARGET_NR_poll
10307 case TARGET_NR_poll:
10308 # endif
10309 # ifdef TARGET_NR_ppoll
10310 case TARGET_NR_ppoll:
10311 # endif
10313 struct target_pollfd *target_pfd;
10314 unsigned int nfds = arg2;
10315 struct pollfd *pfd;
10316 unsigned int i;
10318 pfd = NULL;
10319 target_pfd = NULL;
10320 if (nfds) {
10321 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10322 return -TARGET_EINVAL;
10325 target_pfd = lock_user(VERIFY_WRITE, arg1,
10326 sizeof(struct target_pollfd) * nfds, 1);
10327 if (!target_pfd) {
10328 return -TARGET_EFAULT;
10331 pfd = alloca(sizeof(struct pollfd) * nfds);
10332 for (i = 0; i < nfds; i++) {
10333 pfd[i].fd = tswap32(target_pfd[i].fd);
10334 pfd[i].events = tswap16(target_pfd[i].events);
10338 switch (num) {
10339 # ifdef TARGET_NR_ppoll
10340 case TARGET_NR_ppoll:
10342 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10343 target_sigset_t *target_set;
10344 sigset_t _set, *set = &_set;
10346 if (arg3) {
10347 if (target_to_host_timespec(timeout_ts, arg3)) {
10348 unlock_user(target_pfd, arg1, 0);
10349 return -TARGET_EFAULT;
10351 } else {
10352 timeout_ts = NULL;
10355 if (arg4) {
10356 if (arg5 != sizeof(target_sigset_t)) {
10357 unlock_user(target_pfd, arg1, 0);
10358 return -TARGET_EINVAL;
10361 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10362 if (!target_set) {
10363 unlock_user(target_pfd, arg1, 0);
10364 return -TARGET_EFAULT;
10366 target_to_host_sigset(set, target_set);
10367 } else {
10368 set = NULL;
10371 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10372 set, SIGSET_T_SIZE));
10374 if (!is_error(ret) && arg3) {
10375 host_to_target_timespec(arg3, timeout_ts);
10377 if (arg4) {
10378 unlock_user(target_set, arg4, 0);
10380 break;
10382 # endif
10383 # ifdef TARGET_NR_poll
10384 case TARGET_NR_poll:
10386 struct timespec ts, *pts;
10388 if (arg3 >= 0) {
10389 /* Convert ms to secs, ns */
10390 ts.tv_sec = arg3 / 1000;
10391 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10392 pts = &ts;
10393 } else {
10394 /* -ve poll() timeout means "infinite" */
10395 pts = NULL;
10397 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10398 break;
10400 # endif
10401 default:
10402 g_assert_not_reached();
10405 if (!is_error(ret)) {
10406 for(i = 0; i < nfds; i++) {
10407 target_pfd[i].revents = tswap16(pfd[i].revents);
10410 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10412 return ret;
10413 #endif
10414 case TARGET_NR_flock:
10415 /* NOTE: the flock constant seems to be the same for every
10416 Linux platform */
10417 return get_errno(safe_flock(arg1, arg2));
10418 case TARGET_NR_readv:
10420 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10421 if (vec != NULL) {
10422 ret = get_errno(safe_readv(arg1, vec, arg3));
10423 unlock_iovec(vec, arg2, arg3, 1);
10424 } else {
10425 ret = -host_to_target_errno(errno);
10428 return ret;
10429 case TARGET_NR_writev:
10431 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10432 if (vec != NULL) {
10433 ret = get_errno(safe_writev(arg1, vec, arg3));
10434 unlock_iovec(vec, arg2, arg3, 0);
10435 } else {
10436 ret = -host_to_target_errno(errno);
10439 return ret;
10440 #if defined(TARGET_NR_preadv)
10441 case TARGET_NR_preadv:
10443 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10444 if (vec != NULL) {
10445 unsigned long low, high;
10447 target_to_host_low_high(arg4, arg5, &low, &high);
10448 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10449 unlock_iovec(vec, arg2, arg3, 1);
10450 } else {
10451 ret = -host_to_target_errno(errno);
10454 return ret;
10455 #endif
10456 #if defined(TARGET_NR_pwritev)
10457 case TARGET_NR_pwritev:
10459 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10460 if (vec != NULL) {
10461 unsigned long low, high;
10463 target_to_host_low_high(arg4, arg5, &low, &high);
10464 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10465 unlock_iovec(vec, arg2, arg3, 0);
10466 } else {
10467 ret = -host_to_target_errno(errno);
10470 return ret;
10471 #endif
10472 case TARGET_NR_getsid:
10473 return get_errno(getsid(arg1));
10474 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10475 case TARGET_NR_fdatasync:
10476 return get_errno(fdatasync(arg1));
10477 #endif
10478 #ifdef TARGET_NR__sysctl
10479 case TARGET_NR__sysctl:
10480 /* We don't implement this, but ENOTDIR is always a safe
10481 return value. */
10482 return -TARGET_ENOTDIR;
10483 #endif
10484 case TARGET_NR_sched_getaffinity:
10486 unsigned int mask_size;
10487 unsigned long *mask;
10490 * sched_getaffinity needs multiples of ulong, so need to take
10491 * care of mismatches between target ulong and host ulong sizes.
10493 if (arg2 & (sizeof(abi_ulong) - 1)) {
10494 return -TARGET_EINVAL;
10496 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10498 mask = alloca(mask_size);
10499 memset(mask, 0, mask_size);
10500 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10502 if (!is_error(ret)) {
10503 if (ret > arg2) {
10504 /* More data returned than the caller's buffer will fit.
10505 * This only happens if sizeof(abi_long) < sizeof(long)
10506 * and the caller passed us a buffer holding an odd number
10507 * of abi_longs. If the host kernel is actually using the
10508 * extra 4 bytes then fail EINVAL; otherwise we can just
10509 * ignore them and only copy the interesting part.
10511 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10512 if (numcpus > arg2 * 8) {
10513 return -TARGET_EINVAL;
10515 ret = arg2;
10518 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10519 return -TARGET_EFAULT;
10523 return ret;
10524 case TARGET_NR_sched_setaffinity:
10526 unsigned int mask_size;
10527 unsigned long *mask;
10530 * sched_setaffinity needs multiples of ulong, so need to take
10531 * care of mismatches between target ulong and host ulong sizes.
10533 if (arg2 & (sizeof(abi_ulong) - 1)) {
10534 return -TARGET_EINVAL;
10536 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10537 mask = alloca(mask_size);
10539 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10540 if (ret) {
10541 return ret;
10544 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10546 case TARGET_NR_getcpu:
10548 unsigned cpu, node;
10549 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10550 arg2 ? &node : NULL,
10551 NULL));
10552 if (is_error(ret)) {
10553 return ret;
10555 if (arg1 && put_user_u32(cpu, arg1)) {
10556 return -TARGET_EFAULT;
10558 if (arg2 && put_user_u32(node, arg2)) {
10559 return -TARGET_EFAULT;
10562 return ret;
10563 case TARGET_NR_sched_setparam:
10565 struct sched_param *target_schp;
10566 struct sched_param schp;
10568 if (arg2 == 0) {
10569 return -TARGET_EINVAL;
10571 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10572 return -TARGET_EFAULT;
10573 schp.sched_priority = tswap32(target_schp->sched_priority);
10574 unlock_user_struct(target_schp, arg2, 0);
10575 return get_errno(sched_setparam(arg1, &schp));
10577 case TARGET_NR_sched_getparam:
10579 struct sched_param *target_schp;
10580 struct sched_param schp;
10582 if (arg2 == 0) {
10583 return -TARGET_EINVAL;
10585 ret = get_errno(sched_getparam(arg1, &schp));
10586 if (!is_error(ret)) {
10587 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10588 return -TARGET_EFAULT;
10589 target_schp->sched_priority = tswap32(schp.sched_priority);
10590 unlock_user_struct(target_schp, arg2, 1);
10593 return ret;
10594 case TARGET_NR_sched_setscheduler:
10596 struct sched_param *target_schp;
10597 struct sched_param schp;
10598 if (arg3 == 0) {
10599 return -TARGET_EINVAL;
10601 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10602 return -TARGET_EFAULT;
10603 schp.sched_priority = tswap32(target_schp->sched_priority);
10604 unlock_user_struct(target_schp, arg3, 0);
10605 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10607 case TARGET_NR_sched_getscheduler:
10608 return get_errno(sched_getscheduler(arg1));
10609 case TARGET_NR_sched_yield:
10610 return get_errno(sched_yield());
10611 case TARGET_NR_sched_get_priority_max:
10612 return get_errno(sched_get_priority_max(arg1));
10613 case TARGET_NR_sched_get_priority_min:
10614 return get_errno(sched_get_priority_min(arg1));
10615 #ifdef TARGET_NR_sched_rr_get_interval
10616 case TARGET_NR_sched_rr_get_interval:
10618 struct timespec ts;
10619 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10620 if (!is_error(ret)) {
10621 ret = host_to_target_timespec(arg2, &ts);
10624 return ret;
10625 #endif
10626 #ifdef TARGET_NR_sched_rr_get_interval_time64
10627 case TARGET_NR_sched_rr_get_interval_time64:
10629 struct timespec ts;
10630 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10631 if (!is_error(ret)) {
10632 ret = host_to_target_timespec64(arg2, &ts);
10635 return ret;
10636 #endif
10637 #if defined(TARGET_NR_nanosleep)
10638 case TARGET_NR_nanosleep:
10640 struct timespec req, rem;
10641 target_to_host_timespec(&req, arg1);
10642 ret = get_errno(safe_nanosleep(&req, &rem));
10643 if (is_error(ret) && arg2) {
10644 host_to_target_timespec(arg2, &rem);
10647 return ret;
10648 #endif
10649 case TARGET_NR_prctl:
10650 switch (arg1) {
10651 case PR_GET_PDEATHSIG:
10653 int deathsig;
10654 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10655 if (!is_error(ret) && arg2
10656 && put_user_ual(deathsig, arg2)) {
10657 return -TARGET_EFAULT;
10659 return ret;
10661 #ifdef PR_GET_NAME
10662 case PR_GET_NAME:
10664 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10665 if (!name) {
10666 return -TARGET_EFAULT;
10668 ret = get_errno(prctl(arg1, (unsigned long)name,
10669 arg3, arg4, arg5));
10670 unlock_user(name, arg2, 16);
10671 return ret;
10673 case PR_SET_NAME:
10675 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10676 if (!name) {
10677 return -TARGET_EFAULT;
10679 ret = get_errno(prctl(arg1, (unsigned long)name,
10680 arg3, arg4, arg5));
10681 unlock_user(name, arg2, 0);
10682 return ret;
10684 #endif
10685 #ifdef TARGET_MIPS
10686 case TARGET_PR_GET_FP_MODE:
10688 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10689 ret = 0;
10690 if (env->CP0_Status & (1 << CP0St_FR)) {
10691 ret |= TARGET_PR_FP_MODE_FR;
10693 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10694 ret |= TARGET_PR_FP_MODE_FRE;
10696 return ret;
10698 case TARGET_PR_SET_FP_MODE:
10700 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10701 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10702 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10703 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10704 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10706 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10707 TARGET_PR_FP_MODE_FRE;
10709 /* If nothing to change, return right away, successfully. */
10710 if (old_fr == new_fr && old_fre == new_fre) {
10711 return 0;
10713 /* Check the value is valid */
10714 if (arg2 & ~known_bits) {
10715 return -TARGET_EOPNOTSUPP;
10717 /* Setting FRE without FR is not supported. */
10718 if (new_fre && !new_fr) {
10719 return -TARGET_EOPNOTSUPP;
10721 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10722 /* FR1 is not supported */
10723 return -TARGET_EOPNOTSUPP;
10725 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10726 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10727 /* cannot set FR=0 */
10728 return -TARGET_EOPNOTSUPP;
10730 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10731 /* Cannot set FRE=1 */
10732 return -TARGET_EOPNOTSUPP;
10735 int i;
10736 fpr_t *fpr = env->active_fpu.fpr;
10737 for (i = 0; i < 32 ; i += 2) {
10738 if (!old_fr && new_fr) {
10739 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10740 } else if (old_fr && !new_fr) {
10741 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10745 if (new_fr) {
10746 env->CP0_Status |= (1 << CP0St_FR);
10747 env->hflags |= MIPS_HFLAG_F64;
10748 } else {
10749 env->CP0_Status &= ~(1 << CP0St_FR);
10750 env->hflags &= ~MIPS_HFLAG_F64;
10752 if (new_fre) {
10753 env->CP0_Config5 |= (1 << CP0C5_FRE);
10754 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10755 env->hflags |= MIPS_HFLAG_FRE;
10757 } else {
10758 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10759 env->hflags &= ~MIPS_HFLAG_FRE;
10762 return 0;
10764 #endif /* MIPS */
10765 #ifdef TARGET_AARCH64
10766 case TARGET_PR_SVE_SET_VL:
10768 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10769 * PR_SVE_VL_INHERIT. Note the kernel definition
10770 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10771 * even though the current architectural maximum is VQ=16.
10773 ret = -TARGET_EINVAL;
10774 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10775 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10776 CPUARMState *env = cpu_env;
10777 ARMCPU *cpu = env_archcpu(env);
10778 uint32_t vq, old_vq;
10780 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10781 vq = MAX(arg2 / 16, 1);
10782 vq = MIN(vq, cpu->sve_max_vq);
10784 if (vq < old_vq) {
10785 aarch64_sve_narrow_vq(env, vq);
10787 env->vfp.zcr_el[1] = vq - 1;
10788 arm_rebuild_hflags(env);
10789 ret = vq * 16;
10791 return ret;
10792 case TARGET_PR_SVE_GET_VL:
10793 ret = -TARGET_EINVAL;
10795 ARMCPU *cpu = env_archcpu(cpu_env);
10796 if (cpu_isar_feature(aa64_sve, cpu)) {
10797 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10800 return ret;
10801 case TARGET_PR_PAC_RESET_KEYS:
10803 CPUARMState *env = cpu_env;
10804 ARMCPU *cpu = env_archcpu(env);
10806 if (arg3 || arg4 || arg5) {
10807 return -TARGET_EINVAL;
10809 if (cpu_isar_feature(aa64_pauth, cpu)) {
10810 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10811 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10812 TARGET_PR_PAC_APGAKEY);
10813 int ret = 0;
10814 Error *err = NULL;
10816 if (arg2 == 0) {
10817 arg2 = all;
10818 } else if (arg2 & ~all) {
10819 return -TARGET_EINVAL;
10821 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10822 ret |= qemu_guest_getrandom(&env->keys.apia,
10823 sizeof(ARMPACKey), &err);
10825 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10826 ret |= qemu_guest_getrandom(&env->keys.apib,
10827 sizeof(ARMPACKey), &err);
10829 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10830 ret |= qemu_guest_getrandom(&env->keys.apda,
10831 sizeof(ARMPACKey), &err);
10833 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10834 ret |= qemu_guest_getrandom(&env->keys.apdb,
10835 sizeof(ARMPACKey), &err);
10837 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10838 ret |= qemu_guest_getrandom(&env->keys.apga,
10839 sizeof(ARMPACKey), &err);
10841 if (ret != 0) {
10843 * Some unknown failure in the crypto. The best
10844 * we can do is log it and fail the syscall.
10845 * The real syscall cannot fail this way.
10847 qemu_log_mask(LOG_UNIMP,
10848 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10849 error_get_pretty(err));
10850 error_free(err);
10851 return -TARGET_EIO;
10853 return 0;
10856 return -TARGET_EINVAL;
10857 #endif /* AARCH64 */
10858 case PR_GET_SECCOMP:
10859 case PR_SET_SECCOMP:
10860 /* Disable seccomp to prevent the target disabling syscalls we
10861 * need. */
10862 return -TARGET_EINVAL;
10863 default:
10864 /* Most prctl options have no pointer arguments */
10865 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10867 break;
10868 #ifdef TARGET_NR_arch_prctl
10869 case TARGET_NR_arch_prctl:
10870 return do_arch_prctl(cpu_env, arg1, arg2);
10871 #endif
10872 #ifdef TARGET_NR_pread64
10873 case TARGET_NR_pread64:
10874 if (regpairs_aligned(cpu_env, num)) {
10875 arg4 = arg5;
10876 arg5 = arg6;
10878 if (arg2 == 0 && arg3 == 0) {
10879 /* Special-case NULL buffer and zero length, which should succeed */
10880 p = 0;
10881 } else {
10882 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10883 if (!p) {
10884 return -TARGET_EFAULT;
10887 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10888 unlock_user(p, arg2, ret);
10889 return ret;
10890 case TARGET_NR_pwrite64:
10891 if (regpairs_aligned(cpu_env, num)) {
10892 arg4 = arg5;
10893 arg5 = arg6;
10895 if (arg2 == 0 && arg3 == 0) {
10896 /* Special-case NULL buffer and zero length, which should succeed */
10897 p = 0;
10898 } else {
10899 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10900 if (!p) {
10901 return -TARGET_EFAULT;
10904 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10905 unlock_user(p, arg2, 0);
10906 return ret;
10907 #endif
10908 case TARGET_NR_getcwd:
10909 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10910 return -TARGET_EFAULT;
10911 ret = get_errno(sys_getcwd1(p, arg2));
10912 unlock_user(p, arg1, ret);
10913 return ret;
10914 case TARGET_NR_capget:
10915 case TARGET_NR_capset:
10917 struct target_user_cap_header *target_header;
10918 struct target_user_cap_data *target_data = NULL;
10919 struct __user_cap_header_struct header;
10920 struct __user_cap_data_struct data[2];
10921 struct __user_cap_data_struct *dataptr = NULL;
10922 int i, target_datalen;
10923 int data_items = 1;
10925 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10926 return -TARGET_EFAULT;
10928 header.version = tswap32(target_header->version);
10929 header.pid = tswap32(target_header->pid);
10931 if (header.version != _LINUX_CAPABILITY_VERSION) {
10932 /* Version 2 and up takes pointer to two user_data structs */
10933 data_items = 2;
10936 target_datalen = sizeof(*target_data) * data_items;
10938 if (arg2) {
10939 if (num == TARGET_NR_capget) {
10940 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10941 } else {
10942 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10944 if (!target_data) {
10945 unlock_user_struct(target_header, arg1, 0);
10946 return -TARGET_EFAULT;
10949 if (num == TARGET_NR_capset) {
10950 for (i = 0; i < data_items; i++) {
10951 data[i].effective = tswap32(target_data[i].effective);
10952 data[i].permitted = tswap32(target_data[i].permitted);
10953 data[i].inheritable = tswap32(target_data[i].inheritable);
10957 dataptr = data;
10960 if (num == TARGET_NR_capget) {
10961 ret = get_errno(capget(&header, dataptr));
10962 } else {
10963 ret = get_errno(capset(&header, dataptr));
10966 /* The kernel always updates version for both capget and capset */
10967 target_header->version = tswap32(header.version);
10968 unlock_user_struct(target_header, arg1, 1);
10970 if (arg2) {
10971 if (num == TARGET_NR_capget) {
10972 for (i = 0; i < data_items; i++) {
10973 target_data[i].effective = tswap32(data[i].effective);
10974 target_data[i].permitted = tswap32(data[i].permitted);
10975 target_data[i].inheritable = tswap32(data[i].inheritable);
10977 unlock_user(target_data, arg2, target_datalen);
10978 } else {
10979 unlock_user(target_data, arg2, 0);
10982 return ret;
10984 case TARGET_NR_sigaltstack:
10985 return do_sigaltstack(arg1, arg2,
10986 get_sp_from_cpustate((CPUArchState *)cpu_env));
10988 #ifdef CONFIG_SENDFILE
10989 #ifdef TARGET_NR_sendfile
10990 case TARGET_NR_sendfile:
10992 off_t *offp = NULL;
10993 off_t off;
10994 if (arg3) {
10995 ret = get_user_sal(off, arg3);
10996 if (is_error(ret)) {
10997 return ret;
10999 offp = &off;
11001 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11002 if (!is_error(ret) && arg3) {
11003 abi_long ret2 = put_user_sal(off, arg3);
11004 if (is_error(ret2)) {
11005 ret = ret2;
11008 return ret;
11010 #endif
11011 #ifdef TARGET_NR_sendfile64
11012 case TARGET_NR_sendfile64:
11014 off_t *offp = NULL;
11015 off_t off;
11016 if (arg3) {
11017 ret = get_user_s64(off, arg3);
11018 if (is_error(ret)) {
11019 return ret;
11021 offp = &off;
11023 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11024 if (!is_error(ret) && arg3) {
11025 abi_long ret2 = put_user_s64(off, arg3);
11026 if (is_error(ret2)) {
11027 ret = ret2;
11030 return ret;
11032 #endif
11033 #endif
11034 #ifdef TARGET_NR_vfork
11035 case TARGET_NR_vfork:
11036 return get_errno(do_fork(cpu_env,
11037 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11038 0, 0, 0, 0));
11039 #endif
11040 #ifdef TARGET_NR_ugetrlimit
11041 case TARGET_NR_ugetrlimit:
11043 struct rlimit rlim;
11044 int resource = target_to_host_resource(arg1);
11045 ret = get_errno(getrlimit(resource, &rlim));
11046 if (!is_error(ret)) {
11047 struct target_rlimit *target_rlim;
11048 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11049 return -TARGET_EFAULT;
11050 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11051 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11052 unlock_user_struct(target_rlim, arg2, 1);
11054 return ret;
11056 #endif
11057 #ifdef TARGET_NR_truncate64
11058 case TARGET_NR_truncate64:
11059 if (!(p = lock_user_string(arg1)))
11060 return -TARGET_EFAULT;
11061 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11062 unlock_user(p, arg1, 0);
11063 return ret;
11064 #endif
11065 #ifdef TARGET_NR_ftruncate64
11066 case TARGET_NR_ftruncate64:
11067 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11068 #endif
11069 #ifdef TARGET_NR_stat64
11070 case TARGET_NR_stat64:
11071 if (!(p = lock_user_string(arg1))) {
11072 return -TARGET_EFAULT;
11074 ret = get_errno(stat(path(p), &st));
11075 unlock_user(p, arg1, 0);
11076 if (!is_error(ret))
11077 ret = host_to_target_stat64(cpu_env, arg2, &st);
11078 return ret;
11079 #endif
11080 #ifdef TARGET_NR_lstat64
11081 case TARGET_NR_lstat64:
11082 if (!(p = lock_user_string(arg1))) {
11083 return -TARGET_EFAULT;
11085 ret = get_errno(lstat(path(p), &st));
11086 unlock_user(p, arg1, 0);
11087 if (!is_error(ret))
11088 ret = host_to_target_stat64(cpu_env, arg2, &st);
11089 return ret;
11090 #endif
11091 #ifdef TARGET_NR_fstat64
11092 case TARGET_NR_fstat64:
11093 ret = get_errno(fstat(arg1, &st));
11094 if (!is_error(ret))
11095 ret = host_to_target_stat64(cpu_env, arg2, &st);
11096 return ret;
11097 #endif
11098 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11099 #ifdef TARGET_NR_fstatat64
11100 case TARGET_NR_fstatat64:
11101 #endif
11102 #ifdef TARGET_NR_newfstatat
11103 case TARGET_NR_newfstatat:
11104 #endif
11105 if (!(p = lock_user_string(arg2))) {
11106 return -TARGET_EFAULT;
11108 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11109 unlock_user(p, arg2, 0);
11110 if (!is_error(ret))
11111 ret = host_to_target_stat64(cpu_env, arg3, &st);
11112 return ret;
11113 #endif
11114 #if defined(TARGET_NR_statx)
11115 case TARGET_NR_statx:
11117 struct target_statx *target_stx;
11118 int dirfd = arg1;
11119 int flags = arg3;
11121 p = lock_user_string(arg2);
11122 if (p == NULL) {
11123 return -TARGET_EFAULT;
11125 #if defined(__NR_statx)
11128 * It is assumed that struct statx is architecture independent.
11130 struct target_statx host_stx;
11131 int mask = arg4;
11133 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11134 if (!is_error(ret)) {
11135 if (host_to_target_statx(&host_stx, arg5) != 0) {
11136 unlock_user(p, arg2, 0);
11137 return -TARGET_EFAULT;
11141 if (ret != -TARGET_ENOSYS) {
11142 unlock_user(p, arg2, 0);
11143 return ret;
11146 #endif
11147 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11148 unlock_user(p, arg2, 0);
11150 if (!is_error(ret)) {
11151 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11152 return -TARGET_EFAULT;
11154 memset(target_stx, 0, sizeof(*target_stx));
11155 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11156 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11157 __put_user(st.st_ino, &target_stx->stx_ino);
11158 __put_user(st.st_mode, &target_stx->stx_mode);
11159 __put_user(st.st_uid, &target_stx->stx_uid);
11160 __put_user(st.st_gid, &target_stx->stx_gid);
11161 __put_user(st.st_nlink, &target_stx->stx_nlink);
11162 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11163 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11164 __put_user(st.st_size, &target_stx->stx_size);
11165 __put_user(st.st_blksize, &target_stx->stx_blksize);
11166 __put_user(st.st_blocks, &target_stx->stx_blocks);
11167 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11168 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11169 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11170 unlock_user_struct(target_stx, arg5, 1);
11173 return ret;
11174 #endif
11175 #ifdef TARGET_NR_lchown
11176 case TARGET_NR_lchown:
11177 if (!(p = lock_user_string(arg1)))
11178 return -TARGET_EFAULT;
11179 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11180 unlock_user(p, arg1, 0);
11181 return ret;
11182 #endif
11183 #ifdef TARGET_NR_getuid
11184 case TARGET_NR_getuid:
11185 return get_errno(high2lowuid(getuid()));
11186 #endif
11187 #ifdef TARGET_NR_getgid
11188 case TARGET_NR_getgid:
11189 return get_errno(high2lowgid(getgid()));
11190 #endif
11191 #ifdef TARGET_NR_geteuid
11192 case TARGET_NR_geteuid:
11193 return get_errno(high2lowuid(geteuid()));
11194 #endif
11195 #ifdef TARGET_NR_getegid
11196 case TARGET_NR_getegid:
11197 return get_errno(high2lowgid(getegid()));
11198 #endif
11199 case TARGET_NR_setreuid:
11200 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11201 case TARGET_NR_setregid:
11202 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11203 case TARGET_NR_getgroups:
11205 int gidsetsize = arg1;
11206 target_id *target_grouplist;
11207 gid_t *grouplist;
11208 int i;
11210 grouplist = alloca(gidsetsize * sizeof(gid_t));
11211 ret = get_errno(getgroups(gidsetsize, grouplist));
11212 if (gidsetsize == 0)
11213 return ret;
11214 if (!is_error(ret)) {
11215 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11216 if (!target_grouplist)
11217 return -TARGET_EFAULT;
11218 for(i = 0;i < ret; i++)
11219 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11220 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11223 return ret;
11224 case TARGET_NR_setgroups:
11226 int gidsetsize = arg1;
11227 target_id *target_grouplist;
11228 gid_t *grouplist = NULL;
11229 int i;
11230 if (gidsetsize) {
11231 grouplist = alloca(gidsetsize * sizeof(gid_t));
11232 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11233 if (!target_grouplist) {
11234 return -TARGET_EFAULT;
11236 for (i = 0; i < gidsetsize; i++) {
11237 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11239 unlock_user(target_grouplist, arg2, 0);
11241 return get_errno(setgroups(gidsetsize, grouplist));
11243 case TARGET_NR_fchown:
11244 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11245 #if defined(TARGET_NR_fchownat)
11246 case TARGET_NR_fchownat:
11247 if (!(p = lock_user_string(arg2)))
11248 return -TARGET_EFAULT;
11249 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11250 low2highgid(arg4), arg5));
11251 unlock_user(p, arg2, 0);
11252 return ret;
11253 #endif
11254 #ifdef TARGET_NR_setresuid
11255 case TARGET_NR_setresuid:
11256 return get_errno(sys_setresuid(low2highuid(arg1),
11257 low2highuid(arg2),
11258 low2highuid(arg3)));
11259 #endif
11260 #ifdef TARGET_NR_getresuid
11261 case TARGET_NR_getresuid:
11263 uid_t ruid, euid, suid;
11264 ret = get_errno(getresuid(&ruid, &euid, &suid));
11265 if (!is_error(ret)) {
11266 if (put_user_id(high2lowuid(ruid), arg1)
11267 || put_user_id(high2lowuid(euid), arg2)
11268 || put_user_id(high2lowuid(suid), arg3))
11269 return -TARGET_EFAULT;
11272 return ret;
11273 #endif
11274 #ifdef TARGET_NR_getresgid
11275 case TARGET_NR_setresgid:
11276 return get_errno(sys_setresgid(low2highgid(arg1),
11277 low2highgid(arg2),
11278 low2highgid(arg3)));
11279 #endif
11280 #ifdef TARGET_NR_getresgid
11281 case TARGET_NR_getresgid:
11283 gid_t rgid, egid, sgid;
11284 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11285 if (!is_error(ret)) {
11286 if (put_user_id(high2lowgid(rgid), arg1)
11287 || put_user_id(high2lowgid(egid), arg2)
11288 || put_user_id(high2lowgid(sgid), arg3))
11289 return -TARGET_EFAULT;
11292 return ret;
11293 #endif
11294 #ifdef TARGET_NR_chown
11295 case TARGET_NR_chown:
11296 if (!(p = lock_user_string(arg1)))
11297 return -TARGET_EFAULT;
11298 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11299 unlock_user(p, arg1, 0);
11300 return ret;
11301 #endif
11302 case TARGET_NR_setuid:
11303 return get_errno(sys_setuid(low2highuid(arg1)));
11304 case TARGET_NR_setgid:
11305 return get_errno(sys_setgid(low2highgid(arg1)));
11306 case TARGET_NR_setfsuid:
11307 return get_errno(setfsuid(arg1));
11308 case TARGET_NR_setfsgid:
11309 return get_errno(setfsgid(arg1));
11311 #ifdef TARGET_NR_lchown32
11312 case TARGET_NR_lchown32:
11313 if (!(p = lock_user_string(arg1)))
11314 return -TARGET_EFAULT;
11315 ret = get_errno(lchown(p, arg2, arg3));
11316 unlock_user(p, arg1, 0);
11317 return ret;
11318 #endif
11319 #ifdef TARGET_NR_getuid32
11320 case TARGET_NR_getuid32:
11321 return get_errno(getuid());
11322 #endif
11324 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11325 /* Alpha specific */
11326 case TARGET_NR_getxuid:
11328 uid_t euid;
11329 euid=geteuid();
11330 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11332 return get_errno(getuid());
11333 #endif
11334 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11335 /* Alpha specific */
11336 case TARGET_NR_getxgid:
11338 uid_t egid;
11339 egid=getegid();
11340 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11342 return get_errno(getgid());
11343 #endif
11344 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11345 /* Alpha specific */
11346 case TARGET_NR_osf_getsysinfo:
11347 ret = -TARGET_EOPNOTSUPP;
11348 switch (arg1) {
11349 case TARGET_GSI_IEEE_FP_CONTROL:
11351 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11352 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11354 swcr &= ~SWCR_STATUS_MASK;
11355 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11357 if (put_user_u64 (swcr, arg2))
11358 return -TARGET_EFAULT;
11359 ret = 0;
11361 break;
11363 /* case GSI_IEEE_STATE_AT_SIGNAL:
11364 -- Not implemented in linux kernel.
11365 case GSI_UACPROC:
11366 -- Retrieves current unaligned access state; not much used.
11367 case GSI_PROC_TYPE:
11368 -- Retrieves implver information; surely not used.
11369 case GSI_GET_HWRPB:
11370 -- Grabs a copy of the HWRPB; surely not used.
11373 return ret;
11374 #endif
11375 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11376 /* Alpha specific */
11377 case TARGET_NR_osf_setsysinfo:
11378 ret = -TARGET_EOPNOTSUPP;
11379 switch (arg1) {
11380 case TARGET_SSI_IEEE_FP_CONTROL:
11382 uint64_t swcr, fpcr;
11384 if (get_user_u64 (swcr, arg2)) {
11385 return -TARGET_EFAULT;
11389 * The kernel calls swcr_update_status to update the
11390 * status bits from the fpcr at every point that it
11391 * could be queried. Therefore, we store the status
11392 * bits only in FPCR.
11394 ((CPUAlphaState *)cpu_env)->swcr
11395 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11397 fpcr = cpu_alpha_load_fpcr(cpu_env);
11398 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11399 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11400 cpu_alpha_store_fpcr(cpu_env, fpcr);
11401 ret = 0;
11403 break;
11405 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11407 uint64_t exc, fpcr, fex;
11409 if (get_user_u64(exc, arg2)) {
11410 return -TARGET_EFAULT;
11412 exc &= SWCR_STATUS_MASK;
11413 fpcr = cpu_alpha_load_fpcr(cpu_env);
11415 /* Old exceptions are not signaled. */
11416 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11417 fex = exc & ~fex;
11418 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11419 fex &= ((CPUArchState *)cpu_env)->swcr;
11421 /* Update the hardware fpcr. */
11422 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11423 cpu_alpha_store_fpcr(cpu_env, fpcr);
11425 if (fex) {
11426 int si_code = TARGET_FPE_FLTUNK;
11427 target_siginfo_t info;
11429 if (fex & SWCR_TRAP_ENABLE_DNO) {
11430 si_code = TARGET_FPE_FLTUND;
11432 if (fex & SWCR_TRAP_ENABLE_INE) {
11433 si_code = TARGET_FPE_FLTRES;
11435 if (fex & SWCR_TRAP_ENABLE_UNF) {
11436 si_code = TARGET_FPE_FLTUND;
11438 if (fex & SWCR_TRAP_ENABLE_OVF) {
11439 si_code = TARGET_FPE_FLTOVF;
11441 if (fex & SWCR_TRAP_ENABLE_DZE) {
11442 si_code = TARGET_FPE_FLTDIV;
11444 if (fex & SWCR_TRAP_ENABLE_INV) {
11445 si_code = TARGET_FPE_FLTINV;
11448 info.si_signo = SIGFPE;
11449 info.si_errno = 0;
11450 info.si_code = si_code;
11451 info._sifields._sigfault._addr
11452 = ((CPUArchState *)cpu_env)->pc;
11453 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11454 QEMU_SI_FAULT, &info);
11456 ret = 0;
11458 break;
11460 /* case SSI_NVPAIRS:
11461 -- Used with SSIN_UACPROC to enable unaligned accesses.
11462 case SSI_IEEE_STATE_AT_SIGNAL:
11463 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11464 -- Not implemented in linux kernel
11467 return ret;
11468 #endif
11469 #ifdef TARGET_NR_osf_sigprocmask
11470 /* Alpha specific. */
11471 case TARGET_NR_osf_sigprocmask:
11473 abi_ulong mask;
11474 int how;
11475 sigset_t set, oldset;
11477 switch(arg1) {
11478 case TARGET_SIG_BLOCK:
11479 how = SIG_BLOCK;
11480 break;
11481 case TARGET_SIG_UNBLOCK:
11482 how = SIG_UNBLOCK;
11483 break;
11484 case TARGET_SIG_SETMASK:
11485 how = SIG_SETMASK;
11486 break;
11487 default:
11488 return -TARGET_EINVAL;
11490 mask = arg2;
11491 target_to_host_old_sigset(&set, &mask);
11492 ret = do_sigprocmask(how, &set, &oldset);
11493 if (!ret) {
11494 host_to_target_old_sigset(&mask, &oldset);
11495 ret = mask;
11498 return ret;
11499 #endif
11501 #ifdef TARGET_NR_getgid32
11502 case TARGET_NR_getgid32:
11503 return get_errno(getgid());
11504 #endif
11505 #ifdef TARGET_NR_geteuid32
11506 case TARGET_NR_geteuid32:
11507 return get_errno(geteuid());
11508 #endif
11509 #ifdef TARGET_NR_getegid32
11510 case TARGET_NR_getegid32:
11511 return get_errno(getegid());
11512 #endif
11513 #ifdef TARGET_NR_setreuid32
11514 case TARGET_NR_setreuid32:
11515 return get_errno(setreuid(arg1, arg2));
11516 #endif
11517 #ifdef TARGET_NR_setregid32
11518 case TARGET_NR_setregid32:
11519 return get_errno(setregid(arg1, arg2));
11520 #endif
11521 #ifdef TARGET_NR_getgroups32
11522 case TARGET_NR_getgroups32:
11524 int gidsetsize = arg1;
11525 uint32_t *target_grouplist;
11526 gid_t *grouplist;
11527 int i;
11529 grouplist = alloca(gidsetsize * sizeof(gid_t));
11530 ret = get_errno(getgroups(gidsetsize, grouplist));
11531 if (gidsetsize == 0)
11532 return ret;
11533 if (!is_error(ret)) {
11534 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11535 if (!target_grouplist) {
11536 return -TARGET_EFAULT;
11538 for(i = 0;i < ret; i++)
11539 target_grouplist[i] = tswap32(grouplist[i]);
11540 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11543 return ret;
11544 #endif
11545 #ifdef TARGET_NR_setgroups32
11546 case TARGET_NR_setgroups32:
11548 int gidsetsize = arg1;
11549 uint32_t *target_grouplist;
11550 gid_t *grouplist;
11551 int i;
11553 grouplist = alloca(gidsetsize * sizeof(gid_t));
11554 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11555 if (!target_grouplist) {
11556 return -TARGET_EFAULT;
11558 for(i = 0;i < gidsetsize; i++)
11559 grouplist[i] = tswap32(target_grouplist[i]);
11560 unlock_user(target_grouplist, arg2, 0);
11561 return get_errno(setgroups(gidsetsize, grouplist));
11563 #endif
11564 #ifdef TARGET_NR_fchown32
11565 case TARGET_NR_fchown32:
11566 return get_errno(fchown(arg1, arg2, arg3));
11567 #endif
11568 #ifdef TARGET_NR_setresuid32
11569 case TARGET_NR_setresuid32:
11570 return get_errno(sys_setresuid(arg1, arg2, arg3));
11571 #endif
11572 #ifdef TARGET_NR_getresuid32
11573 case TARGET_NR_getresuid32:
11575 uid_t ruid, euid, suid;
11576 ret = get_errno(getresuid(&ruid, &euid, &suid));
11577 if (!is_error(ret)) {
11578 if (put_user_u32(ruid, arg1)
11579 || put_user_u32(euid, arg2)
11580 || put_user_u32(suid, arg3))
11581 return -TARGET_EFAULT;
11584 return ret;
11585 #endif
11586 #ifdef TARGET_NR_setresgid32
11587 case TARGET_NR_setresgid32:
11588 return get_errno(sys_setresgid(arg1, arg2, arg3));
11589 #endif
11590 #ifdef TARGET_NR_getresgid32
11591 case TARGET_NR_getresgid32:
11593 gid_t rgid, egid, sgid;
11594 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11595 if (!is_error(ret)) {
11596 if (put_user_u32(rgid, arg1)
11597 || put_user_u32(egid, arg2)
11598 || put_user_u32(sgid, arg3))
11599 return -TARGET_EFAULT;
11602 return ret;
11603 #endif
11604 #ifdef TARGET_NR_chown32
11605 case TARGET_NR_chown32:
11606 if (!(p = lock_user_string(arg1)))
11607 return -TARGET_EFAULT;
11608 ret = get_errno(chown(p, arg2, arg3));
11609 unlock_user(p, arg1, 0);
11610 return ret;
11611 #endif
11612 #ifdef TARGET_NR_setuid32
11613 case TARGET_NR_setuid32:
11614 return get_errno(sys_setuid(arg1));
11615 #endif
11616 #ifdef TARGET_NR_setgid32
11617 case TARGET_NR_setgid32:
11618 return get_errno(sys_setgid(arg1));
11619 #endif
11620 #ifdef TARGET_NR_setfsuid32
11621 case TARGET_NR_setfsuid32:
11622 return get_errno(setfsuid(arg1));
11623 #endif
11624 #ifdef TARGET_NR_setfsgid32
11625 case TARGET_NR_setfsgid32:
11626 return get_errno(setfsgid(arg1));
11627 #endif
11628 #ifdef TARGET_NR_mincore
11629 case TARGET_NR_mincore:
11631 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11632 if (!a) {
11633 return -TARGET_ENOMEM;
11635 p = lock_user_string(arg3);
11636 if (!p) {
11637 ret = -TARGET_EFAULT;
11638 } else {
11639 ret = get_errno(mincore(a, arg2, p));
11640 unlock_user(p, arg3, ret);
11642 unlock_user(a, arg1, 0);
11644 return ret;
11645 #endif
11646 #ifdef TARGET_NR_arm_fadvise64_64
11647 case TARGET_NR_arm_fadvise64_64:
11648 /* arm_fadvise64_64 looks like fadvise64_64 but
11649 * with different argument order: fd, advice, offset, len
11650 * rather than the usual fd, offset, len, advice.
11651 * Note that offset and len are both 64-bit so appear as
11652 * pairs of 32-bit registers.
11654 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11655 target_offset64(arg5, arg6), arg2);
11656 return -host_to_target_errno(ret);
11657 #endif
11659 #if TARGET_ABI_BITS == 32
11661 #ifdef TARGET_NR_fadvise64_64
11662 case TARGET_NR_fadvise64_64:
11663 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11664 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11665 ret = arg2;
11666 arg2 = arg3;
11667 arg3 = arg4;
11668 arg4 = arg5;
11669 arg5 = arg6;
11670 arg6 = ret;
11671 #else
11672 /* 6 args: fd, offset (high, low), len (high, low), advice */
11673 if (regpairs_aligned(cpu_env, num)) {
11674 /* offset is in (3,4), len in (5,6) and advice in 7 */
11675 arg2 = arg3;
11676 arg3 = arg4;
11677 arg4 = arg5;
11678 arg5 = arg6;
11679 arg6 = arg7;
11681 #endif
11682 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11683 target_offset64(arg4, arg5), arg6);
11684 return -host_to_target_errno(ret);
11685 #endif
11687 #ifdef TARGET_NR_fadvise64
11688 case TARGET_NR_fadvise64:
11689 /* 5 args: fd, offset (high, low), len, advice */
11690 if (regpairs_aligned(cpu_env, num)) {
11691 /* offset is in (3,4), len in 5 and advice in 6 */
11692 arg2 = arg3;
11693 arg3 = arg4;
11694 arg4 = arg5;
11695 arg5 = arg6;
11697 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11698 return -host_to_target_errno(ret);
11699 #endif
11701 #else /* not a 32-bit ABI */
11702 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11703 #ifdef TARGET_NR_fadvise64_64
11704 case TARGET_NR_fadvise64_64:
11705 #endif
11706 #ifdef TARGET_NR_fadvise64
11707 case TARGET_NR_fadvise64:
11708 #endif
11709 #ifdef TARGET_S390X
11710 switch (arg4) {
11711 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11712 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11713 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11714 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11715 default: break;
11717 #endif
11718 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11719 #endif
11720 #endif /* end of 64-bit ABI fadvise handling */
11722 #ifdef TARGET_NR_madvise
11723 case TARGET_NR_madvise:
11724 /* A straight passthrough may not be safe because qemu sometimes
11725 turns private file-backed mappings into anonymous mappings.
11726 This will break MADV_DONTNEED.
11727 This is a hint, so ignoring and returning success is ok. */
11728 return 0;
11729 #endif
11730 #ifdef TARGET_NR_fcntl64
11731 case TARGET_NR_fcntl64:
11733 int cmd;
11734 struct flock64 fl;
11735 from_flock64_fn *copyfrom = copy_from_user_flock64;
11736 to_flock64_fn *copyto = copy_to_user_flock64;
11738 #ifdef TARGET_ARM
11739 if (!((CPUARMState *)cpu_env)->eabi) {
11740 copyfrom = copy_from_user_oabi_flock64;
11741 copyto = copy_to_user_oabi_flock64;
11743 #endif
11745 cmd = target_to_host_fcntl_cmd(arg2);
11746 if (cmd == -TARGET_EINVAL) {
11747 return cmd;
11750 switch(arg2) {
11751 case TARGET_F_GETLK64:
11752 ret = copyfrom(&fl, arg3);
11753 if (ret) {
11754 break;
11756 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11757 if (ret == 0) {
11758 ret = copyto(arg3, &fl);
11760 break;
11762 case TARGET_F_SETLK64:
11763 case TARGET_F_SETLKW64:
11764 ret = copyfrom(&fl, arg3);
11765 if (ret) {
11766 break;
11768 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11769 break;
11770 default:
11771 ret = do_fcntl(arg1, arg2, arg3);
11772 break;
11774 return ret;
11776 #endif
11777 #ifdef TARGET_NR_cacheflush
11778 case TARGET_NR_cacheflush:
11779 /* self-modifying code is handled automatically, so nothing needed */
11780 return 0;
11781 #endif
11782 #ifdef TARGET_NR_getpagesize
11783 case TARGET_NR_getpagesize:
11784 return TARGET_PAGE_SIZE;
11785 #endif
11786 case TARGET_NR_gettid:
11787 return get_errno(sys_gettid());
11788 #ifdef TARGET_NR_readahead
11789 case TARGET_NR_readahead:
11790 #if TARGET_ABI_BITS == 32
11791 if (regpairs_aligned(cpu_env, num)) {
11792 arg2 = arg3;
11793 arg3 = arg4;
11794 arg4 = arg5;
11796 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11797 #else
11798 ret = get_errno(readahead(arg1, arg2, arg3));
11799 #endif
11800 return ret;
11801 #endif
11802 #ifdef CONFIG_ATTR
11803 #ifdef TARGET_NR_setxattr
11804 case TARGET_NR_listxattr:
11805 case TARGET_NR_llistxattr:
11807 void *p, *b = 0;
11808 if (arg2) {
11809 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11810 if (!b) {
11811 return -TARGET_EFAULT;
11814 p = lock_user_string(arg1);
11815 if (p) {
11816 if (num == TARGET_NR_listxattr) {
11817 ret = get_errno(listxattr(p, b, arg3));
11818 } else {
11819 ret = get_errno(llistxattr(p, b, arg3));
11821 } else {
11822 ret = -TARGET_EFAULT;
11824 unlock_user(p, arg1, 0);
11825 unlock_user(b, arg2, arg3);
11826 return ret;
11828 case TARGET_NR_flistxattr:
11830 void *b = 0;
11831 if (arg2) {
11832 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11833 if (!b) {
11834 return -TARGET_EFAULT;
11837 ret = get_errno(flistxattr(arg1, b, arg3));
11838 unlock_user(b, arg2, arg3);
11839 return ret;
11841 case TARGET_NR_setxattr:
11842 case TARGET_NR_lsetxattr:
11844 void *p, *n, *v = 0;
11845 if (arg3) {
11846 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11847 if (!v) {
11848 return -TARGET_EFAULT;
11851 p = lock_user_string(arg1);
11852 n = lock_user_string(arg2);
11853 if (p && n) {
11854 if (num == TARGET_NR_setxattr) {
11855 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11856 } else {
11857 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11859 } else {
11860 ret = -TARGET_EFAULT;
11862 unlock_user(p, arg1, 0);
11863 unlock_user(n, arg2, 0);
11864 unlock_user(v, arg3, 0);
11866 return ret;
11867 case TARGET_NR_fsetxattr:
11869 void *n, *v = 0;
11870 if (arg3) {
11871 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11872 if (!v) {
11873 return -TARGET_EFAULT;
11876 n = lock_user_string(arg2);
11877 if (n) {
11878 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11879 } else {
11880 ret = -TARGET_EFAULT;
11882 unlock_user(n, arg2, 0);
11883 unlock_user(v, arg3, 0);
11885 return ret;
11886 case TARGET_NR_getxattr:
11887 case TARGET_NR_lgetxattr:
11889 void *p, *n, *v = 0;
11890 if (arg3) {
11891 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11892 if (!v) {
11893 return -TARGET_EFAULT;
11896 p = lock_user_string(arg1);
11897 n = lock_user_string(arg2);
11898 if (p && n) {
11899 if (num == TARGET_NR_getxattr) {
11900 ret = get_errno(getxattr(p, n, v, arg4));
11901 } else {
11902 ret = get_errno(lgetxattr(p, n, v, arg4));
11904 } else {
11905 ret = -TARGET_EFAULT;
11907 unlock_user(p, arg1, 0);
11908 unlock_user(n, arg2, 0);
11909 unlock_user(v, arg3, arg4);
11911 return ret;
11912 case TARGET_NR_fgetxattr:
11914 void *n, *v = 0;
11915 if (arg3) {
11916 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11917 if (!v) {
11918 return -TARGET_EFAULT;
11921 n = lock_user_string(arg2);
11922 if (n) {
11923 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11924 } else {
11925 ret = -TARGET_EFAULT;
11927 unlock_user(n, arg2, 0);
11928 unlock_user(v, arg3, arg4);
11930 return ret;
11931 case TARGET_NR_removexattr:
11932 case TARGET_NR_lremovexattr:
11934 void *p, *n;
11935 p = lock_user_string(arg1);
11936 n = lock_user_string(arg2);
11937 if (p && n) {
11938 if (num == TARGET_NR_removexattr) {
11939 ret = get_errno(removexattr(p, n));
11940 } else {
11941 ret = get_errno(lremovexattr(p, n));
11943 } else {
11944 ret = -TARGET_EFAULT;
11946 unlock_user(p, arg1, 0);
11947 unlock_user(n, arg2, 0);
11949 return ret;
11950 case TARGET_NR_fremovexattr:
11952 void *n;
11953 n = lock_user_string(arg2);
11954 if (n) {
11955 ret = get_errno(fremovexattr(arg1, n));
11956 } else {
11957 ret = -TARGET_EFAULT;
11959 unlock_user(n, arg2, 0);
11961 return ret;
11962 #endif
11963 #endif /* CONFIG_ATTR */
11964 #ifdef TARGET_NR_set_thread_area
11965 case TARGET_NR_set_thread_area:
11966 #if defined(TARGET_MIPS)
11967 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11968 return 0;
11969 #elif defined(TARGET_CRIS)
11970 if (arg1 & 0xff)
11971 ret = -TARGET_EINVAL;
11972 else {
11973 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11974 ret = 0;
11976 return ret;
11977 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11978 return do_set_thread_area(cpu_env, arg1);
11979 #elif defined(TARGET_M68K)
11981 TaskState *ts = cpu->opaque;
11982 ts->tp_value = arg1;
11983 return 0;
11985 #else
11986 return -TARGET_ENOSYS;
11987 #endif
11988 #endif
11989 #ifdef TARGET_NR_get_thread_area
11990 case TARGET_NR_get_thread_area:
11991 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11992 return do_get_thread_area(cpu_env, arg1);
11993 #elif defined(TARGET_M68K)
11995 TaskState *ts = cpu->opaque;
11996 return ts->tp_value;
11998 #else
11999 return -TARGET_ENOSYS;
12000 #endif
12001 #endif
12002 #ifdef TARGET_NR_getdomainname
12003 case TARGET_NR_getdomainname:
12004 return -TARGET_ENOSYS;
12005 #endif
12007 #ifdef TARGET_NR_clock_settime
12008 case TARGET_NR_clock_settime:
12010 struct timespec ts;
12012 ret = target_to_host_timespec(&ts, arg2);
12013 if (!is_error(ret)) {
12014 ret = get_errno(clock_settime(arg1, &ts));
12016 return ret;
12018 #endif
12019 #ifdef TARGET_NR_clock_settime64
12020 case TARGET_NR_clock_settime64:
12022 struct timespec ts;
12024 ret = target_to_host_timespec64(&ts, arg2);
12025 if (!is_error(ret)) {
12026 ret = get_errno(clock_settime(arg1, &ts));
12028 return ret;
12030 #endif
12031 #ifdef TARGET_NR_clock_gettime
12032 case TARGET_NR_clock_gettime:
12034 struct timespec ts;
12035 ret = get_errno(clock_gettime(arg1, &ts));
12036 if (!is_error(ret)) {
12037 ret = host_to_target_timespec(arg2, &ts);
12039 return ret;
12041 #endif
12042 #ifdef TARGET_NR_clock_gettime64
12043 case TARGET_NR_clock_gettime64:
12045 struct timespec ts;
12046 ret = get_errno(clock_gettime(arg1, &ts));
12047 if (!is_error(ret)) {
12048 ret = host_to_target_timespec64(arg2, &ts);
12050 return ret;
12052 #endif
12053 #ifdef TARGET_NR_clock_getres
12054 case TARGET_NR_clock_getres:
12056 struct timespec ts;
12057 ret = get_errno(clock_getres(arg1, &ts));
12058 if (!is_error(ret)) {
12059 host_to_target_timespec(arg2, &ts);
12061 return ret;
12063 #endif
12064 #ifdef TARGET_NR_clock_getres_time64
12065 case TARGET_NR_clock_getres_time64:
12067 struct timespec ts;
12068 ret = get_errno(clock_getres(arg1, &ts));
12069 if (!is_error(ret)) {
12070 host_to_target_timespec64(arg2, &ts);
12072 return ret;
12074 #endif
12075 #ifdef TARGET_NR_clock_nanosleep
12076 case TARGET_NR_clock_nanosleep:
12078 struct timespec ts;
12079 if (target_to_host_timespec(&ts, arg3)) {
12080 return -TARGET_EFAULT;
12082 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12083 &ts, arg4 ? &ts : NULL));
12085 * if the call is interrupted by a signal handler, it fails
12086 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12087 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12089 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12090 host_to_target_timespec(arg4, &ts)) {
12091 return -TARGET_EFAULT;
12094 return ret;
12096 #endif
12097 #ifdef TARGET_NR_clock_nanosleep_time64
12098 case TARGET_NR_clock_nanosleep_time64:
12100 struct timespec ts;
12102 if (target_to_host_timespec64(&ts, arg3)) {
12103 return -TARGET_EFAULT;
12106 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12107 &ts, arg4 ? &ts : NULL));
12109 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12110 host_to_target_timespec64(arg4, &ts)) {
12111 return -TARGET_EFAULT;
12113 return ret;
12115 #endif
12117 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12118 case TARGET_NR_set_tid_address:
12119 return get_errno(set_tid_address((int *)g2h(arg1)));
12120 #endif
12122 case TARGET_NR_tkill:
12123 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12125 case TARGET_NR_tgkill:
12126 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12127 target_to_host_signal(arg3)));
12129 #ifdef TARGET_NR_set_robust_list
12130 case TARGET_NR_set_robust_list:
12131 case TARGET_NR_get_robust_list:
12132 /* The ABI for supporting robust futexes has userspace pass
12133 * the kernel a pointer to a linked list which is updated by
12134 * userspace after the syscall; the list is walked by the kernel
12135 * when the thread exits. Since the linked list in QEMU guest
12136 * memory isn't a valid linked list for the host and we have
12137 * no way to reliably intercept the thread-death event, we can't
12138 * support these. Silently return ENOSYS so that guest userspace
12139 * falls back to a non-robust futex implementation (which should
12140 * be OK except in the corner case of the guest crashing while
12141 * holding a mutex that is shared with another process via
12142 * shared memory).
12144 return -TARGET_ENOSYS;
12145 #endif
12147 #if defined(TARGET_NR_utimensat)
12148 case TARGET_NR_utimensat:
12150 struct timespec *tsp, ts[2];
12151 if (!arg3) {
12152 tsp = NULL;
12153 } else {
12154 if (target_to_host_timespec(ts, arg3)) {
12155 return -TARGET_EFAULT;
12157 if (target_to_host_timespec(ts + 1, arg3 +
12158 sizeof(struct target_timespec))) {
12159 return -TARGET_EFAULT;
12161 tsp = ts;
12163 if (!arg2)
12164 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12165 else {
12166 if (!(p = lock_user_string(arg2))) {
12167 return -TARGET_EFAULT;
12169 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12170 unlock_user(p, arg2, 0);
12173 return ret;
12174 #endif
12175 #ifdef TARGET_NR_utimensat_time64
12176 case TARGET_NR_utimensat_time64:
12178 struct timespec *tsp, ts[2];
12179 if (!arg3) {
12180 tsp = NULL;
12181 } else {
12182 if (target_to_host_timespec64(ts, arg3)) {
12183 return -TARGET_EFAULT;
12185 if (target_to_host_timespec64(ts + 1, arg3 +
12186 sizeof(struct target__kernel_timespec))) {
12187 return -TARGET_EFAULT;
12189 tsp = ts;
12191 if (!arg2)
12192 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12193 else {
12194 p = lock_user_string(arg2);
12195 if (!p) {
12196 return -TARGET_EFAULT;
12198 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12199 unlock_user(p, arg2, 0);
12202 return ret;
12203 #endif
12204 #ifdef TARGET_NR_futex
12205 case TARGET_NR_futex:
12206 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12207 #endif
12208 #ifdef TARGET_NR_futex_time64
12209 case TARGET_NR_futex_time64:
12210 return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12211 #endif
12212 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12213 case TARGET_NR_inotify_init:
12214 ret = get_errno(sys_inotify_init());
12215 if (ret >= 0) {
12216 fd_trans_register(ret, &target_inotify_trans);
12218 return ret;
12219 #endif
12220 #ifdef CONFIG_INOTIFY1
12221 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12222 case TARGET_NR_inotify_init1:
12223 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12224 fcntl_flags_tbl)));
12225 if (ret >= 0) {
12226 fd_trans_register(ret, &target_inotify_trans);
12228 return ret;
12229 #endif
12230 #endif
12231 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12232 case TARGET_NR_inotify_add_watch:
12233 p = lock_user_string(arg2);
12234 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12235 unlock_user(p, arg2, 0);
12236 return ret;
12237 #endif
12238 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12239 case TARGET_NR_inotify_rm_watch:
12240 return get_errno(sys_inotify_rm_watch(arg1, arg2));
12241 #endif
12243 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12244 case TARGET_NR_mq_open:
12246 struct mq_attr posix_mq_attr;
12247 struct mq_attr *pposix_mq_attr;
12248 int host_flags;
12250 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12251 pposix_mq_attr = NULL;
12252 if (arg4) {
12253 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12254 return -TARGET_EFAULT;
12256 pposix_mq_attr = &posix_mq_attr;
12258 p = lock_user_string(arg1 - 1);
12259 if (!p) {
12260 return -TARGET_EFAULT;
12262 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12263 unlock_user (p, arg1, 0);
12265 return ret;
12267 case TARGET_NR_mq_unlink:
12268 p = lock_user_string(arg1 - 1);
12269 if (!p) {
12270 return -TARGET_EFAULT;
12272 ret = get_errno(mq_unlink(p));
12273 unlock_user (p, arg1, 0);
12274 return ret;
12276 #ifdef TARGET_NR_mq_timedsend
12277 case TARGET_NR_mq_timedsend:
12279 struct timespec ts;
12281 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12282 if (arg5 != 0) {
12283 if (target_to_host_timespec(&ts, arg5)) {
12284 return -TARGET_EFAULT;
12286 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12287 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12288 return -TARGET_EFAULT;
12290 } else {
12291 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12293 unlock_user (p, arg2, arg3);
12295 return ret;
12296 #endif
12297 #ifdef TARGET_NR_mq_timedsend_time64
12298 case TARGET_NR_mq_timedsend_time64:
12300 struct timespec ts;
12302 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12303 if (arg5 != 0) {
12304 if (target_to_host_timespec64(&ts, arg5)) {
12305 return -TARGET_EFAULT;
12307 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12308 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12309 return -TARGET_EFAULT;
12311 } else {
12312 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12314 unlock_user(p, arg2, arg3);
12316 return ret;
12317 #endif
12319 #ifdef TARGET_NR_mq_timedreceive
12320 case TARGET_NR_mq_timedreceive:
12322 struct timespec ts;
12323 unsigned int prio;
12325 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12326 if (arg5 != 0) {
12327 if (target_to_host_timespec(&ts, arg5)) {
12328 return -TARGET_EFAULT;
12330 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12331 &prio, &ts));
12332 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12333 return -TARGET_EFAULT;
12335 } else {
12336 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12337 &prio, NULL));
12339 unlock_user (p, arg2, arg3);
12340 if (arg4 != 0)
12341 put_user_u32(prio, arg4);
12343 return ret;
12344 #endif
12345 #ifdef TARGET_NR_mq_timedreceive_time64
12346 case TARGET_NR_mq_timedreceive_time64:
12348 struct timespec ts;
12349 unsigned int prio;
12351 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12352 if (arg5 != 0) {
12353 if (target_to_host_timespec64(&ts, arg5)) {
12354 return -TARGET_EFAULT;
12356 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12357 &prio, &ts));
12358 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12359 return -TARGET_EFAULT;
12361 } else {
12362 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12363 &prio, NULL));
12365 unlock_user(p, arg2, arg3);
12366 if (arg4 != 0) {
12367 put_user_u32(prio, arg4);
12370 return ret;
12371 #endif
12373 /* Not implemented for now... */
12374 /* case TARGET_NR_mq_notify: */
12375 /* break; */
12377 case TARGET_NR_mq_getsetattr:
12379 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12380 ret = 0;
12381 if (arg2 != 0) {
12382 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12383 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12384 &posix_mq_attr_out));
12385 } else if (arg3 != 0) {
12386 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12388 if (ret == 0 && arg3 != 0) {
12389 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12392 return ret;
12393 #endif
12395 #ifdef CONFIG_SPLICE
12396 #ifdef TARGET_NR_tee
12397 case TARGET_NR_tee:
12399 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12401 return ret;
12402 #endif
12403 #ifdef TARGET_NR_splice
12404 case TARGET_NR_splice:
12406 loff_t loff_in, loff_out;
12407 loff_t *ploff_in = NULL, *ploff_out = NULL;
12408 if (arg2) {
12409 if (get_user_u64(loff_in, arg2)) {
12410 return -TARGET_EFAULT;
12412 ploff_in = &loff_in;
12414 if (arg4) {
12415 if (get_user_u64(loff_out, arg4)) {
12416 return -TARGET_EFAULT;
12418 ploff_out = &loff_out;
12420 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12421 if (arg2) {
12422 if (put_user_u64(loff_in, arg2)) {
12423 return -TARGET_EFAULT;
12426 if (arg4) {
12427 if (put_user_u64(loff_out, arg4)) {
12428 return -TARGET_EFAULT;
12432 return ret;
12433 #endif
12434 #ifdef TARGET_NR_vmsplice
12435 case TARGET_NR_vmsplice:
12437 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12438 if (vec != NULL) {
12439 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12440 unlock_iovec(vec, arg2, arg3, 0);
12441 } else {
12442 ret = -host_to_target_errno(errno);
12445 return ret;
12446 #endif
12447 #endif /* CONFIG_SPLICE */
12448 #ifdef CONFIG_EVENTFD
12449 #if defined(TARGET_NR_eventfd)
12450 case TARGET_NR_eventfd:
12451 ret = get_errno(eventfd(arg1, 0));
12452 if (ret >= 0) {
12453 fd_trans_register(ret, &target_eventfd_trans);
12455 return ret;
12456 #endif
12457 #if defined(TARGET_NR_eventfd2)
12458 case TARGET_NR_eventfd2:
12460 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12461 if (arg2 & TARGET_O_NONBLOCK) {
12462 host_flags |= O_NONBLOCK;
12464 if (arg2 & TARGET_O_CLOEXEC) {
12465 host_flags |= O_CLOEXEC;
12467 ret = get_errno(eventfd(arg1, host_flags));
12468 if (ret >= 0) {
12469 fd_trans_register(ret, &target_eventfd_trans);
12471 return ret;
12473 #endif
12474 #endif /* CONFIG_EVENTFD */
12475 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12476 case TARGET_NR_fallocate:
12477 #if TARGET_ABI_BITS == 32
12478 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12479 target_offset64(arg5, arg6)));
12480 #else
12481 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12482 #endif
12483 return ret;
12484 #endif
12485 #if defined(CONFIG_SYNC_FILE_RANGE)
12486 #if defined(TARGET_NR_sync_file_range)
12487 case TARGET_NR_sync_file_range:
12488 #if TARGET_ABI_BITS == 32
12489 #if defined(TARGET_MIPS)
12490 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12491 target_offset64(arg5, arg6), arg7));
12492 #else
12493 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12494 target_offset64(arg4, arg5), arg6));
12495 #endif /* !TARGET_MIPS */
12496 #else
12497 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12498 #endif
12499 return ret;
12500 #endif
12501 #if defined(TARGET_NR_sync_file_range2) || \
12502 defined(TARGET_NR_arm_sync_file_range)
12503 #if defined(TARGET_NR_sync_file_range2)
12504 case TARGET_NR_sync_file_range2:
12505 #endif
12506 #if defined(TARGET_NR_arm_sync_file_range)
12507 case TARGET_NR_arm_sync_file_range:
12508 #endif
12509 /* This is like sync_file_range but the arguments are reordered */
12510 #if TARGET_ABI_BITS == 32
12511 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12512 target_offset64(arg5, arg6), arg2));
12513 #else
12514 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12515 #endif
12516 return ret;
12517 #endif
12518 #endif
12519 #if defined(TARGET_NR_signalfd4)
12520 case TARGET_NR_signalfd4:
12521 return do_signalfd4(arg1, arg2, arg4);
12522 #endif
12523 #if defined(TARGET_NR_signalfd)
12524 case TARGET_NR_signalfd:
12525 return do_signalfd4(arg1, arg2, 0);
12526 #endif
12527 #if defined(CONFIG_EPOLL)
12528 #if defined(TARGET_NR_epoll_create)
12529 case TARGET_NR_epoll_create:
12530 return get_errno(epoll_create(arg1));
12531 #endif
12532 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12533 case TARGET_NR_epoll_create1:
12534 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12535 #endif
12536 #if defined(TARGET_NR_epoll_ctl)
12537 case TARGET_NR_epoll_ctl:
12539 struct epoll_event ep;
12540 struct epoll_event *epp = 0;
12541 if (arg4) {
12542 struct target_epoll_event *target_ep;
12543 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12544 return -TARGET_EFAULT;
12546 ep.events = tswap32(target_ep->events);
12547 /* The epoll_data_t union is just opaque data to the kernel,
12548 * so we transfer all 64 bits across and need not worry what
12549 * actual data type it is.
12551 ep.data.u64 = tswap64(target_ep->data.u64);
12552 unlock_user_struct(target_ep, arg4, 0);
12553 epp = &ep;
12555 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12557 #endif
12559 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12560 #if defined(TARGET_NR_epoll_wait)
12561 case TARGET_NR_epoll_wait:
12562 #endif
12563 #if defined(TARGET_NR_epoll_pwait)
12564 case TARGET_NR_epoll_pwait:
12565 #endif
12567 struct target_epoll_event *target_ep;
12568 struct epoll_event *ep;
12569 int epfd = arg1;
12570 int maxevents = arg3;
12571 int timeout = arg4;
12573 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12574 return -TARGET_EINVAL;
12577 target_ep = lock_user(VERIFY_WRITE, arg2,
12578 maxevents * sizeof(struct target_epoll_event), 1);
12579 if (!target_ep) {
12580 return -TARGET_EFAULT;
12583 ep = g_try_new(struct epoll_event, maxevents);
12584 if (!ep) {
12585 unlock_user(target_ep, arg2, 0);
12586 return -TARGET_ENOMEM;
12589 switch (num) {
12590 #if defined(TARGET_NR_epoll_pwait)
12591 case TARGET_NR_epoll_pwait:
12593 target_sigset_t *target_set;
12594 sigset_t _set, *set = &_set;
12596 if (arg5) {
12597 if (arg6 != sizeof(target_sigset_t)) {
12598 ret = -TARGET_EINVAL;
12599 break;
12602 target_set = lock_user(VERIFY_READ, arg5,
12603 sizeof(target_sigset_t), 1);
12604 if (!target_set) {
12605 ret = -TARGET_EFAULT;
12606 break;
12608 target_to_host_sigset(set, target_set);
12609 unlock_user(target_set, arg5, 0);
12610 } else {
12611 set = NULL;
12614 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12615 set, SIGSET_T_SIZE));
12616 break;
12618 #endif
12619 #if defined(TARGET_NR_epoll_wait)
12620 case TARGET_NR_epoll_wait:
12621 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12622 NULL, 0));
12623 break;
12624 #endif
12625 default:
12626 ret = -TARGET_ENOSYS;
12628 if (!is_error(ret)) {
12629 int i;
12630 for (i = 0; i < ret; i++) {
12631 target_ep[i].events = tswap32(ep[i].events);
12632 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12634 unlock_user(target_ep, arg2,
12635 ret * sizeof(struct target_epoll_event));
12636 } else {
12637 unlock_user(target_ep, arg2, 0);
12639 g_free(ep);
12640 return ret;
12642 #endif
12643 #endif
12644 #ifdef TARGET_NR_prlimit64
12645 case TARGET_NR_prlimit64:
12647 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12648 struct target_rlimit64 *target_rnew, *target_rold;
12649 struct host_rlimit64 rnew, rold, *rnewp = 0;
12650 int resource = target_to_host_resource(arg2);
12652 if (arg3 && (resource != RLIMIT_AS &&
12653 resource != RLIMIT_DATA &&
12654 resource != RLIMIT_STACK)) {
12655 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12656 return -TARGET_EFAULT;
12658 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12659 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12660 unlock_user_struct(target_rnew, arg3, 0);
12661 rnewp = &rnew;
12664 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12665 if (!is_error(ret) && arg4) {
12666 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12667 return -TARGET_EFAULT;
12669 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12670 target_rold->rlim_max = tswap64(rold.rlim_max);
12671 unlock_user_struct(target_rold, arg4, 1);
12673 return ret;
12675 #endif
12676 #ifdef TARGET_NR_gethostname
12677 case TARGET_NR_gethostname:
12679 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12680 if (name) {
12681 ret = get_errno(gethostname(name, arg2));
12682 unlock_user(name, arg1, arg2);
12683 } else {
12684 ret = -TARGET_EFAULT;
12686 return ret;
12688 #endif
12689 #ifdef TARGET_NR_atomic_cmpxchg_32
12690 case TARGET_NR_atomic_cmpxchg_32:
12692 /* should use start_exclusive from main.c */
12693 abi_ulong mem_value;
12694 if (get_user_u32(mem_value, arg6)) {
12695 target_siginfo_t info;
12696 info.si_signo = SIGSEGV;
12697 info.si_errno = 0;
12698 info.si_code = TARGET_SEGV_MAPERR;
12699 info._sifields._sigfault._addr = arg6;
12700 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12701 QEMU_SI_FAULT, &info);
12702 ret = 0xdeadbeef;
12705 if (mem_value == arg2)
12706 put_user_u32(arg1, arg6);
12707 return mem_value;
12709 #endif
12710 #ifdef TARGET_NR_atomic_barrier
12711 case TARGET_NR_atomic_barrier:
12712 /* Like the kernel implementation and the
12713 qemu arm barrier, no-op this? */
12714 return 0;
12715 #endif
12717 #ifdef TARGET_NR_timer_create
12718 case TARGET_NR_timer_create:
12720 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12722 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12724 int clkid = arg1;
12725 int timer_index = next_free_host_timer();
12727 if (timer_index < 0) {
12728 ret = -TARGET_EAGAIN;
12729 } else {
12730 timer_t *phtimer = g_posix_timers + timer_index;
12732 if (arg2) {
12733 phost_sevp = &host_sevp;
12734 ret = target_to_host_sigevent(phost_sevp, arg2);
12735 if (ret != 0) {
12736 return ret;
12740 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12741 if (ret) {
12742 phtimer = NULL;
12743 } else {
12744 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12745 return -TARGET_EFAULT;
12749 return ret;
12751 #endif
12753 #ifdef TARGET_NR_timer_settime
12754 case TARGET_NR_timer_settime:
12756 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12757 * struct itimerspec * old_value */
12758 target_timer_t timerid = get_timer_id(arg1);
12760 if (timerid < 0) {
12761 ret = timerid;
12762 } else if (arg3 == 0) {
12763 ret = -TARGET_EINVAL;
12764 } else {
12765 timer_t htimer = g_posix_timers[timerid];
12766 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12768 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12769 return -TARGET_EFAULT;
12771 ret = get_errno(
12772 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12773 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12774 return -TARGET_EFAULT;
12777 return ret;
12779 #endif
12781 #ifdef TARGET_NR_timer_settime64
12782 case TARGET_NR_timer_settime64:
12784 target_timer_t timerid = get_timer_id(arg1);
12786 if (timerid < 0) {
12787 ret = timerid;
12788 } else if (arg3 == 0) {
12789 ret = -TARGET_EINVAL;
12790 } else {
12791 timer_t htimer = g_posix_timers[timerid];
12792 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12794 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12795 return -TARGET_EFAULT;
12797 ret = get_errno(
12798 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12799 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12800 return -TARGET_EFAULT;
12803 return ret;
12805 #endif
12807 #ifdef TARGET_NR_timer_gettime
12808 case TARGET_NR_timer_gettime:
12810 /* args: timer_t timerid, struct itimerspec *curr_value */
12811 target_timer_t timerid = get_timer_id(arg1);
12813 if (timerid < 0) {
12814 ret = timerid;
12815 } else if (!arg2) {
12816 ret = -TARGET_EFAULT;
12817 } else {
12818 timer_t htimer = g_posix_timers[timerid];
12819 struct itimerspec hspec;
12820 ret = get_errno(timer_gettime(htimer, &hspec));
12822 if (host_to_target_itimerspec(arg2, &hspec)) {
12823 ret = -TARGET_EFAULT;
12826 return ret;
12828 #endif
12830 #ifdef TARGET_NR_timer_gettime64
12831 case TARGET_NR_timer_gettime64:
12833 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12834 target_timer_t timerid = get_timer_id(arg1);
12836 if (timerid < 0) {
12837 ret = timerid;
12838 } else if (!arg2) {
12839 ret = -TARGET_EFAULT;
12840 } else {
12841 timer_t htimer = g_posix_timers[timerid];
12842 struct itimerspec hspec;
12843 ret = get_errno(timer_gettime(htimer, &hspec));
12845 if (host_to_target_itimerspec64(arg2, &hspec)) {
12846 ret = -TARGET_EFAULT;
12849 return ret;
12851 #endif
12853 #ifdef TARGET_NR_timer_getoverrun
12854 case TARGET_NR_timer_getoverrun:
12856 /* args: timer_t timerid */
12857 target_timer_t timerid = get_timer_id(arg1);
12859 if (timerid < 0) {
12860 ret = timerid;
12861 } else {
12862 timer_t htimer = g_posix_timers[timerid];
12863 ret = get_errno(timer_getoverrun(htimer));
12865 return ret;
12867 #endif
12869 #ifdef TARGET_NR_timer_delete
12870 case TARGET_NR_timer_delete:
12872 /* args: timer_t timerid */
12873 target_timer_t timerid = get_timer_id(arg1);
12875 if (timerid < 0) {
12876 ret = timerid;
12877 } else {
12878 timer_t htimer = g_posix_timers[timerid];
12879 ret = get_errno(timer_delete(htimer));
12880 g_posix_timers[timerid] = 0;
12882 return ret;
12884 #endif
12886 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12887 case TARGET_NR_timerfd_create:
12888 return get_errno(timerfd_create(arg1,
12889 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12890 #endif
12892 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12893 case TARGET_NR_timerfd_gettime:
12895 struct itimerspec its_curr;
12897 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12899 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12900 return -TARGET_EFAULT;
12903 return ret;
12904 #endif
12906 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12907 case TARGET_NR_timerfd_gettime64:
12909 struct itimerspec its_curr;
12911 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12913 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12914 return -TARGET_EFAULT;
12917 return ret;
12918 #endif
12920 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12921 case TARGET_NR_timerfd_settime:
12923 struct itimerspec its_new, its_old, *p_new;
12925 if (arg3) {
12926 if (target_to_host_itimerspec(&its_new, arg3)) {
12927 return -TARGET_EFAULT;
12929 p_new = &its_new;
12930 } else {
12931 p_new = NULL;
12934 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12936 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12937 return -TARGET_EFAULT;
12940 return ret;
12941 #endif
12943 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12944 case TARGET_NR_timerfd_settime64:
12946 struct itimerspec its_new, its_old, *p_new;
12948 if (arg3) {
12949 if (target_to_host_itimerspec64(&its_new, arg3)) {
12950 return -TARGET_EFAULT;
12952 p_new = &its_new;
12953 } else {
12954 p_new = NULL;
12957 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12959 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12960 return -TARGET_EFAULT;
12963 return ret;
12964 #endif
12966 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12967 case TARGET_NR_ioprio_get:
12968 return get_errno(ioprio_get(arg1, arg2));
12969 #endif
12971 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12972 case TARGET_NR_ioprio_set:
12973 return get_errno(ioprio_set(arg1, arg2, arg3));
12974 #endif
12976 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12977 case TARGET_NR_setns:
12978 return get_errno(setns(arg1, arg2));
12979 #endif
12980 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12981 case TARGET_NR_unshare:
12982 return get_errno(unshare(arg1));
12983 #endif
12984 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12985 case TARGET_NR_kcmp:
12986 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12987 #endif
12988 #ifdef TARGET_NR_swapcontext
12989 case TARGET_NR_swapcontext:
12990 /* PowerPC specific. */
12991 return do_swapcontext(cpu_env, arg1, arg2, arg3);
12992 #endif
12993 #ifdef TARGET_NR_memfd_create
12994 case TARGET_NR_memfd_create:
12995 p = lock_user_string(arg1);
12996 if (!p) {
12997 return -TARGET_EFAULT;
12999 ret = get_errno(memfd_create(p, arg2));
13000 fd_trans_unregister(ret);
13001 unlock_user(p, arg1, 0);
13002 return ret;
13003 #endif
13004 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13005 case TARGET_NR_membarrier:
13006 return get_errno(membarrier(arg1, arg2));
13007 #endif
13009 default:
13010 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13011 return -TARGET_ENOSYS;
13013 return ret;
13016 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13017 abi_long arg2, abi_long arg3, abi_long arg4,
13018 abi_long arg5, abi_long arg6, abi_long arg7,
13019 abi_long arg8)
13021 CPUState *cpu = env_cpu(cpu_env);
13022 abi_long ret;
13024 #ifdef DEBUG_ERESTARTSYS
13025 /* Debug-only code for exercising the syscall-restart code paths
13026 * in the per-architecture cpu main loops: restart every syscall
13027 * the guest makes once before letting it through.
13030 static bool flag;
13031 flag = !flag;
13032 if (flag) {
13033 return -TARGET_ERESTARTSYS;
13036 #endif
13038 record_syscall_start(cpu, num, arg1,
13039 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13041 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13042 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13045 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13046 arg5, arg6, arg7, arg8);
13048 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13049 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13050 arg3, arg4, arg5, arg6);
13053 record_syscall_return(cpu, num, ret);
13054 return ret;