/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef CONFIG_KCOV
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
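
/* Illustrative sketch (not part of the original code): with the masks
 * above, a dispatcher can classify a guest clone() flags word roughly as
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         // pthread_create()-like; reject CLONE_INVALID_THREAD_FLAGS bits
 *     } else if (!(flags & CLONE_THREAD_FLAGS)) {
 *         // fork()-like; reject CLONE_INVALID_FORK_FLAGS bits
 *     } else {
 *         // a mix we cannot emulate: fail with -TARGET_EINVAL
 *     }
 *
 * The actual dispatch is done in do_fork(), later in this file.
 */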
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
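
/* Illustrative sketch (not part of the original code): an invocation such
 * as the hypothetical
 *
 *     _syscall2(int, sys_example, int, fd, long, arg)
 *
 * expands to a thin static wrapper
 *
 *     static int sys_example(int fd, long arg)
 *     {
 *         return syscall(__NR_sys_example, fd, arg);
 *     }
 *
 * which is why each wrapper below is paired with a #define mapping its
 * __NR_sys_<name> to the host's real __NR_<name> number.
 */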
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
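
/* Illustrative note (not part of the original code): the problem case is
 * a 64-bit guest on a 32-bit host. There a host getdents() record has
 * 32-bit d_ino/d_off fields that grow to 64 bits after conversion to the
 * guest layout, so N host records may no longer fit into the guest's
 * N-record buffer; getdents64 uses fixed 64-bit fields on both sides,
 * avoiding the inflation.
 */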
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
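
/* Illustrative sketch (not part of the original code): each row above is
 * { target_mask, target_bits, host_mask, host_bits }, consumed by the
 * generic bitmask translation helpers, e.g.
 *
 *     int host_flags = target_to_host_bitmask(target_flags,
 *                                             fcntl_flags_tbl);
 *
 * so a guest open() flag word gains the host's numeric O_* bits; the
 * values differ between architectures, so a plain copy would be wrong.
 */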
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1);
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
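
/* Illustrative note (not part of the original code): when
 * regpairs_aligned() returns 1, a 64-bit syscall argument starts on an
 * even/odd register pair. E.g. pread64(fd, buf, count, offset) on 32-bit
 * ARM EABI arrives as r0=fd, r1=buf, r2=count, r3=<padding>, with the
 * offset in r4:r5, and the dispatcher reassembles it from two abi_ulong
 * halves, roughly:
 *
 *     uint64_t off = ((uint64_t)high << 32) | low;  // order is ABI-dependent
 */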
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN] = TARGET_EAGAIN,
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [EDQUOT] = TARGET_EDQUOT,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG] = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL] = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON] = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
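
/* Illustrative note (not part of the original code): every host syscall in
 * this file is wrapped so the guest sees negative *target* errno values:
 *
 *     ret = get_errno(safe_read(fd, p, count));
 *     // a host read() failing with EAGAIN yields -TARGET_EAGAIN here
 *
 * since guest architectures may number their errnos differently from the
 * host.
 */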
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
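
/* Illustrative note (not part of the original code): safe_syscall() is an
 * assembly helper that cooperates with the signal code so that a guest
 * signal arriving before the host syscall instruction makes the call fail
 * with errno TARGET_ERESTARTSYS instead of blocking. The caller can then
 * deliver the signal and restart, e.g.:
 *
 *     ret = get_errno(safe_wait4(pid, &status, options, &rusage));
 *     // may yield -TARGET_ERESTARTSYS; the syscall loop retries it
 */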
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname,
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options,
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop,
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val,
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
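
/* Illustrative note (not part of the original code): a guest brk() that
 * stays within the already-reserved brk_page only zeroes the new range
 * and moves target_brk, while a request beyond brk_page takes the
 * target_mmap() path above; if the kernel places the new mapping anywhere
 * but exactly at brk_page the heap would stop being contiguous, so the
 * mapping is undone and the old break is returned (or -TARGET_ENOMEM on
 * Alpha's OSF/1 emulation).
 */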
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
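
/* Illustrative note (not part of the original code): the guest fd_set is
 * an array of abi_ulong words with fd N held in bit (N % TARGET_ABI_BITS)
 * of word (N / TARGET_ABI_BITS); with TARGET_ABI_BITS == 32, fd 33 is bit
 * 1 of word 1. The loops above repack every bit through FD_SET()/
 * FD_ISSET() because the host fd_set may use a different word size and
 * endianness.
 */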
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
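
/* Illustrative note (not part of the original code): select() is emulated
 * via the host pselect6() because the guest passes a struct timeval while
 * pselect6 takes a timespec; the shim converts tv_usec * 1000 -> tv_nsec
 * on entry and back on exit, so a guest that relies on Linux writing back
 * the remaining timeout still observes a decremented timeval.
 */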
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
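
/* Illustrative note (not part of the original code): on MIPS, for
 * instance, the classic pipe() syscall returns the read end in v0 and the
 * write end in v1 (gpr[3] above) instead of storing both descriptors
 * through the user pointer, which is why these special cases return
 * host_pipe[0] directly rather than falling through to the
 * put_user_s32() stores.
 */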
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
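
/* Illustrative note (not part of the original code): most sockaddr
 * payloads (IPv4 addresses, port numbers) are kept in network byte order
 * and copy through unchanged; only fields defined to be in *host* byte
 * order, such as sockaddr_nl's nl_pid/nl_groups or sockaddr_ll's
 * sll_ifindex, need a tswap when guest and host endianness differ.
 */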
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1616 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1617 struct msghdr *msgh)
1619 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1620 abi_long msg_controllen;
1621 abi_ulong target_cmsg_addr;
1622 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1623 socklen_t space = 0;
1625 msg_controllen = tswapal(target_msgh->msg_controllen);
1626 if (msg_controllen < sizeof (struct target_cmsghdr))
1627 goto the_end;
1628 target_cmsg_addr = tswapal(target_msgh->msg_control);
1629 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1630 target_cmsg_start = target_cmsg;
1631 if (!target_cmsg)
1632 return -TARGET_EFAULT;
1634 while (cmsg && target_cmsg) {
1635 void *data = CMSG_DATA(cmsg);
1636 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1638 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1639 int tgt_len, tgt_space;
1641 /* We never copy a half-header but may copy half-data;
1642 * this is Linux's behaviour in put_cmsg(). Note that
1643 * truncation here is a guest problem (which we report
1644 * to the guest via the CTRUNC bit), unlike truncation
1645 * in target_to_host_cmsg, which is a QEMU bug.
1647 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1648 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1649 break;
1652 if (cmsg->cmsg_level == SOL_SOCKET) {
1653 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1654 } else {
1655 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1657 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1659 /* Payload types which need a different size of payload on
1660 * the target must adjust tgt_len here.
1662 tgt_len = len;
1663 switch (cmsg->cmsg_level) {
1664 case SOL_SOCKET:
1665 switch (cmsg->cmsg_type) {
1666 case SO_TIMESTAMP:
1667 tgt_len = sizeof(struct target_timeval);
1668 break;
1669 default:
1670 break;
1672 break;
1673 default:
1674 break;
1677 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1678 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1679 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1682 /* We must now copy-and-convert len bytes of payload
1683 * into tgt_len bytes of destination space. Bear in mind
1684 * that in both source and destination we may be dealing
1685 * with a truncated value!
1687 switch (cmsg->cmsg_level) {
1688 case SOL_SOCKET:
1689 switch (cmsg->cmsg_type) {
1690 case SCM_RIGHTS:
1692 int *fd = (int *)data;
1693 int *target_fd = (int *)target_data;
1694 int i, numfds = tgt_len / sizeof(int);
1696 for (i = 0; i < numfds; i++) {
1697 __put_user(fd[i], target_fd + i);
1699 break;
1701 case SO_TIMESTAMP:
1703 struct timeval *tv = (struct timeval *)data;
1704 struct target_timeval *target_tv =
1705 (struct target_timeval *)target_data;
1707 if (len != sizeof(struct timeval) ||
1708 tgt_len != sizeof(struct target_timeval)) {
1709 goto unimplemented;
1712 /* copy struct timeval to target */
1713 __put_user(tv->tv_sec, &target_tv->tv_sec);
1714 __put_user(tv->tv_usec, &target_tv->tv_usec);
1715 break;
1717 case SCM_CREDENTIALS:
1719 struct ucred *cred = (struct ucred *)data;
1720 struct target_ucred *target_cred =
1721 (struct target_ucred *)target_data;
1723 __put_user(cred->pid, &target_cred->pid);
1724 __put_user(cred->uid, &target_cred->uid);
1725 __put_user(cred->gid, &target_cred->gid);
1726 break;
1728 default:
1729 goto unimplemented;
1731 break;
1733 case SOL_IP:
1734 switch (cmsg->cmsg_type) {
1735 case IP_TTL:
1737 uint32_t *v = (uint32_t *)data;
1738 uint32_t *t_int = (uint32_t *)target_data;
1740 if (len != sizeof(uint32_t) ||
1741 tgt_len != sizeof(uint32_t)) {
1742 goto unimplemented;
1744 __put_user(*v, t_int);
1745 break;
1747 case IP_RECVERR:
1749 struct errhdr_t {
1750 struct sock_extended_err ee;
1751 struct sockaddr_in offender;
1753 struct errhdr_t *errh = (struct errhdr_t *)data;
1754 struct errhdr_t *target_errh =
1755 (struct errhdr_t *)target_data;
1757 if (len != sizeof(struct errhdr_t) ||
1758 tgt_len != sizeof(struct errhdr_t)) {
1759 goto unimplemented;
1761 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1762 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1763 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1764 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1765 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1766 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1767 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1768 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1769 (void *) &errh->offender, sizeof(errh->offender));
1770 break;
1772 default:
1773 goto unimplemented;
1775 break;
1777 case SOL_IPV6:
1778 switch (cmsg->cmsg_type) {
1779 case IPV6_HOPLIMIT:
1781 uint32_t *v = (uint32_t *)data;
1782 uint32_t *t_int = (uint32_t *)target_data;
1784 if (len != sizeof(uint32_t) ||
1785 tgt_len != sizeof(uint32_t)) {
1786 goto unimplemented;
1788 __put_user(*v, t_int);
1789 break;
1791 case IPV6_RECVERR:
1793 struct errhdr6_t {
1794 struct sock_extended_err ee;
1795 struct sockaddr_in6 offender;
1797 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1798 struct errhdr6_t *target_errh =
1799 (struct errhdr6_t *)target_data;
1801 if (len != sizeof(struct errhdr6_t) ||
1802 tgt_len != sizeof(struct errhdr6_t)) {
1803 goto unimplemented;
1805 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1806 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1807 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1808 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1809 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1810 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1811 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1812 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1813 (void *) &errh->offender, sizeof(errh->offender));
1814 break;
1816 default:
1817 goto unimplemented;
1819 break;
1821 default:
1822 unimplemented:
1823 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1824 cmsg->cmsg_level, cmsg->cmsg_type);
1825 memcpy(target_data, data, MIN(len, tgt_len));
1826 if (tgt_len > len) {
1827 memset(target_data + len, 0, tgt_len - len);
1831 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1832 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1833 if (msg_controllen < tgt_space) {
1834 tgt_space = msg_controllen;
1836 msg_controllen -= tgt_space;
1837 space += tgt_space;
1838 cmsg = CMSG_NXTHDR(msgh, cmsg);
1839 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1840 target_cmsg_start);
1842 unlock_user(target_cmsg, target_cmsg_addr, space);
1843 the_end:
1844 target_msgh->msg_controllen = tswapal(space);
1845 return 0;
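/*
 * Illustrative sketch (assumed example values, not original code): suppose
 * the host kernel delivered one SCM_RIGHTS cmsg carrying three file
 * descriptors, but the guest only left room for two:
 *
 *     host payload:  int fds[3];  len = 3 * sizeof(int)
 *     guest space:   msg_controllen only covers TARGET_CMSG_LEN(2 * sizeof(int))
 *
 * The loop above then clamps tgt_len to the space actually available,
 * copies only the complete descriptors that still fit
 * (numfds = tgt_len / sizeof(int)), and ORs MSG_CTRUNC into
 * target_msgh->msg_flags, mirroring how a native kernel reports truncated
 * ancillary data.
 */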
1848 /* do_setsockopt() Must return target values and target errnos. */
1849 static abi_long do_setsockopt(int sockfd, int level, int optname,
1850 abi_ulong optval_addr, socklen_t optlen)
1852 abi_long ret;
1853 int val;
1854 struct ip_mreqn *ip_mreq;
1855 struct ip_mreq_source *ip_mreq_source;
1857 switch(level) {
1858 case SOL_TCP:
1859 /* TCP options all take an 'int' value. */
1860 if (optlen < sizeof(uint32_t))
1861 return -TARGET_EINVAL;
1863 if (get_user_u32(val, optval_addr))
1864 return -TARGET_EFAULT;
1865 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1866 break;
1867 case SOL_IP:
1868 switch(optname) {
1869 case IP_TOS:
1870 case IP_TTL:
1871 case IP_HDRINCL:
1872 case IP_ROUTER_ALERT:
1873 case IP_RECVOPTS:
1874 case IP_RETOPTS:
1875 case IP_PKTINFO:
1876 case IP_MTU_DISCOVER:
1877 case IP_RECVERR:
1878 case IP_RECVTTL:
1879 case IP_RECVTOS:
1880 #ifdef IP_FREEBIND
1881 case IP_FREEBIND:
1882 #endif
1883 case IP_MULTICAST_TTL:
1884 case IP_MULTICAST_LOOP:
1885 val = 0;
1886 if (optlen >= sizeof(uint32_t)) {
1887 if (get_user_u32(val, optval_addr))
1888 return -TARGET_EFAULT;
1889 } else if (optlen >= 1) {
1890 if (get_user_u8(val, optval_addr))
1891 return -TARGET_EFAULT;
1893 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1894 break;
1895 case IP_ADD_MEMBERSHIP:
1896 case IP_DROP_MEMBERSHIP:
1897 if (optlen < sizeof (struct target_ip_mreq) ||
1898 optlen > sizeof (struct target_ip_mreqn))
1899 return -TARGET_EINVAL;
1901 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1902 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1903 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1904 break;
1906 case IP_BLOCK_SOURCE:
1907 case IP_UNBLOCK_SOURCE:
1908 case IP_ADD_SOURCE_MEMBERSHIP:
1909 case IP_DROP_SOURCE_MEMBERSHIP:
1910 if (optlen != sizeof (struct target_ip_mreq_source))
1911 return -TARGET_EINVAL;
1913 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1914 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1915 unlock_user(ip_mreq_source, optval_addr, 0);
1916 break;
1918 default:
1919 goto unimplemented;
1921 break;
1922 case SOL_IPV6:
1923 switch (optname) {
1924 case IPV6_MTU_DISCOVER:
1925 case IPV6_MTU:
1926 case IPV6_V6ONLY:
1927 case IPV6_RECVPKTINFO:
1928 case IPV6_UNICAST_HOPS:
1929 case IPV6_MULTICAST_HOPS:
1930 case IPV6_MULTICAST_LOOP:
1931 case IPV6_RECVERR:
1932 case IPV6_RECVHOPLIMIT:
1933 case IPV6_2292HOPLIMIT:
1934 case IPV6_CHECKSUM:
1935 case IPV6_ADDRFORM:
1936 case IPV6_2292PKTINFO:
1937 case IPV6_RECVTCLASS:
1938 case IPV6_RECVRTHDR:
1939 case IPV6_2292RTHDR:
1940 case IPV6_RECVHOPOPTS:
1941 case IPV6_2292HOPOPTS:
1942 case IPV6_RECVDSTOPTS:
1943 case IPV6_2292DSTOPTS:
1944 case IPV6_TCLASS:
1945 #ifdef IPV6_RECVPATHMTU
1946 case IPV6_RECVPATHMTU:
1947 #endif
1948 #ifdef IPV6_TRANSPARENT
1949 case IPV6_TRANSPARENT:
1950 #endif
1951 #ifdef IPV6_FREEBIND
1952 case IPV6_FREEBIND:
1953 #endif
1954 #ifdef IPV6_RECVORIGDSTADDR
1955 case IPV6_RECVORIGDSTADDR:
1956 #endif
1957 val = 0;
1958 if (optlen < sizeof(uint32_t)) {
1959 return -TARGET_EINVAL;
1961 if (get_user_u32(val, optval_addr)) {
1962 return -TARGET_EFAULT;
1964 ret = get_errno(setsockopt(sockfd, level, optname,
1965 &val, sizeof(val)));
1966 break;
1967 case IPV6_PKTINFO:
1969 struct in6_pktinfo pki;
1971 if (optlen < sizeof(pki)) {
1972 return -TARGET_EINVAL;
1975 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1976 return -TARGET_EFAULT;
1979 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1981 ret = get_errno(setsockopt(sockfd, level, optname,
1982 &pki, sizeof(pki)));
1983 break;
1985 case IPV6_ADD_MEMBERSHIP:
1986 case IPV6_DROP_MEMBERSHIP:
1988 struct ipv6_mreq ipv6mreq;
1990 if (optlen < sizeof(ipv6mreq)) {
1991 return -TARGET_EINVAL;
1994 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1995 return -TARGET_EFAULT;
1998 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2000 ret = get_errno(setsockopt(sockfd, level, optname,
2001 &ipv6mreq, sizeof(ipv6mreq)));
2002 break;
2004 default:
2005 goto unimplemented;
2007 break;
2008 case SOL_ICMPV6:
2009 switch (optname) {
2010 case ICMPV6_FILTER:
2012 struct icmp6_filter icmp6f;
2014 if (optlen > sizeof(icmp6f)) {
2015 optlen = sizeof(icmp6f);
2018 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2019 return -TARGET_EFAULT;
2022 for (val = 0; val < 8; val++) {
2023 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2026 ret = get_errno(setsockopt(sockfd, level, optname,
2027 &icmp6f, optlen));
2028 break;
2030 default:
2031 goto unimplemented;
2033 break;
2034 case SOL_RAW:
2035 switch (optname) {
2036 case ICMP_FILTER:
2037 case IPV6_CHECKSUM:
2038 /* these take a u32 value */
2039 if (optlen < sizeof(uint32_t)) {
2040 return -TARGET_EINVAL;
2043 if (get_user_u32(val, optval_addr)) {
2044 return -TARGET_EFAULT;
2046 ret = get_errno(setsockopt(sockfd, level, optname,
2047 &val, sizeof(val)));
2048 break;
2050 default:
2051 goto unimplemented;
2053 break;
2054 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2055 case SOL_ALG:
2056 switch (optname) {
2057 case ALG_SET_KEY:
2059 char *alg_key = g_malloc(optlen);
2061 if (!alg_key) {
2062 return -TARGET_ENOMEM;
2064 if (copy_from_user(alg_key, optval_addr, optlen)) {
2065 g_free(alg_key);
2066 return -TARGET_EFAULT;
2068 ret = get_errno(setsockopt(sockfd, level, optname,
2069 alg_key, optlen));
2070 g_free(alg_key);
2071 break;
2073 case ALG_SET_AEAD_AUTHSIZE:
2075 ret = get_errno(setsockopt(sockfd, level, optname,
2076 NULL, optlen));
2077 break;
2079 default:
2080 goto unimplemented;
2082 break;
2083 #endif
2084 case TARGET_SOL_SOCKET:
2085 switch (optname) {
2086 case TARGET_SO_RCVTIMEO:
2088 struct timeval tv;
2090 optname = SO_RCVTIMEO;
2092 set_timeout:
2093 if (optlen != sizeof(struct target_timeval)) {
2094 return -TARGET_EINVAL;
2097 if (copy_from_user_timeval(&tv, optval_addr)) {
2098 return -TARGET_EFAULT;
2101 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2102 &tv, sizeof(tv)));
2103 return ret;
2105 case TARGET_SO_SNDTIMEO:
2106 optname = SO_SNDTIMEO;
2107 goto set_timeout;
2108 case TARGET_SO_ATTACH_FILTER:
2110 struct target_sock_fprog *tfprog;
2111 struct target_sock_filter *tfilter;
2112 struct sock_fprog fprog;
2113 struct sock_filter *filter;
2114 int i;
2116 if (optlen != sizeof(*tfprog)) {
2117 return -TARGET_EINVAL;
2119 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2120 return -TARGET_EFAULT;
2122 if (!lock_user_struct(VERIFY_READ, tfilter,
2123 tswapal(tfprog->filter), 0)) {
2124 unlock_user_struct(tfprog, optval_addr, 1);
2125 return -TARGET_EFAULT;
2128 fprog.len = tswap16(tfprog->len);
2129 filter = g_try_new(struct sock_filter, fprog.len);
2130 if (filter == NULL) {
2131 unlock_user_struct(tfilter, tfprog->filter, 1);
2132 unlock_user_struct(tfprog, optval_addr, 1);
2133 return -TARGET_ENOMEM;
2135 for (i = 0; i < fprog.len; i++) {
2136 filter[i].code = tswap16(tfilter[i].code);
2137 filter[i].jt = tfilter[i].jt;
2138 filter[i].jf = tfilter[i].jf;
2139 filter[i].k = tswap32(tfilter[i].k);
2141 fprog.filter = filter;
2143 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2144 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2145 g_free(filter);
2147 unlock_user_struct(tfilter, tfprog->filter, 1);
2148 unlock_user_struct(tfprog, optval_addr, 1);
2149 return ret;
2151 case TARGET_SO_BINDTODEVICE:
2153 char *dev_ifname, *addr_ifname;
2155 if (optlen > IFNAMSIZ - 1) {
2156 optlen = IFNAMSIZ - 1;
2158 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2159 if (!dev_ifname) {
2160 return -TARGET_EFAULT;
2162 optname = SO_BINDTODEVICE;
2163 addr_ifname = alloca(IFNAMSIZ);
2164 memcpy(addr_ifname, dev_ifname, optlen);
2165 addr_ifname[optlen] = 0;
2166 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2167 addr_ifname, optlen));
2168 unlock_user(dev_ifname, optval_addr, 0);
2169 return ret;
2171 case TARGET_SO_LINGER:
2173 struct linger lg;
2174 struct target_linger *tlg;
2176 if (optlen != sizeof(struct target_linger)) {
2177 return -TARGET_EINVAL;
2179 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2180 return -TARGET_EFAULT;
2182 __get_user(lg.l_onoff, &tlg->l_onoff);
2183 __get_user(lg.l_linger, &tlg->l_linger);
2184 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2185 &lg, sizeof(lg)));
2186 unlock_user_struct(tlg, optval_addr, 0);
2187 return ret;
2189 /* Options with 'int' argument. */
2190 case TARGET_SO_DEBUG:
2191 optname = SO_DEBUG;
2192 break;
2193 case TARGET_SO_REUSEADDR:
2194 optname = SO_REUSEADDR;
2195 break;
2196 #ifdef SO_REUSEPORT
2197 case TARGET_SO_REUSEPORT:
2198 optname = SO_REUSEPORT;
2199 break;
2200 #endif
2201 case TARGET_SO_TYPE:
2202 optname = SO_TYPE;
2203 break;
2204 case TARGET_SO_ERROR:
2205 optname = SO_ERROR;
2206 break;
2207 case TARGET_SO_DONTROUTE:
2208 optname = SO_DONTROUTE;
2209 break;
2210 case TARGET_SO_BROADCAST:
2211 optname = SO_BROADCAST;
2212 break;
2213 case TARGET_SO_SNDBUF:
2214 optname = SO_SNDBUF;
2215 break;
2216 case TARGET_SO_SNDBUFFORCE:
2217 optname = SO_SNDBUFFORCE;
2218 break;
2219 case TARGET_SO_RCVBUF:
2220 optname = SO_RCVBUF;
2221 break;
2222 case TARGET_SO_RCVBUFFORCE:
2223 optname = SO_RCVBUFFORCE;
2224 break;
2225 case TARGET_SO_KEEPALIVE:
2226 optname = SO_KEEPALIVE;
2227 break;
2228 case TARGET_SO_OOBINLINE:
2229 optname = SO_OOBINLINE;
2230 break;
2231 case TARGET_SO_NO_CHECK:
2232 optname = SO_NO_CHECK;
2233 break;
2234 case TARGET_SO_PRIORITY:
2235 optname = SO_PRIORITY;
2236 break;
2237 #ifdef SO_BSDCOMPAT
2238 case TARGET_SO_BSDCOMPAT:
2239 optname = SO_BSDCOMPAT;
2240 break;
2241 #endif
2242 case TARGET_SO_PASSCRED:
2243 optname = SO_PASSCRED;
2244 break;
2245 case TARGET_SO_PASSSEC:
2246 optname = SO_PASSSEC;
2247 break;
2248 case TARGET_SO_TIMESTAMP:
2249 optname = SO_TIMESTAMP;
2250 break;
2251 case TARGET_SO_RCVLOWAT:
2252 optname = SO_RCVLOWAT;
2253 break;
2254 default:
2255 goto unimplemented;
2257 if (optlen < sizeof(uint32_t))
2258 return -TARGET_EINVAL;
2260 if (get_user_u32(val, optval_addr))
2261 return -TARGET_EFAULT;
2262 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2263 break;
2264 #ifdef SOL_NETLINK
2265 case SOL_NETLINK:
2266 switch (optname) {
2267 case NETLINK_PKTINFO:
2268 case NETLINK_ADD_MEMBERSHIP:
2269 case NETLINK_DROP_MEMBERSHIP:
2270 case NETLINK_BROADCAST_ERROR:
2271 case NETLINK_NO_ENOBUFS:
2272 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2273 case NETLINK_LISTEN_ALL_NSID:
2274 case NETLINK_CAP_ACK:
2275 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2276 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2277 case NETLINK_EXT_ACK:
2278 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2279 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2280 case NETLINK_GET_STRICT_CHK:
2281 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2282 break;
2283 default:
2284 goto unimplemented;
2286 val = 0;
2287 if (optlen < sizeof(uint32_t)) {
2288 return -TARGET_EINVAL;
2290 if (get_user_u32(val, optval_addr)) {
2291 return -TARGET_EFAULT;
2293 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2294 sizeof(val)));
2295 break;
2296 #endif /* SOL_NETLINK */
2297 default:
2298 unimplemented:
2299 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2300 level, optname);
2301 ret = -TARGET_ENOPROTOOPT;
2303 return ret;
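/*
 * Illustrative sketch (the guest-side code here is an assumed example):
 * a guest call such as
 *
 *     struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *     setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * reaches do_setsockopt() with level == TARGET_SOL_SOCKET and a
 * struct target_timeval in guest memory.  The TARGET_SO_RCVTIMEO case
 * insists on optlen == sizeof(struct target_timeval), converts the value
 * with copy_from_user_timeval() (which handles ABI layout and byte order),
 * and only then issues the host setsockopt() with a host struct timeval.
 */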
2306 /* do_getsockopt() Must return target values and target errnos. */
2307 static abi_long do_getsockopt(int sockfd, int level, int optname,
2308 abi_ulong optval_addr, abi_ulong optlen)
2310 abi_long ret;
2311 int len, val;
2312 socklen_t lv;
2314 switch(level) {
2315 case TARGET_SOL_SOCKET:
2316 level = SOL_SOCKET;
2317 switch (optname) {
2318 /* These don't just return a single integer */
2319 case TARGET_SO_PEERNAME:
2320 goto unimplemented;
2321 case TARGET_SO_RCVTIMEO: {
2322 struct timeval tv;
2323 socklen_t tvlen;
2325 optname = SO_RCVTIMEO;
2327 get_timeout:
2328 if (get_user_u32(len, optlen)) {
2329 return -TARGET_EFAULT;
2331 if (len < 0) {
2332 return -TARGET_EINVAL;
2335 tvlen = sizeof(tv);
2336 ret = get_errno(getsockopt(sockfd, level, optname,
2337 &tv, &tvlen));
2338 if (ret < 0) {
2339 return ret;
2341 if (len > sizeof(struct target_timeval)) {
2342 len = sizeof(struct target_timeval);
2344 if (copy_to_user_timeval(optval_addr, &tv)) {
2345 return -TARGET_EFAULT;
2347 if (put_user_u32(len, optlen)) {
2348 return -TARGET_EFAULT;
2350 break;
2352 case TARGET_SO_SNDTIMEO:
2353 optname = SO_SNDTIMEO;
2354 goto get_timeout;
2355 case TARGET_SO_PEERCRED: {
2356 struct ucred cr;
2357 socklen_t crlen;
2358 struct target_ucred *tcr;
2360 if (get_user_u32(len, optlen)) {
2361 return -TARGET_EFAULT;
2363 if (len < 0) {
2364 return -TARGET_EINVAL;
2367 crlen = sizeof(cr);
2368 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2369 &cr, &crlen));
2370 if (ret < 0) {
2371 return ret;
2373 if (len > crlen) {
2374 len = crlen;
2376 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2377 return -TARGET_EFAULT;
2379 __put_user(cr.pid, &tcr->pid);
2380 __put_user(cr.uid, &tcr->uid);
2381 __put_user(cr.gid, &tcr->gid);
2382 unlock_user_struct(tcr, optval_addr, 1);
2383 if (put_user_u32(len, optlen)) {
2384 return -TARGET_EFAULT;
2386 break;
2388 case TARGET_SO_PEERSEC: {
2389 char *name;
2391 if (get_user_u32(len, optlen)) {
2392 return -TARGET_EFAULT;
2394 if (len < 0) {
2395 return -TARGET_EINVAL;
2397 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2398 if (!name) {
2399 return -TARGET_EFAULT;
2401 lv = len;
2402 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2403 name, &lv));
2404 if (put_user_u32(lv, optlen)) {
2405 ret = -TARGET_EFAULT;
2407 unlock_user(name, optval_addr, lv);
2408 break;
2410 case TARGET_SO_LINGER:
2412 struct linger lg;
2413 socklen_t lglen;
2414 struct target_linger *tlg;
2416 if (get_user_u32(len, optlen)) {
2417 return -TARGET_EFAULT;
2419 if (len < 0) {
2420 return -TARGET_EINVAL;
2423 lglen = sizeof(lg);
2424 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2425 &lg, &lglen));
2426 if (ret < 0) {
2427 return ret;
2429 if (len > lglen) {
2430 len = lglen;
2432 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2433 return -TARGET_EFAULT;
2435 __put_user(lg.l_onoff, &tlg->l_onoff);
2436 __put_user(lg.l_linger, &tlg->l_linger);
2437 unlock_user_struct(tlg, optval_addr, 1);
2438 if (put_user_u32(len, optlen)) {
2439 return -TARGET_EFAULT;
2441 break;
2443 /* Options with 'int' argument. */
2444 case TARGET_SO_DEBUG:
2445 optname = SO_DEBUG;
2446 goto int_case;
2447 case TARGET_SO_REUSEADDR:
2448 optname = SO_REUSEADDR;
2449 goto int_case;
2450 #ifdef SO_REUSEPORT
2451 case TARGET_SO_REUSEPORT:
2452 optname = SO_REUSEPORT;
2453 goto int_case;
2454 #endif
2455 case TARGET_SO_TYPE:
2456 optname = SO_TYPE;
2457 goto int_case;
2458 case TARGET_SO_ERROR:
2459 optname = SO_ERROR;
2460 goto int_case;
2461 case TARGET_SO_DONTROUTE:
2462 optname = SO_DONTROUTE;
2463 goto int_case;
2464 case TARGET_SO_BROADCAST:
2465 optname = SO_BROADCAST;
2466 goto int_case;
2467 case TARGET_SO_SNDBUF:
2468 optname = SO_SNDBUF;
2469 goto int_case;
2470 case TARGET_SO_RCVBUF:
2471 optname = SO_RCVBUF;
2472 goto int_case;
2473 case TARGET_SO_KEEPALIVE:
2474 optname = SO_KEEPALIVE;
2475 goto int_case;
2476 case TARGET_SO_OOBINLINE:
2477 optname = SO_OOBINLINE;
2478 goto int_case;
2479 case TARGET_SO_NO_CHECK:
2480 optname = SO_NO_CHECK;
2481 goto int_case;
2482 case TARGET_SO_PRIORITY:
2483 optname = SO_PRIORITY;
2484 goto int_case;
2485 #ifdef SO_BSDCOMPAT
2486 case TARGET_SO_BSDCOMPAT:
2487 optname = SO_BSDCOMPAT;
2488 goto int_case;
2489 #endif
2490 case TARGET_SO_PASSCRED:
2491 optname = SO_PASSCRED;
2492 goto int_case;
2493 case TARGET_SO_TIMESTAMP:
2494 optname = SO_TIMESTAMP;
2495 goto int_case;
2496 case TARGET_SO_RCVLOWAT:
2497 optname = SO_RCVLOWAT;
2498 goto int_case;
2499 case TARGET_SO_ACCEPTCONN:
2500 optname = SO_ACCEPTCONN;
2501 goto int_case;
2502 default:
2503 goto int_case;
2505 break;
2506 case SOL_TCP:
2507 /* TCP options all take an 'int' value. */
2508 int_case:
2509 if (get_user_u32(len, optlen))
2510 return -TARGET_EFAULT;
2511 if (len < 0)
2512 return -TARGET_EINVAL;
2513 lv = sizeof(lv);
2514 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2515 if (ret < 0)
2516 return ret;
2517 if (optname == SO_TYPE) {
2518 val = host_to_target_sock_type(val);
2520 if (len > lv)
2521 len = lv;
2522 if (len == 4) {
2523 if (put_user_u32(val, optval_addr))
2524 return -TARGET_EFAULT;
2525 } else {
2526 if (put_user_u8(val, optval_addr))
2527 return -TARGET_EFAULT;
2529 if (put_user_u32(len, optlen))
2530 return -TARGET_EFAULT;
2531 break;
2532 case SOL_IP:
2533 switch(optname) {
2534 case IP_TOS:
2535 case IP_TTL:
2536 case IP_HDRINCL:
2537 case IP_ROUTER_ALERT:
2538 case IP_RECVOPTS:
2539 case IP_RETOPTS:
2540 case IP_PKTINFO:
2541 case IP_MTU_DISCOVER:
2542 case IP_RECVERR:
2543 case IP_RECVTOS:
2544 #ifdef IP_FREEBIND
2545 case IP_FREEBIND:
2546 #endif
2547 case IP_MULTICAST_TTL:
2548 case IP_MULTICAST_LOOP:
2549 if (get_user_u32(len, optlen))
2550 return -TARGET_EFAULT;
2551 if (len < 0)
2552 return -TARGET_EINVAL;
2553 lv = sizeof(lv);
2554 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2555 if (ret < 0)
2556 return ret;
2557 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2558 len = 1;
2559 if (put_user_u32(len, optlen)
2560 || put_user_u8(val, optval_addr))
2561 return -TARGET_EFAULT;
2562 } else {
2563 if (len > sizeof(int))
2564 len = sizeof(int);
2565 if (put_user_u32(len, optlen)
2566 || put_user_u32(val, optval_addr))
2567 return -TARGET_EFAULT;
2569 break;
2570 default:
2571 ret = -TARGET_ENOPROTOOPT;
2572 break;
2574 break;
2575 case SOL_IPV6:
2576 switch (optname) {
2577 case IPV6_MTU_DISCOVER:
2578 case IPV6_MTU:
2579 case IPV6_V6ONLY:
2580 case IPV6_RECVPKTINFO:
2581 case IPV6_UNICAST_HOPS:
2582 case IPV6_MULTICAST_HOPS:
2583 case IPV6_MULTICAST_LOOP:
2584 case IPV6_RECVERR:
2585 case IPV6_RECVHOPLIMIT:
2586 case IPV6_2292HOPLIMIT:
2587 case IPV6_CHECKSUM:
2588 case IPV6_ADDRFORM:
2589 case IPV6_2292PKTINFO:
2590 case IPV6_RECVTCLASS:
2591 case IPV6_RECVRTHDR:
2592 case IPV6_2292RTHDR:
2593 case IPV6_RECVHOPOPTS:
2594 case IPV6_2292HOPOPTS:
2595 case IPV6_RECVDSTOPTS:
2596 case IPV6_2292DSTOPTS:
2597 case IPV6_TCLASS:
2598 #ifdef IPV6_RECVPATHMTU
2599 case IPV6_RECVPATHMTU:
2600 #endif
2601 #ifdef IPV6_TRANSPARENT
2602 case IPV6_TRANSPARENT:
2603 #endif
2604 #ifdef IPV6_FREEBIND
2605 case IPV6_FREEBIND:
2606 #endif
2607 #ifdef IPV6_RECVORIGDSTADDR
2608 case IPV6_RECVORIGDSTADDR:
2609 #endif
2610 if (get_user_u32(len, optlen))
2611 return -TARGET_EFAULT;
2612 if (len < 0)
2613 return -TARGET_EINVAL;
2614 lv = sizeof(lv);
2615 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2616 if (ret < 0)
2617 return ret;
2618 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2619 len = 1;
2620 if (put_user_u32(len, optlen)
2621 || put_user_u8(val, optval_addr))
2622 return -TARGET_EFAULT;
2623 } else {
2624 if (len > sizeof(int))
2625 len = sizeof(int);
2626 if (put_user_u32(len, optlen)
2627 || put_user_u32(val, optval_addr))
2628 return -TARGET_EFAULT;
2630 break;
2631 default:
2632 ret = -TARGET_ENOPROTOOPT;
2633 break;
2635 break;
2636 #ifdef SOL_NETLINK
2637 case SOL_NETLINK:
2638 switch (optname) {
2639 case NETLINK_PKTINFO:
2640 case NETLINK_BROADCAST_ERROR:
2641 case NETLINK_NO_ENOBUFS:
2642 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2643 case NETLINK_LISTEN_ALL_NSID:
2644 case NETLINK_CAP_ACK:
2645 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2646 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2647 case NETLINK_EXT_ACK:
2648 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2649 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2650 case NETLINK_GET_STRICT_CHK:
2651 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2652 if (get_user_u32(len, optlen)) {
2653 return -TARGET_EFAULT;
2655 if (len != sizeof(val)) {
2656 return -TARGET_EINVAL;
2658 lv = len;
2659 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2660 if (ret < 0) {
2661 return ret;
2663 if (put_user_u32(lv, optlen)
2664 || put_user_u32(val, optval_addr)) {
2665 return -TARGET_EFAULT;
2667 break;
2668 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2669 case NETLINK_LIST_MEMBERSHIPS:
2671 uint32_t *results;
2672 int i;
2673 if (get_user_u32(len, optlen)) {
2674 return -TARGET_EFAULT;
2676 if (len < 0) {
2677 return -TARGET_EINVAL;
2679 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2680 if (!results) {
2681 return -TARGET_EFAULT;
2683 lv = len;
2684 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2685 if (ret < 0) {
2686 unlock_user(results, optval_addr, 0);
2687 return ret;
2689 /* Swap host endianness to target endianness. */
2690 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2691 results[i] = tswap32(results[i]);
2693 if (put_user_u32(lv, optlen)) {
2694 return -TARGET_EFAULT;
2696 unlock_user(results, optval_addr, 0);
2697 break;
2699 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2700 default:
2701 goto unimplemented;
2703 break;
2704 #endif /* SOL_NETLINK */
2705 default:
2706 unimplemented:
2707 qemu_log_mask(LOG_UNIMP,
2708 "getsockopt level=%d optname=%d not yet supported\n",
2709 level, optname);
2710 ret = -TARGET_EOPNOTSUPP;
2711 break;
2713 return ret;
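/*
 * Illustrative sketch (assumed example): the int_case path lets the guest
 * read an int-valued option into a buffer of either size.  For a guest
 * getsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &v, &len):
 *
 *     len >= 4  ->  clamped to 4, put_user_u32() writes the full int
 *     len <  4  ->  put_user_u8() writes only the low byte
 *
 * and the length actually used is stored back through optlen.
 */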
2716 /* Convert a target low/high pair representing a file offset into the host
2717 * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2718 * as the kernel doesn't handle them either.
2720 static void target_to_host_low_high(abi_ulong tlow,
2721 abi_ulong thigh,
2722 unsigned long *hlow,
2723 unsigned long *hhigh)
2725 uint64_t off = tlow |
2726 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2727 TARGET_LONG_BITS / 2;
2729 *hlow = off;
2730 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
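/*
 * Worked example (illustrative values): for a 32-bit target,
 * TARGET_LONG_BITS / 2 is 16, so thigh is shifted left twice by 16,
 * i.e. by 32 bits in total:
 *
 *     tlow = 0x89abcdef, thigh = 0x01234567
 *     off  = 0x0123456789abcdef
 *     64-bit host:  *hlow = 0x0123456789abcdef, *hhigh = 0
 *     32-bit host:  *hlow = 0x89abcdef,         *hhigh = 0x01234567
 *
 * Splitting each shift into two halves keeps every shift count below the
 * width of the type: a single shift by TARGET_LONG_BITS (64 on a 64-bit
 * target) would be undefined behaviour in C, whereas two shifts of 32
 * simply discard thigh, matching the "no offsets bigger than 64 bits"
 * note above.
 */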
2733 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2734 abi_ulong count, int copy)
2736 struct target_iovec *target_vec;
2737 struct iovec *vec;
2738 abi_ulong total_len, max_len;
2739 int i;
2740 int err = 0;
2741 bool bad_address = false;
2743 if (count == 0) {
2744 errno = 0;
2745 return NULL;
2747 if (count > IOV_MAX) {
2748 errno = EINVAL;
2749 return NULL;
2752 vec = g_try_new0(struct iovec, count);
2753 if (vec == NULL) {
2754 errno = ENOMEM;
2755 return NULL;
2758 target_vec = lock_user(VERIFY_READ, target_addr,
2759 count * sizeof(struct target_iovec), 1);
2760 if (target_vec == NULL) {
2761 err = EFAULT;
2762 goto fail2;
2765 /* ??? If host page size > target page size, this will result in a
2766 value larger than what we can actually support. */
2767 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2768 total_len = 0;
2770 for (i = 0; i < count; i++) {
2771 abi_ulong base = tswapal(target_vec[i].iov_base);
2772 abi_long len = tswapal(target_vec[i].iov_len);
2774 if (len < 0) {
2775 err = EINVAL;
2776 goto fail;
2777 } else if (len == 0) {
2778 /* A zero-length iovec entry is ignored. */
2779 vec[i].iov_base = 0;
2780 } else {
2781 vec[i].iov_base = lock_user(type, base, len, copy);
2782 /* If the first buffer pointer is bad, this is a fault. But
2783 * subsequent bad buffers will result in a partial write; this
2784 * is realized by filling the vector with null pointers and
2785 * zero lengths. */
2786 if (!vec[i].iov_base) {
2787 if (i == 0) {
2788 err = EFAULT;
2789 goto fail;
2790 } else {
2791 bad_address = true;
2794 if (bad_address) {
2795 len = 0;
2797 if (len > max_len - total_len) {
2798 len = max_len - total_len;
2801 vec[i].iov_len = len;
2802 total_len += len;
2805 unlock_user(target_vec, target_addr, 0);
2806 return vec;
2808 fail:
2809 while (--i >= 0) {
2810 if (tswapal(target_vec[i].iov_len) > 0) {
2811 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2814 unlock_user(target_vec, target_addr, 0);
2815 fail2:
2816 g_free(vec);
2817 errno = err;
2818 return NULL;
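/*
 * Illustrative sketch (assumed example): the partial-write semantics above
 * mean that for a guest writev() with three iovecs where only the second
 * points at unmapped memory,
 *
 *     target:  { buf0, len0 }, { bad, len1 }, { buf2, len2 }
 *     host:    { buf0, len0 }, { NULL, 0 },   { buf2', 0 }
 *
 * the host writev() transfers just the first buffer and returns a short
 * count, much as the native kernel produces a partial write when it
 * faults part way through an iovec array.  Only a fault on the very
 * first buffer is reported as EFAULT outright.
 */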
2821 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2822 abi_ulong count, int copy)
2824 struct target_iovec *target_vec;
2825 int i;
2827 target_vec = lock_user(VERIFY_READ, target_addr,
2828 count * sizeof(struct target_iovec), 1);
2829 if (target_vec) {
2830 for (i = 0; i < count; i++) {
2831 abi_ulong base = tswapal(target_vec[i].iov_base);
2832 abi_long len = tswapal(target_vec[i].iov_len);
2833 if (len < 0) {
2834 break;
2836 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2838 unlock_user(target_vec, target_addr, 0);
2841 g_free(vec);
2844 static inline int target_to_host_sock_type(int *type)
2846 int host_type = 0;
2847 int target_type = *type;
2849 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2850 case TARGET_SOCK_DGRAM:
2851 host_type = SOCK_DGRAM;
2852 break;
2853 case TARGET_SOCK_STREAM:
2854 host_type = SOCK_STREAM;
2855 break;
2856 default:
2857 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2858 break;
2860 if (target_type & TARGET_SOCK_CLOEXEC) {
2861 #if defined(SOCK_CLOEXEC)
2862 host_type |= SOCK_CLOEXEC;
2863 #else
2864 return -TARGET_EINVAL;
2865 #endif
2867 if (target_type & TARGET_SOCK_NONBLOCK) {
2868 #if defined(SOCK_NONBLOCK)
2869 host_type |= SOCK_NONBLOCK;
2870 #elif !defined(O_NONBLOCK)
2871 return -TARGET_EINVAL;
2872 #endif
2874 *type = host_type;
2875 return 0;
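/*
 * Illustrative sketch (assumed example): for a guest
 * socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0) this
 * function rewrites
 *
 *     TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK | TARGET_SOCK_CLOEXEC
 *  -> SOCK_STREAM        | SOCK_NONBLOCK        | SOCK_CLOEXEC
 *
 * The explicit remapping matters because the numeric values of these
 * constants are target-ABI dependent and need not match the host's; on
 * hosts lacking SOCK_NONBLOCK the flag is instead emulated after the
 * fact by sock_flags_fixup() below via fcntl(F_SETFL, O_NONBLOCK).
 */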
2878 /* Try to emulate socket type flags after socket creation. */
2879 static int sock_flags_fixup(int fd, int target_type)
2881 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2882 if (target_type & TARGET_SOCK_NONBLOCK) {
2883 int flags = fcntl(fd, F_GETFL);
2884 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2885 close(fd);
2886 return -TARGET_EINVAL;
2889 #endif
2890 return fd;
2893 /* do_socket() Must return target values and target errnos. */
2894 static abi_long do_socket(int domain, int type, int protocol)
2896 int target_type = type;
2897 int ret;
2899 ret = target_to_host_sock_type(&type);
2900 if (ret) {
2901 return ret;
2904 if (domain == PF_NETLINK && !(
2905 #ifdef CONFIG_RTNETLINK
2906 protocol == NETLINK_ROUTE ||
2907 #endif
2908 protocol == NETLINK_KOBJECT_UEVENT ||
2909 protocol == NETLINK_AUDIT)) {
2910 return -EPFNOSUPPORT;
2913 if (domain == AF_PACKET ||
2914 (domain == AF_INET && type == SOCK_PACKET)) {
2915 protocol = tswap16(protocol);
2918 ret = get_errno(socket(domain, type, protocol));
2919 if (ret >= 0) {
2920 ret = sock_flags_fixup(ret, target_type);
2921 if (type == SOCK_PACKET) {
2922 /* Handle an obsolete case:
2923 * if socket type is SOCK_PACKET, bind by name
2925 fd_trans_register(ret, &target_packet_trans);
2926 } else if (domain == PF_NETLINK) {
2927 switch (protocol) {
2928 #ifdef CONFIG_RTNETLINK
2929 case NETLINK_ROUTE:
2930 fd_trans_register(ret, &target_netlink_route_trans);
2931 break;
2932 #endif
2933 case NETLINK_KOBJECT_UEVENT:
2934 /* nothing to do: messages are strings */
2935 break;
2936 case NETLINK_AUDIT:
2937 fd_trans_register(ret, &target_netlink_audit_trans);
2938 break;
2939 default:
2940 g_assert_not_reached();
2944 return ret;
2947 /* do_bind() Must return target values and target errnos. */
2948 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2949 socklen_t addrlen)
2951 void *addr;
2952 abi_long ret;
2954 if ((int)addrlen < 0) {
2955 return -TARGET_EINVAL;
2958 addr = alloca(addrlen+1);
2960 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2961 if (ret)
2962 return ret;
2964 return get_errno(bind(sockfd, addr, addrlen));
2967 /* do_connect() Must return target values and target errnos. */
2968 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2969 socklen_t addrlen)
2971 void *addr;
2972 abi_long ret;
2974 if ((int)addrlen < 0) {
2975 return -TARGET_EINVAL;
2978 addr = alloca(addrlen+1);
2980 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2981 if (ret)
2982 return ret;
2984 return get_errno(safe_connect(sockfd, addr, addrlen));
2987 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2988 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2989 int flags, int send)
2991 abi_long ret, len;
2992 struct msghdr msg;
2993 abi_ulong count;
2994 struct iovec *vec;
2995 abi_ulong target_vec;
2997 if (msgp->msg_name) {
2998 msg.msg_namelen = tswap32(msgp->msg_namelen);
2999 msg.msg_name = alloca(msg.msg_namelen+1);
3000 ret = target_to_host_sockaddr(fd, msg.msg_name,
3001 tswapal(msgp->msg_name),
3002 msg.msg_namelen);
3003 if (ret == -TARGET_EFAULT) {
3004 /* For connected sockets msg_name and msg_namelen must
3005 * be ignored, so returning EFAULT immediately is wrong.
3006 * Instead, pass a bad msg_name to the host kernel, and
3007 * let it decide whether to return EFAULT or not.
3009 msg.msg_name = (void *)-1;
3010 } else if (ret) {
3011 goto out2;
3013 } else {
3014 msg.msg_name = NULL;
3015 msg.msg_namelen = 0;
3017 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3018 msg.msg_control = alloca(msg.msg_controllen);
3019 memset(msg.msg_control, 0, msg.msg_controllen);
3021 msg.msg_flags = tswap32(msgp->msg_flags);
3023 count = tswapal(msgp->msg_iovlen);
3024 target_vec = tswapal(msgp->msg_iov);
3026 if (count > IOV_MAX) {
3027 /* sendmsg/recvmsg return a different errno for this condition than
3028 * readv/writev, so we must catch it here before lock_iovec() does.
3030 ret = -TARGET_EMSGSIZE;
3031 goto out2;
3034 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3035 target_vec, count, send);
3036 if (vec == NULL) {
3037 ret = -host_to_target_errno(errno);
3038 goto out2;
3040 msg.msg_iovlen = count;
3041 msg.msg_iov = vec;
3043 if (send) {
3044 if (fd_trans_target_to_host_data(fd)) {
3045 void *host_msg;
3047 host_msg = g_malloc(msg.msg_iov->iov_len);
3048 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3049 ret = fd_trans_target_to_host_data(fd)(host_msg,
3050 msg.msg_iov->iov_len);
3051 if (ret >= 0) {
3052 msg.msg_iov->iov_base = host_msg;
3053 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3055 g_free(host_msg);
3056 } else {
3057 ret = target_to_host_cmsg(&msg, msgp);
3058 if (ret == 0) {
3059 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3062 } else {
3063 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3064 if (!is_error(ret)) {
3065 len = ret;
3066 if (fd_trans_host_to_target_data(fd)) {
3067 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3068 MIN(msg.msg_iov->iov_len, len));
3069 } else {
3070 ret = host_to_target_cmsg(msgp, &msg);
3072 if (!is_error(ret)) {
3073 msgp->msg_namelen = tswap32(msg.msg_namelen);
3074 msgp->msg_flags = tswap32(msg.msg_flags);
3075 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3076 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3077 msg.msg_name, msg.msg_namelen);
3078 if (ret) {
3079 goto out;
3083 ret = len;
3088 out:
3089 unlock_iovec(vec, target_vec, count, !send);
3090 out2:
3091 return ret;
3094 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3095 int flags, int send)
3097 abi_long ret;
3098 struct target_msghdr *msgp;
3100 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3101 msgp,
3102 target_msg,
3103 send ? 1 : 0)) {
3104 return -TARGET_EFAULT;
3106 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3107 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3108 return ret;
3111 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3112 * so it might not have this *mmsg-specific flag either.
3114 #ifndef MSG_WAITFORONE
3115 #define MSG_WAITFORONE 0x10000
3116 #endif
3118 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3119 unsigned int vlen, unsigned int flags,
3120 int send)
3122 struct target_mmsghdr *mmsgp;
3123 abi_long ret = 0;
3124 int i;
3126 if (vlen > UIO_MAXIOV) {
3127 vlen = UIO_MAXIOV;
3130 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3131 if (!mmsgp) {
3132 return -TARGET_EFAULT;
3135 for (i = 0; i < vlen; i++) {
3136 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3137 if (is_error(ret)) {
3138 break;
3140 mmsgp[i].msg_len = tswap32(ret);
3141 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3142 if (flags & MSG_WAITFORONE) {
3143 flags |= MSG_DONTWAIT;
3147 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3149 /* Return number of datagrams sent if we sent any at all;
3150 * otherwise return the error.
3152 if (i) {
3153 return i;
3155 return ret;
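/*
 * Illustrative sketch (assumed example): a guest
 * recvmmsg(fd, msgvec, 4, MSG_WAITFORONE, NULL) is emulated as up to four
 * sequential do_sendrecvmsg_locked() calls.  The first call may block; as
 * soon as one datagram has been received, MSG_DONTWAIT is ORed into flags,
 * so the remaining iterations return immediately if nothing else is
 * queued.  If at least one message was transferred, the count is returned
 * and any error from a later iteration is discarded.
 */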
3158 /* do_accept4() Must return target values and target errnos. */
3159 static abi_long do_accept4(int fd, abi_ulong target_addr,
3160 abi_ulong target_addrlen_addr, int flags)
3162 socklen_t addrlen, ret_addrlen;
3163 void *addr;
3164 abi_long ret;
3165 int host_flags;
3167 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3169 if (target_addr == 0) {
3170 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3173 /* Linux returns EINVAL if the addrlen pointer is invalid */
3174 if (get_user_u32(addrlen, target_addrlen_addr))
3175 return -TARGET_EINVAL;
3177 if ((int)addrlen < 0) {
3178 return -TARGET_EINVAL;
3181 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3182 return -TARGET_EINVAL;
3184 addr = alloca(addrlen);
3186 ret_addrlen = addrlen;
3187 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3188 if (!is_error(ret)) {
3189 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3190 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3191 ret = -TARGET_EFAULT;
3194 return ret;
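/*
 * Illustrative note on the addrlen round trip above, which follows the
 * usual accept(2) convention: if, say, the guest supplies a 16-byte
 * buffer but the peer is an AF_UNIX socket whose address needs more
 * space, only MIN(addrlen, ret_addrlen) bytes are converted into guest
 * memory, while the full kernel-reported length ret_addrlen is stored
 * back through target_addrlen_addr so the guest can detect truncation.
 */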
3197 /* do_getpeername() Must return target values and target errnos. */
3198 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3199 abi_ulong target_addrlen_addr)
3201 socklen_t addrlen, ret_addrlen;
3202 void *addr;
3203 abi_long ret;
3205 if (get_user_u32(addrlen, target_addrlen_addr))
3206 return -TARGET_EFAULT;
3208 if ((int)addrlen < 0) {
3209 return -TARGET_EINVAL;
3212 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3213 return -TARGET_EFAULT;
3215 addr = alloca(addrlen);
3217 ret_addrlen = addrlen;
3218 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3219 if (!is_error(ret)) {
3220 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3221 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3222 ret = -TARGET_EFAULT;
3225 return ret;
3228 /* do_getsockname() Must return target values and target errnos. */
3229 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3230 abi_ulong target_addrlen_addr)
3232 socklen_t addrlen, ret_addrlen;
3233 void *addr;
3234 abi_long ret;
3236 if (get_user_u32(addrlen, target_addrlen_addr))
3237 return -TARGET_EFAULT;
3239 if ((int)addrlen < 0) {
3240 return -TARGET_EINVAL;
3243 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3244 return -TARGET_EFAULT;
3246 addr = alloca(addrlen);
3248 ret_addrlen = addrlen;
3249 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3250 if (!is_error(ret)) {
3251 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3252 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3253 ret = -TARGET_EFAULT;
3256 return ret;
3259 /* do_socketpair() Must return target values and target errnos. */
3260 static abi_long do_socketpair(int domain, int type, int protocol,
3261 abi_ulong target_tab_addr)
3263 int tab[2];
3264 abi_long ret;
3266 target_to_host_sock_type(&type);
3268 ret = get_errno(socketpair(domain, type, protocol, tab));
3269 if (!is_error(ret)) {
3270 if (put_user_s32(tab[0], target_tab_addr)
3271 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3272 ret = -TARGET_EFAULT;
3274 return ret;
3277 /* do_sendto() Must return target values and target errnos. */
3278 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3279 abi_ulong target_addr, socklen_t addrlen)
3281 void *addr;
3282 void *host_msg;
3283 void *copy_msg = NULL;
3284 abi_long ret;
3286 if ((int)addrlen < 0) {
3287 return -TARGET_EINVAL;
3290 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3291 if (!host_msg)
3292 return -TARGET_EFAULT;
3293 if (fd_trans_target_to_host_data(fd)) {
3294 copy_msg = host_msg;
3295 host_msg = g_malloc(len);
3296 memcpy(host_msg, copy_msg, len);
3297 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3298 if (ret < 0) {
3299 goto fail;
3302 if (target_addr) {
3303 addr = alloca(addrlen+1);
3304 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3305 if (ret) {
3306 goto fail;
3308 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3309 } else {
3310 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3312 fail:
3313 if (copy_msg) {
3314 g_free(host_msg);
3315 host_msg = copy_msg;
3317 unlock_user(host_msg, msg, 0);
3318 return ret;
3321 /* do_recvfrom() Must return target values and target errnos. */
3322 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3323 abi_ulong target_addr,
3324 abi_ulong target_addrlen)
3326 socklen_t addrlen, ret_addrlen;
3327 void *addr;
3328 void *host_msg;
3329 abi_long ret;
3331 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3332 if (!host_msg)
3333 return -TARGET_EFAULT;
3334 if (target_addr) {
3335 if (get_user_u32(addrlen, target_addrlen)) {
3336 ret = -TARGET_EFAULT;
3337 goto fail;
3339 if ((int)addrlen < 0) {
3340 ret = -TARGET_EINVAL;
3341 goto fail;
3343 addr = alloca(addrlen);
3344 ret_addrlen = addrlen;
3345 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3346 addr, &ret_addrlen));
3347 } else {
3348 addr = NULL; /* To keep compiler quiet. */
3349 addrlen = 0; /* To keep compiler quiet. */
3350 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3352 if (!is_error(ret)) {
3353 if (fd_trans_host_to_target_data(fd)) {
3354 abi_long trans;
3355 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3356 if (is_error(trans)) {
3357 ret = trans;
3358 goto fail;
3361 if (target_addr) {
3362 host_to_target_sockaddr(target_addr, addr,
3363 MIN(addrlen, ret_addrlen));
3364 if (put_user_u32(ret_addrlen, target_addrlen)) {
3365 ret = -TARGET_EFAULT;
3366 goto fail;
3369 unlock_user(host_msg, msg, len);
3370 } else {
3371 fail:
3372 unlock_user(host_msg, msg, 0);
3374 return ret;
3377 #ifdef TARGET_NR_socketcall
3378 /* do_socketcall() must return target values and target errnos. */
3379 static abi_long do_socketcall(int num, abi_ulong vptr)
3381 static const unsigned nargs[] = { /* number of arguments per operation */
3382 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3383 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3384 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3385 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3386 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3387 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3388 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3389 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3390 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3391 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3392 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3393 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3394 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3395 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3396 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3397 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3398 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3399 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3400 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3401 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3403 abi_long a[6]; /* max 6 args */
3404 unsigned i;
3406 /* check the range of the first argument num */
3407 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3408 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3409 return -TARGET_EINVAL;
3411 /* ensure we have space for args */
3412 if (nargs[num] > ARRAY_SIZE(a)) {
3413 return -TARGET_EINVAL;
3415 /* collect the arguments in a[] according to nargs[] */
3416 for (i = 0; i < nargs[num]; ++i) {
3417 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3418 return -TARGET_EFAULT;
3421 /* now that we have the args, invoke the appropriate underlying function */
3422 switch (num) {
3423 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3424 return do_socket(a[0], a[1], a[2]);
3425 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3426 return do_bind(a[0], a[1], a[2]);
3427 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3428 return do_connect(a[0], a[1], a[2]);
3429 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3430 return get_errno(listen(a[0], a[1]));
3431 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3432 return do_accept4(a[0], a[1], a[2], 0);
3433 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3434 return do_getsockname(a[0], a[1], a[2]);
3435 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3436 return do_getpeername(a[0], a[1], a[2]);
3437 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3438 return do_socketpair(a[0], a[1], a[2], a[3]);
3439 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3440 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3441 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3442 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3443 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3444 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3445 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3446 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3447 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3448 return get_errno(shutdown(a[0], a[1]));
3449 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3450 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3451 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3452 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3453 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3454 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3455 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3456 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3457 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3458 return do_accept4(a[0], a[1], a[2], a[3]);
3459 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3460 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3461 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3462 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3463 default:
3464 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3465 return -TARGET_EINVAL;
3468 #endif
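/*
 * Illustrative sketch (the guest-side code here is an assumed example):
 * on targets that multiplex socket operations through socketcall(2), a
 * libc sendto() boils down to roughly
 *
 *     long args[6] = { fd, (long)buf, len, flags, (long)addr, addrlen };
 *     syscall(__NR_socketcall, SYS_SENDTO, args);
 *
 * do_socketcall() range-checks num, fetches nargs[num] abi_longs from
 * guest memory at vptr, and dispatches to the matching do_*() helper.
 */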
3470 #define N_SHM_REGIONS 32
3472 static struct shm_region {
3473 abi_ulong start;
3474 abi_ulong size;
3475 bool in_use;
3476 } shm_regions[N_SHM_REGIONS];
3478 #ifndef TARGET_SEMID64_DS
3479 /* asm-generic version of this struct */
3480 struct target_semid64_ds
3482 struct target_ipc_perm sem_perm;
3483 abi_ulong sem_otime;
3484 #if TARGET_ABI_BITS == 32
3485 abi_ulong __unused1;
3486 #endif
3487 abi_ulong sem_ctime;
3488 #if TARGET_ABI_BITS == 32
3489 abi_ulong __unused2;
3490 #endif
3491 abi_ulong sem_nsems;
3492 abi_ulong __unused3;
3493 abi_ulong __unused4;
3495 #endif
3497 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3498 abi_ulong target_addr)
3500 struct target_ipc_perm *target_ip;
3501 struct target_semid64_ds *target_sd;
3503 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3504 return -TARGET_EFAULT;
3505 target_ip = &(target_sd->sem_perm);
3506 host_ip->__key = tswap32(target_ip->__key);
3507 host_ip->uid = tswap32(target_ip->uid);
3508 host_ip->gid = tswap32(target_ip->gid);
3509 host_ip->cuid = tswap32(target_ip->cuid);
3510 host_ip->cgid = tswap32(target_ip->cgid);
3511 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3512 host_ip->mode = tswap32(target_ip->mode);
3513 #else
3514 host_ip->mode = tswap16(target_ip->mode);
3515 #endif
3516 #if defined(TARGET_PPC)
3517 host_ip->__seq = tswap32(target_ip->__seq);
3518 #else
3519 host_ip->__seq = tswap16(target_ip->__seq);
3520 #endif
3521 unlock_user_struct(target_sd, target_addr, 0);
3522 return 0;
3525 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3526 struct ipc_perm *host_ip)
3528 struct target_ipc_perm *target_ip;
3529 struct target_semid64_ds *target_sd;
3531 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3532 return -TARGET_EFAULT;
3533 target_ip = &(target_sd->sem_perm);
3534 target_ip->__key = tswap32(host_ip->__key);
3535 target_ip->uid = tswap32(host_ip->uid);
3536 target_ip->gid = tswap32(host_ip->gid);
3537 target_ip->cuid = tswap32(host_ip->cuid);
3538 target_ip->cgid = tswap32(host_ip->cgid);
3539 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3540 target_ip->mode = tswap32(host_ip->mode);
3541 #else
3542 target_ip->mode = tswap16(host_ip->mode);
3543 #endif
3544 #if defined(TARGET_PPC)
3545 target_ip->__seq = tswap32(host_ip->__seq);
3546 #else
3547 target_ip->__seq = tswap16(host_ip->__seq);
3548 #endif
3549 unlock_user_struct(target_sd, target_addr, 1);
3550 return 0;
3553 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3554 abi_ulong target_addr)
3556 struct target_semid64_ds *target_sd;
3558 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3559 return -TARGET_EFAULT;
3560 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3561 return -TARGET_EFAULT;
3562 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3563 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3564 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3565 unlock_user_struct(target_sd, target_addr, 0);
3566 return 0;
3569 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3570 struct semid_ds *host_sd)
3572 struct target_semid64_ds *target_sd;
3574 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3575 return -TARGET_EFAULT;
3576 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3577 return -TARGET_EFAULT;
3578 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3579 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3580 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3581 unlock_user_struct(target_sd, target_addr, 1);
3582 return 0;
3585 struct target_seminfo {
3586 int semmap;
3587 int semmni;
3588 int semmns;
3589 int semmnu;
3590 int semmsl;
3591 int semopm;
3592 int semume;
3593 int semusz;
3594 int semvmx;
3595 int semaem;
3598 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3599 struct seminfo *host_seminfo)
3601 struct target_seminfo *target_seminfo;
3602 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3603 return -TARGET_EFAULT;
3604 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3605 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3606 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3607 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3608 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3609 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3610 __put_user(host_seminfo->semume, &target_seminfo->semume);
3611 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3612 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3613 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3614 unlock_user_struct(target_seminfo, target_addr, 1);
3615 return 0;
3618 union semun {
3619 int val;
3620 struct semid_ds *buf;
3621 unsigned short *array;
3622 struct seminfo *__buf;
3625 union target_semun {
3626 int val;
3627 abi_ulong buf;
3628 abi_ulong array;
3629 abi_ulong __buf;
3632 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3633 abi_ulong target_addr)
3635 int nsems;
3636 unsigned short *array;
3637 union semun semun;
3638 struct semid_ds semid_ds;
3639 int i, ret;
3641 semun.buf = &semid_ds;
3643 ret = semctl(semid, 0, IPC_STAT, semun);
3644 if (ret == -1)
3645 return get_errno(ret);
3647 nsems = semid_ds.sem_nsems;
3649 *host_array = g_try_new(unsigned short, nsems);
3650 if (!*host_array) {
3651 return -TARGET_ENOMEM;
3653 array = lock_user(VERIFY_READ, target_addr,
3654 nsems*sizeof(unsigned short), 1);
3655 if (!array) {
3656 g_free(*host_array);
3657 return -TARGET_EFAULT;
3660 for (i = 0; i < nsems; i++) {
3661 __get_user((*host_array)[i], &array[i]);
3663 unlock_user(array, target_addr, 0);
3665 return 0;
3668 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3669 unsigned short **host_array)
3671 int nsems;
3672 unsigned short *array;
3673 union semun semun;
3674 struct semid_ds semid_ds;
3675 int i, ret;
3677 semun.buf = &semid_ds;
3679 ret = semctl(semid, 0, IPC_STAT, semun);
3680 if (ret == -1)
3681 return get_errno(ret);
3683 nsems = semid_ds.sem_nsems;
3685 array = lock_user(VERIFY_WRITE, target_addr,
3686 nsems*sizeof(unsigned short), 0);
3687 if (!array)
3688 return -TARGET_EFAULT;
3690 for (i = 0; i < nsems; i++) {
3691 __put_user((*host_array)[i], &array[i]);
3693 g_free(*host_array);
3694 unlock_user(array, target_addr, 1);
3696 return 0;
3699 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3700 abi_ulong target_arg)
3702 union target_semun target_su = { .buf = target_arg };
3703 union semun arg;
3704 struct semid_ds dsarg;
3705 unsigned short *array = NULL;
3706 struct seminfo seminfo;
3707 abi_long ret = -TARGET_EINVAL;
3708 abi_long err;
3709 cmd &= 0xff;
3711 switch (cmd) {
3712 case GETVAL:
3713 case SETVAL:
3714 /* In 64 bit cross-endian situations, we will erroneously pick up
3715 * the wrong half of the union for the "val" element. To rectify
3716 * this, the entire 8-byte structure is byteswapped, followed by
3717 * a swap of the 4 byte val field. In other cases, the data is
3718 * already in proper host byte order. */
3719 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3720 target_su.buf = tswapal(target_su.buf);
3721 arg.val = tswap32(target_su.val);
3722 } else {
3723 arg.val = target_su.val;
3725 ret = get_errno(semctl(semid, semnum, cmd, arg));
3726 break;
3727 case GETALL:
3728 case SETALL:
3729 err = target_to_host_semarray(semid, &array, target_su.array);
3730 if (err)
3731 return err;
3732 arg.array = array;
3733 ret = get_errno(semctl(semid, semnum, cmd, arg));
3734 err = host_to_target_semarray(semid, target_su.array, &array);
3735 if (err)
3736 return err;
3737 break;
3738 case IPC_STAT:
3739 case IPC_SET:
3740 case SEM_STAT:
3741 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3742 if (err)
3743 return err;
3744 arg.buf = &dsarg;
3745 ret = get_errno(semctl(semid, semnum, cmd, arg));
3746 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3747 if (err)
3748 return err;
3749 break;
3750 case IPC_INFO:
3751 case SEM_INFO:
3752 arg.__buf = &seminfo;
3753 ret = get_errno(semctl(semid, semnum, cmd, arg));
3754 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3755 if (err)
3756 return err;
3757 break;
3758 case IPC_RMID:
3759 case GETPID:
3760 case GETNCNT:
3761 case GETZCNT:
3762 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3763 break;
3766 return ret;
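/*
 * Worked example for the GETVAL/SETVAL swap above (illustrative): with a
 * 64-bit big-endian guest on a little-endian host, the guest stores a
 * 4-byte "val" in an 8-byte union slot.  Reading that slot as a 64-bit
 * abi_ulong on the host puts the value in the wrong half, so the code
 * first byteswaps the whole 8 bytes with tswapal() and then byteswaps the
 * 4-byte val field with tswap32(), after which arg.val holds the number
 * the guest actually wrote.  When the union and the value are the same
 * size, no such fix-up is needed.
 */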
3769 struct target_sembuf {
3770 unsigned short sem_num;
3771 short sem_op;
3772 short sem_flg;
3775 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3776 abi_ulong target_addr,
3777 unsigned nsops)
3779 struct target_sembuf *target_sembuf;
3780 int i;
3782 target_sembuf = lock_user(VERIFY_READ, target_addr,
3783 nsops*sizeof(struct target_sembuf), 1);
3784 if (!target_sembuf)
3785 return -TARGET_EFAULT;
3787 for (i = 0; i < nsops; i++) {
3788 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3789 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3790 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3793 unlock_user(target_sembuf, target_addr, 0);
3795 return 0;
3798 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3800 struct sembuf sops[nsops];
3801 abi_long ret;
3803 if (target_to_host_sembuf(sops, ptr, nsops))
3804 return -TARGET_EFAULT;
3806 ret = -TARGET_ENOSYS;
3807 #ifdef __NR_semtimedop
3808 ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3809 #endif
3810 #ifdef __NR_ipc
3811 if (ret == -TARGET_ENOSYS) {
3812 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3814 #endif
3815 return ret;
3818 struct target_msqid_ds
3820 struct target_ipc_perm msg_perm;
3821 abi_ulong msg_stime;
3822 #if TARGET_ABI_BITS == 32
3823 abi_ulong __unused1;
3824 #endif
3825 abi_ulong msg_rtime;
3826 #if TARGET_ABI_BITS == 32
3827 abi_ulong __unused2;
3828 #endif
3829 abi_ulong msg_ctime;
3830 #if TARGET_ABI_BITS == 32
3831 abi_ulong __unused3;
3832 #endif
3833 abi_ulong __msg_cbytes;
3834 abi_ulong msg_qnum;
3835 abi_ulong msg_qbytes;
3836 abi_ulong msg_lspid;
3837 abi_ulong msg_lrpid;
3838 abi_ulong __unused4;
3839 abi_ulong __unused5;
3842 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3843 abi_ulong target_addr)
3845 struct target_msqid_ds *target_md;
3847 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3848 return -TARGET_EFAULT;
3849 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3850 return -TARGET_EFAULT;
3851 host_md->msg_stime = tswapal(target_md->msg_stime);
3852 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3853 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3854 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3855 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3856 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3857 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3858 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3859 unlock_user_struct(target_md, target_addr, 0);
3860 return 0;
3863 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3864 struct msqid_ds *host_md)
3866 struct target_msqid_ds *target_md;
3868 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3869 return -TARGET_EFAULT;
3870 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3871 return -TARGET_EFAULT;
3872 target_md->msg_stime = tswapal(host_md->msg_stime);
3873 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3874 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3875 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3876 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3877 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3878 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3879 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3880 unlock_user_struct(target_md, target_addr, 1);
3881 return 0;
3884 struct target_msginfo {
3885 int msgpool;
3886 int msgmap;
3887 int msgmax;
3888 int msgmnb;
3889 int msgmni;
3890 int msgssz;
3891 int msgtql;
3892 unsigned short int msgseg;
3895 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3896 struct msginfo *host_msginfo)
3898 struct target_msginfo *target_msginfo;
3899 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3900 return -TARGET_EFAULT;
3901 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3902 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3903 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3904 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3905 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3906 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3907 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3908 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3909 unlock_user_struct(target_msginfo, target_addr, 1);
3910 return 0;
3913 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3915 struct msqid_ds dsarg;
3916 struct msginfo msginfo;
3917 abi_long ret = -TARGET_EINVAL;
3919 cmd &= 0xff;
3921 switch (cmd) {
3922 case IPC_STAT:
3923 case IPC_SET:
3924 case MSG_STAT:
3925 if (target_to_host_msqid_ds(&dsarg, ptr))
3926 return -TARGET_EFAULT;
3927 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3928 if (host_to_target_msqid_ds(ptr, &dsarg))
3929 return -TARGET_EFAULT;
3930 break;
3931 case IPC_RMID:
3932 ret = get_errno(msgctl(msgid, cmd, NULL));
3933 break;
3934 case IPC_INFO:
3935 case MSG_INFO:
3936 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3937 if (host_to_target_msginfo(ptr, &msginfo))
3938 return -TARGET_EFAULT;
3939 break;
3940 }
3942 return ret;
3945 struct target_msgbuf {
3946 abi_long mtype;
3947 char mtext[1];
3950 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3951 ssize_t msgsz, int msgflg)
3953 struct target_msgbuf *target_mb;
3954 struct msgbuf *host_mb;
3955 abi_long ret = 0;
3957 if (msgsz < 0) {
3958 return -TARGET_EINVAL;
3959 }
3961 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3962 return -TARGET_EFAULT;
3963 host_mb = g_try_malloc(msgsz + sizeof(long));
3964 if (!host_mb) {
3965 unlock_user_struct(target_mb, msgp, 0);
3966 return -TARGET_ENOMEM;
3967 }
3968 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3969 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3970 ret = -TARGET_ENOSYS;
3971 #ifdef __NR_msgsnd
3972 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3973 #endif
3974 #ifdef __NR_ipc
3975 if (ret == -TARGET_ENOSYS) {
3976 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3977 host_mb, 0));
3978 }
3979 #endif
3980 g_free(host_mb);
3981 unlock_user_struct(target_mb, msgp, 0);
3983 return ret;
3986 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3987 ssize_t msgsz, abi_long msgtyp,
3988 int msgflg)
3990 struct target_msgbuf *target_mb;
3991 char *target_mtext;
3992 struct msgbuf *host_mb;
3993 abi_long ret = 0;
3995 if (msgsz < 0) {
3996 return -TARGET_EINVAL;
3997 }
3999 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4000 return -TARGET_EFAULT;
4002 host_mb = g_try_malloc(msgsz + sizeof(long));
4003 if (!host_mb) {
4004 ret = -TARGET_ENOMEM;
4005 goto end;
4006 }
4007 ret = -TARGET_ENOSYS;
4008 #ifdef __NR_msgrcv
4009 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4010 #endif
4011 #ifdef __NR_ipc
4012 if (ret == -TARGET_ENOSYS) {
4013 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4014 msgflg, host_mb, msgtyp));
4015 }
4016 #endif
4018 if (ret > 0) {
4019 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4020 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4021 if (!target_mtext) {
4022 ret = -TARGET_EFAULT;
4023 goto end;
4024 }
4025 memcpy(target_mb->mtext, host_mb->mtext, ret);
4026 unlock_user(target_mtext, target_mtext_addr, ret);
4027 }
4029 target_mb->mtype = tswapal(host_mb->mtype);
4031 end:
4032 if (target_mb)
4033 unlock_user_struct(target_mb, msgp, 1);
4034 g_free(host_mb);
4035 return ret;
4038 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4039 abi_ulong target_addr)
4041 struct target_shmid_ds *target_sd;
4043 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4044 return -TARGET_EFAULT;
4045 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4046 return -TARGET_EFAULT;
4047 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4048 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4049 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4050 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4051 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4052 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4053 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4054 unlock_user_struct(target_sd, target_addr, 0);
4055 return 0;
4058 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4059 struct shmid_ds *host_sd)
4061 struct target_shmid_ds *target_sd;
4063 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4064 return -TARGET_EFAULT;
4065 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4066 return -TARGET_EFAULT;
4067 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4068 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4069 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4070 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4071 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4072 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4073 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4074 unlock_user_struct(target_sd, target_addr, 1);
4075 return 0;
4078 struct target_shminfo {
4079 abi_ulong shmmax;
4080 abi_ulong shmmin;
4081 abi_ulong shmmni;
4082 abi_ulong shmseg;
4083 abi_ulong shmall;
4086 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4087 struct shminfo *host_shminfo)
4089 struct target_shminfo *target_shminfo;
4090 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4091 return -TARGET_EFAULT;
4092 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4093 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4094 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4095 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4096 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4097 unlock_user_struct(target_shminfo, target_addr, 1);
4098 return 0;
4101 struct target_shm_info {
4102 int used_ids;
4103 abi_ulong shm_tot;
4104 abi_ulong shm_rss;
4105 abi_ulong shm_swp;
4106 abi_ulong swap_attempts;
4107 abi_ulong swap_successes;
4110 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4111 struct shm_info *host_shm_info)
4113 struct target_shm_info *target_shm_info;
4114 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4115 return -TARGET_EFAULT;
4116 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4117 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4118 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4119 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4120 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4121 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4122 unlock_user_struct(target_shm_info, target_addr, 1);
4123 return 0;
4126 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4128 struct shmid_ds dsarg;
4129 struct shminfo shminfo;
4130 struct shm_info shm_info;
4131 abi_long ret = -TARGET_EINVAL;
4133 cmd &= 0xff;
4135 switch(cmd) {
4136 case IPC_STAT:
4137 case IPC_SET:
4138 case SHM_STAT:
4139 if (target_to_host_shmid_ds(&dsarg, buf))
4140 return -TARGET_EFAULT;
4141 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4142 if (host_to_target_shmid_ds(buf, &dsarg))
4143 return -TARGET_EFAULT;
4144 break;
4145 case IPC_INFO:
4146 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4147 if (host_to_target_shminfo(buf, &shminfo))
4148 return -TARGET_EFAULT;
4149 break;
4150 case SHM_INFO:
4151 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4152 if (host_to_target_shm_info(buf, &shm_info))
4153 return -TARGET_EFAULT;
4154 break;
4155 case IPC_RMID:
4156 case SHM_LOCK:
4157 case SHM_UNLOCK:
4158 ret = get_errno(shmctl(shmid, cmd, NULL));
4159 break;
4160 }
4162 return ret;
4165 #ifndef TARGET_FORCE_SHMLBA
4166 /* For most architectures, SHMLBA is the same as the page size;
4167 * some architectures have larger values, in which case they should
4168 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4169 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4170 * and defining its own value for SHMLBA.
4172 * The kernel also permits SHMLBA to be set by the architecture to a
4173 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4174 * this means that addresses are rounded to the large size if
4175 * SHM_RND is set but addresses not aligned to that size are not rejected
4176 * as long as they are at least page-aligned. Since the only architecture
4177 * which uses this is ia64, this code doesn't provide for that oddity.
4178 */
4179 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4181 return TARGET_PAGE_SIZE;
4182 }
4183 #endif
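/*
 * Illustrative sketch (hypothetical architecture, not code from this
 * file): a target whose SHMLBA were four pages would set
 * TARGET_FORCE_SHMLBA in its target headers and supply its own helper
 * in place of the default above, along the lines of:
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */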
4185 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4186 int shmid, abi_ulong shmaddr, int shmflg)
4188 abi_long raddr;
4189 void *host_raddr;
4190 struct shmid_ds shm_info;
4191 int i, ret;
4192 abi_ulong shmlba;
4194 /* find out the length of the shared memory segment */
4195 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4196 if (is_error(ret)) {
4197 /* can't get length, bail out */
4198 return ret;
4199 }
4201 shmlba = target_shmlba(cpu_env);
4203 if (shmaddr & (shmlba - 1)) {
4204 if (shmflg & SHM_RND) {
4205 shmaddr &= ~(shmlba - 1);
4206 } else {
4207 return -TARGET_EINVAL;
4208 }
4209 }
4210 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4211 return -TARGET_EINVAL;
4212 }
4214 mmap_lock();
4216 if (shmaddr)
4217 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4218 else {
4219 abi_ulong mmap_start;
4221 /* In order to use the host shmat, we need to honor host SHMLBA. */
4222 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4224 if (mmap_start == -1) {
4225 errno = ENOMEM;
4226 host_raddr = (void *)-1;
4227 } else
4228 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4229 }
4231 if (host_raddr == (void *)-1) {
4232 mmap_unlock();
4233 return get_errno((long)host_raddr);
4235 raddr = h2g((unsigned long)host_raddr);
4237 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4238 PAGE_VALID | PAGE_READ |
4239 ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
4241 for (i = 0; i < N_SHM_REGIONS; i++) {
4242 if (!shm_regions[i].in_use) {
4243 shm_regions[i].in_use = true;
4244 shm_regions[i].start = raddr;
4245 shm_regions[i].size = shm_info.shm_segsz;
4246 break;
4247 }
4248 }
4250 mmap_unlock();
4251 return raddr;
4255 static inline abi_long do_shmdt(abi_ulong shmaddr)
4257 int i;
4258 abi_long rv;
4260 mmap_lock();
4262 for (i = 0; i < N_SHM_REGIONS; ++i) {
4263 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4264 shm_regions[i].in_use = false;
4265 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4266 break;
4267 }
4268 }
4269 rv = get_errno(shmdt(g2h(shmaddr)));
4271 mmap_unlock();
4273 return rv;
4276 #ifdef TARGET_NR_ipc
4277 /* ??? This only works with linear mappings. */
4278 /* do_ipc() must return target values and target errnos. */
4279 static abi_long do_ipc(CPUArchState *cpu_env,
4280 unsigned int call, abi_long first,
4281 abi_long second, abi_long third,
4282 abi_long ptr, abi_long fifth)
4284 int version;
4285 abi_long ret = 0;
4287 version = call >> 16;
4288 call &= 0xffff;
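/* Worked example of the packed encoding: call = 0x1000c decodes to
 * version 1 (0x1000c >> 16) and operation 0xc == 12, i.e. IPCOP_msgrcv,
 * which takes the direct (non-kludge) path in the msgrcv case below. */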
4290 switch (call) {
4291 case IPCOP_semop:
4292 ret = do_semop(first, ptr, second);
4293 break;
4295 case IPCOP_semget:
4296 ret = get_errno(semget(first, second, third));
4297 break;
4299 case IPCOP_semctl: {
4300 /* The semun argument to semctl is passed by value, so dereference the
4301 * ptr argument. */
4302 abi_ulong atptr;
4303 get_user_ual(atptr, ptr);
4304 ret = do_semctl(first, second, third, atptr);
4305 break;
4306 }
4308 case IPCOP_msgget:
4309 ret = get_errno(msgget(first, second));
4310 break;
4312 case IPCOP_msgsnd:
4313 ret = do_msgsnd(first, ptr, second, third);
4314 break;
4316 case IPCOP_msgctl:
4317 ret = do_msgctl(first, second, ptr);
4318 break;
4320 case IPCOP_msgrcv:
4321 switch (version) {
4322 case 0:
4323 {
4324 struct target_ipc_kludge {
4325 abi_long msgp;
4326 abi_long msgtyp;
4327 } *tmp;
4329 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4330 ret = -TARGET_EFAULT;
4331 break;
4332 }
4334 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4336 unlock_user_struct(tmp, ptr, 0);
4337 break;
4338 }
4339 default:
4340 ret = do_msgrcv(first, ptr, second, fifth, third);
4341 }
4342 break;
4344 case IPCOP_shmat:
4345 switch (version) {
4346 default:
4347 {
4348 abi_ulong raddr;
4349 raddr = do_shmat(cpu_env, first, ptr, second);
4350 if (is_error(raddr))
4351 return get_errno(raddr);
4352 if (put_user_ual(raddr, third))
4353 return -TARGET_EFAULT;
4354 break;
4355 }
4356 case 1:
4357 ret = -TARGET_EINVAL;
4358 break;
4359 }
4360 break;
4361 case IPCOP_shmdt:
4362 ret = do_shmdt(ptr);
4363 break;
4365 case IPCOP_shmget:
4366 /* IPC_* flag values are the same on all linux platforms */
4367 ret = get_errno(shmget(first, second, third));
4368 break;
4370 /* IPC_* and SHM_* command values are the same on all linux platforms */
4371 case IPCOP_shmctl:
4372 ret = do_shmctl(first, second, ptr);
4373 break;
4374 default:
4375 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4376 call, version);
4377 ret = -TARGET_ENOSYS;
4378 break;
4379 }
4380 return ret;
4381 }
4382 #endif
4384 /* kernel structure types definitions */
4386 #define STRUCT(name, ...) STRUCT_ ## name,
4387 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4388 enum {
4389 #include "syscall_types.h"
4390 STRUCT_MAX
4391 };
4392 #undef STRUCT
4393 #undef STRUCT_SPECIAL
4395 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4396 #define STRUCT_SPECIAL(name)
4397 #include "syscall_types.h"
4398 #undef STRUCT
4399 #undef STRUCT_SPECIAL
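/*
 * Expansion sketch for the two passes above (hypothetical entry; the
 * real list lives in syscall_types.h). An entry such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * first contributes the enum constant STRUCT_winsize, then expands to
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * while STRUCT_SPECIAL() entries only get the enum constant and must
 * provide hand-written converters (see struct_termios_def below).
 */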
4401 typedef struct IOCTLEntry IOCTLEntry;
4403 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4404 int fd, int cmd, abi_long arg);
4406 struct IOCTLEntry {
4407 int target_cmd;
4408 unsigned int host_cmd;
4409 const char *name;
4410 int access;
4411 do_ioctl_fn *do_ioctl;
4412 const argtype arg_type[5];
4415 #define IOC_R 0x0001
4416 #define IOC_W 0x0002
4417 #define IOC_RW (IOC_R | IOC_W)
4419 #define MAX_STRUCT_SIZE 4096
4421 #ifdef CONFIG_FIEMAP
4422 /* So fiemap access checks don't overflow on 32 bit systems.
4423 * This is very slightly smaller than the limit imposed by
4424 * the underlying kernel.
4425 */
4426 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4427 / sizeof(struct fiemap_extent))
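/* Worked bound, assuming common sizes (32-byte struct fiemap, 56-byte
 * struct fiemap_extent, 32-bit unsigned int): (2^32 - 32) / 56 is
 * roughly 76.7 million extents, just under the kernel's own ceiling. */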
4429 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4430 int fd, int cmd, abi_long arg)
4432 /* The parameter for this ioctl is a struct fiemap followed
4433 * by an array of struct fiemap_extent whose size is set
4434 * in fiemap->fm_extent_count. The array is filled in by the
4435 * ioctl.
4436 */
4437 int target_size_in, target_size_out;
4438 struct fiemap *fm;
4439 const argtype *arg_type = ie->arg_type;
4440 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4441 void *argptr, *p;
4442 abi_long ret;
4443 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4444 uint32_t outbufsz;
4445 int free_fm = 0;
4447 assert(arg_type[0] == TYPE_PTR);
4448 assert(ie->access == IOC_RW);
4449 arg_type++;
4450 target_size_in = thunk_type_size(arg_type, 0);
4451 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4452 if (!argptr) {
4453 return -TARGET_EFAULT;
4455 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4456 unlock_user(argptr, arg, 0);
4457 fm = (struct fiemap *)buf_temp;
4458 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4459 return -TARGET_EINVAL;
4460 }
4462 outbufsz = sizeof (*fm) +
4463 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4465 if (outbufsz > MAX_STRUCT_SIZE) {
4466 /* We can't fit all the extents into the fixed size buffer.
4467 * Allocate one that is large enough and use it instead.
4468 */
4469 fm = g_try_malloc(outbufsz);
4470 if (!fm) {
4471 return -TARGET_ENOMEM;
4472 }
4473 memcpy(fm, buf_temp, sizeof(struct fiemap));
4474 free_fm = 1;
4476 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4477 if (!is_error(ret)) {
4478 target_size_out = target_size_in;
4479 /* An extent_count of 0 means we were only counting the extents
4480 * so there are no structs to copy
4481 */
4482 if (fm->fm_extent_count != 0) {
4483 target_size_out += fm->fm_mapped_extents * extent_size;
4484 }
4485 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4486 if (!argptr) {
4487 ret = -TARGET_EFAULT;
4488 } else {
4489 /* Convert the struct fiemap */
4490 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4491 if (fm->fm_extent_count != 0) {
4492 p = argptr + target_size_in;
4493 /* ...and then all the struct fiemap_extents */
4494 for (i = 0; i < fm->fm_mapped_extents; i++) {
4495 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4496 THUNK_TARGET);
4497 p += extent_size;
4500 unlock_user(argptr, arg, target_size_out);
4503 if (free_fm) {
4504 g_free(fm);
4506 return ret;
4508 #endif
4510 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4511 int fd, int cmd, abi_long arg)
4513 const argtype *arg_type = ie->arg_type;
4514 int target_size;
4515 void *argptr;
4516 int ret;
4517 struct ifconf *host_ifconf;
4518 uint32_t outbufsz;
4519 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4520 int target_ifreq_size;
4521 int nb_ifreq;
4522 int free_buf = 0;
4523 int i;
4524 int target_ifc_len;
4525 abi_long target_ifc_buf;
4526 int host_ifc_len;
4527 char *host_ifc_buf;
4529 assert(arg_type[0] == TYPE_PTR);
4530 assert(ie->access == IOC_RW);
4532 arg_type++;
4533 target_size = thunk_type_size(arg_type, 0);
4535 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4536 if (!argptr)
4537 return -TARGET_EFAULT;
4538 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4539 unlock_user(argptr, arg, 0);
4541 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4542 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4543 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4545 if (target_ifc_buf != 0) {
4546 target_ifc_len = host_ifconf->ifc_len;
4547 nb_ifreq = target_ifc_len / target_ifreq_size;
4548 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4550 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4551 if (outbufsz > MAX_STRUCT_SIZE) {
4552 /*
4553 * We can't fit all the ifreq records into the fixed size buffer.
4554 * Allocate one that is large enough and use it instead.
4555 */
4556 host_ifconf = g_try_malloc(outbufsz);
4557 if (!host_ifconf) {
4558 return -TARGET_ENOMEM;
4560 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4561 free_buf = 1;
4563 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4565 host_ifconf->ifc_len = host_ifc_len;
4566 } else {
4567 host_ifc_buf = NULL;
4569 host_ifconf->ifc_buf = host_ifc_buf;
4571 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4572 if (!is_error(ret)) {
4573 /* convert host ifc_len to target ifc_len */
4575 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4576 target_ifc_len = nb_ifreq * target_ifreq_size;
4577 host_ifconf->ifc_len = target_ifc_len;
4579 /* restore target ifc_buf */
4581 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4583 /* copy struct ifconf to target user */
4585 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4586 if (!argptr)
4587 return -TARGET_EFAULT;
4588 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4589 unlock_user(argptr, arg, target_size);
4591 if (target_ifc_buf != 0) {
4592 /* copy ifreq[] to target user */
4593 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4594 for (i = 0; i < nb_ifreq ; i++) {
4595 thunk_convert(argptr + i * target_ifreq_size,
4596 host_ifc_buf + i * sizeof(struct ifreq),
4597 ifreq_arg_type, THUNK_TARGET);
4599 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4603 if (free_buf) {
4604 g_free(host_ifconf);
4607 return ret;
4610 #if defined(CONFIG_USBFS)
4611 #if HOST_LONG_BITS > 64
4612 #error USBDEVFS thunks do not support >64 bit hosts yet.
4613 #endif
4614 struct live_urb {
4615 uint64_t target_urb_adr;
4616 uint64_t target_buf_adr;
4617 char *target_buf_ptr;
4618 struct usbdevfs_urb host_urb;
4621 static GHashTable *usbdevfs_urb_hashtable(void)
4623 static GHashTable *urb_hashtable;
4625 if (!urb_hashtable) {
4626 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4628 return urb_hashtable;
4629 }
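/* Note on the keying scheme: g_int64_hash()/g_int64_equal() look only
 * at the 64-bit value behind the pointer they receive. target_urb_adr
 * is the first member of struct live_urb, so inserting the live_urb
 * pointer itself (below) and looking up with &target_urb_adr produce
 * identical hashes. */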
4631 static void urb_hashtable_insert(struct live_urb *urb)
4633 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4634 g_hash_table_insert(urb_hashtable, urb, urb);
4637 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4639 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4640 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4643 static void urb_hashtable_remove(struct live_urb *urb)
4645 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4646 g_hash_table_remove(urb_hashtable, urb);
4649 static abi_long
4650 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4651 int fd, int cmd, abi_long arg)
4653 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4654 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4655 struct live_urb *lurb;
4656 void *argptr;
4657 uint64_t hurb;
4658 int target_size;
4659 uintptr_t target_urb_adr;
4660 abi_long ret;
4662 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4664 memset(buf_temp, 0, sizeof(uint64_t));
4665 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4666 if (is_error(ret)) {
4667 return ret;
4670 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4671 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
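/* Classic container-of step: the kernel handed back the address of the
 * host_urb member we submitted, so subtracting that member's offset
 * recovers the enclosing live_urb and its guest-side metadata. */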
4672 if (!lurb->target_urb_adr) {
4673 return -TARGET_EFAULT;
4675 urb_hashtable_remove(lurb);
4676 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4677 lurb->host_urb.buffer_length);
4678 lurb->target_buf_ptr = NULL;
4680 /* restore the guest buffer pointer */
4681 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4683 /* update the guest urb struct */
4684 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4685 if (!argptr) {
4686 g_free(lurb);
4687 return -TARGET_EFAULT;
4689 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4690 unlock_user(argptr, lurb->target_urb_adr, target_size);
4692 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4693 /* write back the urb handle */
4694 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4695 if (!argptr) {
4696 g_free(lurb);
4697 return -TARGET_EFAULT;
4700 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4701 target_urb_adr = lurb->target_urb_adr;
4702 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4703 unlock_user(argptr, arg, target_size);
4705 g_free(lurb);
4706 return ret;
4709 static abi_long
4710 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4711 uint8_t *buf_temp __attribute__((unused)),
4712 int fd, int cmd, abi_long arg)
4714 struct live_urb *lurb;
4716 /* map target address back to host URB with metadata. */
4717 lurb = urb_hashtable_lookup(arg);
4718 if (!lurb) {
4719 return -TARGET_EFAULT;
4721 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4724 static abi_long
4725 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4726 int fd, int cmd, abi_long arg)
4728 const argtype *arg_type = ie->arg_type;
4729 int target_size;
4730 abi_long ret;
4731 void *argptr;
4732 int rw_dir;
4733 struct live_urb *lurb;
4735 /*
4736 * each submitted URB needs to map to a unique ID for the
4737 * kernel, and that unique ID needs to be a pointer to
4738 * host memory. hence, we need to malloc for each URB.
4739 * isochronous transfers have a variable length struct.
4740 */
4741 arg_type++;
4742 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4744 /* construct host copy of urb and metadata */
4745 lurb = g_try_malloc0(sizeof(struct live_urb));
4746 if (!lurb) {
4747 return -TARGET_ENOMEM;
4750 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4751 if (!argptr) {
4752 g_free(lurb);
4753 return -TARGET_EFAULT;
4755 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4756 unlock_user(argptr, arg, 0);
4758 lurb->target_urb_adr = arg;
4759 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4761 /* buffer space used depends on endpoint type so lock the entire buffer */
4762 /* control type urbs should check the buffer contents for true direction */
4763 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4764 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4765 lurb->host_urb.buffer_length, 1);
4766 if (lurb->target_buf_ptr == NULL) {
4767 g_free(lurb);
4768 return -TARGET_EFAULT;
4771 /* update buffer pointer in host copy */
4772 lurb->host_urb.buffer = lurb->target_buf_ptr;
4774 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4775 if (is_error(ret)) {
4776 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4777 g_free(lurb);
4778 } else {
4779 urb_hashtable_insert(lurb);
4782 return ret;
4784 #endif /* CONFIG_USBFS */
4786 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4787 int cmd, abi_long arg)
4789 void *argptr;
4790 struct dm_ioctl *host_dm;
4791 abi_long guest_data;
4792 uint32_t guest_data_size;
4793 int target_size;
4794 const argtype *arg_type = ie->arg_type;
4795 abi_long ret;
4796 void *big_buf = NULL;
4797 char *host_data;
4799 arg_type++;
4800 target_size = thunk_type_size(arg_type, 0);
4801 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4802 if (!argptr) {
4803 ret = -TARGET_EFAULT;
4804 goto out;
4806 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4807 unlock_user(argptr, arg, 0);
4809 /* buf_temp is too small, so fetch things into a bigger buffer */
4810 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4811 memcpy(big_buf, buf_temp, target_size);
4812 buf_temp = big_buf;
4813 host_dm = big_buf;
4815 guest_data = arg + host_dm->data_start;
4816 if ((guest_data - arg) < 0) {
4817 ret = -TARGET_EINVAL;
4818 goto out;
4820 guest_data_size = host_dm->data_size - host_dm->data_start;
4821 host_data = (char*)host_dm + host_dm->data_start;
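/* The dm_ioctl header is self-describing: data_start is the payload's
 * offset from the start of the struct and data_size the total size
 * including the header, so the same offset locates the payload in both
 * the guest copy (at arg) and the host copy (at host_dm). */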
4823 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4824 if (!argptr) {
4825 ret = -TARGET_EFAULT;
4826 goto out;
4829 switch (ie->host_cmd) {
4830 case DM_REMOVE_ALL:
4831 case DM_LIST_DEVICES:
4832 case DM_DEV_CREATE:
4833 case DM_DEV_REMOVE:
4834 case DM_DEV_SUSPEND:
4835 case DM_DEV_STATUS:
4836 case DM_DEV_WAIT:
4837 case DM_TABLE_STATUS:
4838 case DM_TABLE_CLEAR:
4839 case DM_TABLE_DEPS:
4840 case DM_LIST_VERSIONS:
4841 /* no input data */
4842 break;
4843 case DM_DEV_RENAME:
4844 case DM_DEV_SET_GEOMETRY:
4845 /* data contains only strings */
4846 memcpy(host_data, argptr, guest_data_size);
4847 break;
4848 case DM_TARGET_MSG:
4849 memcpy(host_data, argptr, guest_data_size);
4850 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4851 break;
4852 case DM_TABLE_LOAD:
4854 void *gspec = argptr;
4855 void *cur_data = host_data;
4856 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4857 int spec_size = thunk_type_size(arg_type, 0);
4858 int i;
4860 for (i = 0; i < host_dm->target_count; i++) {
4861 struct dm_target_spec *spec = cur_data;
4862 uint32_t next;
4863 int slen;
4865 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4866 slen = strlen((char*)gspec + spec_size) + 1;
4867 next = spec->next;
4868 spec->next = sizeof(*spec) + slen;
4869 strcpy((char*)&spec[1], gspec + spec_size);
4870 gspec += next;
4871 cur_data += spec->next;
4873 break;
4875 default:
4876 ret = -TARGET_EINVAL;
4877 unlock_user(argptr, guest_data, 0);
4878 goto out;
4880 unlock_user(argptr, guest_data, 0);
4882 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4883 if (!is_error(ret)) {
4884 guest_data = arg + host_dm->data_start;
4885 guest_data_size = host_dm->data_size - host_dm->data_start;
4886 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4887 switch (ie->host_cmd) {
4888 case DM_REMOVE_ALL:
4889 case DM_DEV_CREATE:
4890 case DM_DEV_REMOVE:
4891 case DM_DEV_RENAME:
4892 case DM_DEV_SUSPEND:
4893 case DM_DEV_STATUS:
4894 case DM_TABLE_LOAD:
4895 case DM_TABLE_CLEAR:
4896 case DM_TARGET_MSG:
4897 case DM_DEV_SET_GEOMETRY:
4898 /* no return data */
4899 break;
4900 case DM_LIST_DEVICES:
4902 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4903 uint32_t remaining_data = guest_data_size;
4904 void *cur_data = argptr;
4905 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4906 int nl_size = 12; /* can't use thunk_size due to alignment */
4908 while (1) {
4909 uint32_t next = nl->next;
4910 if (next) {
4911 nl->next = nl_size + (strlen(nl->name) + 1);
4913 if (remaining_data < nl->next) {
4914 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4915 break;
4917 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4918 strcpy(cur_data + nl_size, nl->name);
4919 cur_data += nl->next;
4920 remaining_data -= nl->next;
4921 if (!next) {
4922 break;
4924 nl = (void*)nl + next;
4926 break;
4928 case DM_DEV_WAIT:
4929 case DM_TABLE_STATUS:
4931 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4932 void *cur_data = argptr;
4933 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4934 int spec_size = thunk_type_size(arg_type, 0);
4935 int i;
4937 for (i = 0; i < host_dm->target_count; i++) {
4938 uint32_t next = spec->next;
4939 int slen = strlen((char*)&spec[1]) + 1;
4940 spec->next = (cur_data - argptr) + spec_size + slen;
4941 if (guest_data_size < spec->next) {
4942 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4943 break;
4945 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4946 strcpy(cur_data + spec_size, (char*)&spec[1]);
4947 cur_data = argptr + spec->next;
4948 spec = (void*)host_dm + host_dm->data_start + next;
4950 break;
4952 case DM_TABLE_DEPS:
4954 void *hdata = (void*)host_dm + host_dm->data_start;
4955 int count = *(uint32_t*)hdata;
4956 uint64_t *hdev = hdata + 8;
4957 uint64_t *gdev = argptr + 8;
4958 int i;
4960 *(uint32_t*)argptr = tswap32(count);
4961 for (i = 0; i < count; i++) {
4962 *gdev = tswap64(*hdev);
4963 gdev++;
4964 hdev++;
4966 break;
4968 case DM_LIST_VERSIONS:
4970 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4971 uint32_t remaining_data = guest_data_size;
4972 void *cur_data = argptr;
4973 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4974 int vers_size = thunk_type_size(arg_type, 0);
4976 while (1) {
4977 uint32_t next = vers->next;
4978 if (next) {
4979 vers->next = vers_size + (strlen(vers->name) + 1);
4981 if (remaining_data < vers->next) {
4982 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4983 break;
4985 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4986 strcpy(cur_data + vers_size, vers->name);
4987 cur_data += vers->next;
4988 remaining_data -= vers->next;
4989 if (!next) {
4990 break;
4992 vers = (void*)vers + next;
4994 break;
4996 default:
4997 unlock_user(argptr, guest_data, 0);
4998 ret = -TARGET_EINVAL;
4999 goto out;
5001 unlock_user(argptr, guest_data, guest_data_size);
5003 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5004 if (!argptr) {
5005 ret = -TARGET_EFAULT;
5006 goto out;
5008 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5009 unlock_user(argptr, arg, target_size);
5011 out:
5012 g_free(big_buf);
5013 return ret;
5016 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5017 int cmd, abi_long arg)
5019 void *argptr;
5020 int target_size;
5021 const argtype *arg_type = ie->arg_type;
5022 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5023 abi_long ret;
5025 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5026 struct blkpg_partition host_part;
5028 /* Read and convert blkpg */
5029 arg_type++;
5030 target_size = thunk_type_size(arg_type, 0);
5031 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5032 if (!argptr) {
5033 ret = -TARGET_EFAULT;
5034 goto out;
5036 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5037 unlock_user(argptr, arg, 0);
5039 switch (host_blkpg->op) {
5040 case BLKPG_ADD_PARTITION:
5041 case BLKPG_DEL_PARTITION:
5042 /* payload is struct blkpg_partition */
5043 break;
5044 default:
5045 /* Unknown opcode */
5046 ret = -TARGET_EINVAL;
5047 goto out;
5050 /* Read and convert blkpg->data */
5051 arg = (abi_long)(uintptr_t)host_blkpg->data;
5052 target_size = thunk_type_size(part_arg_type, 0);
5053 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5054 if (!argptr) {
5055 ret = -TARGET_EFAULT;
5056 goto out;
5058 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5059 unlock_user(argptr, arg, 0);
5061 /* Swizzle the data pointer to our local copy and call! */
5062 host_blkpg->data = &host_part;
5063 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5065 out:
5066 return ret;
5069 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5070 int fd, int cmd, abi_long arg)
5072 const argtype *arg_type = ie->arg_type;
5073 const StructEntry *se;
5074 const argtype *field_types;
5075 const int *dst_offsets, *src_offsets;
5076 int target_size;
5077 void *argptr;
5078 abi_ulong *target_rt_dev_ptr = NULL;
5079 unsigned long *host_rt_dev_ptr = NULL;
5080 abi_long ret;
5081 int i;
5083 assert(ie->access == IOC_W);
5084 assert(*arg_type == TYPE_PTR);
5085 arg_type++;
5086 assert(*arg_type == TYPE_STRUCT);
5087 target_size = thunk_type_size(arg_type, 0);
5088 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5089 if (!argptr) {
5090 return -TARGET_EFAULT;
5092 arg_type++;
5093 assert(*arg_type == (int)STRUCT_rtentry);
5094 se = struct_entries + *arg_type++;
5095 assert(se->convert[0] == NULL);
5096 /* convert struct here to be able to catch rt_dev string */
5097 field_types = se->field_types;
5098 dst_offsets = se->field_offsets[THUNK_HOST];
5099 src_offsets = se->field_offsets[THUNK_TARGET];
5100 for (i = 0; i < se->nb_fields; i++) {
5101 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5102 assert(*field_types == TYPE_PTRVOID);
5103 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5104 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5105 if (*target_rt_dev_ptr != 0) {
5106 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5107 tswapal(*target_rt_dev_ptr));
5108 if (!*host_rt_dev_ptr) {
5109 unlock_user(argptr, arg, 0);
5110 return -TARGET_EFAULT;
5112 } else {
5113 *host_rt_dev_ptr = 0;
5115 field_types++;
5116 continue;
5118 field_types = thunk_convert(buf_temp + dst_offsets[i],
5119 argptr + src_offsets[i],
5120 field_types, THUNK_HOST);
5122 unlock_user(argptr, arg, 0);
5124 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5126 assert(host_rt_dev_ptr != NULL);
5127 assert(target_rt_dev_ptr != NULL);
5128 if (*host_rt_dev_ptr != 0) {
5129 unlock_user((void *)*host_rt_dev_ptr,
5130 *target_rt_dev_ptr, 0);
5131 }
5132 return ret;
5135 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5136 int fd, int cmd, abi_long arg)
5138 int sig = target_to_host_signal(arg);
5139 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5142 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5143 int fd, int cmd, abi_long arg)
5145 struct timeval tv;
5146 abi_long ret;
5148 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5149 if (is_error(ret)) {
5150 return ret;
5153 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5154 if (copy_to_user_timeval(arg, &tv)) {
5155 return -TARGET_EFAULT;
5157 } else {
5158 if (copy_to_user_timeval64(arg, &tv)) {
5159 return -TARGET_EFAULT;
5163 return ret;
5166 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5167 int fd, int cmd, abi_long arg)
5169 struct timespec ts;
5170 abi_long ret;
5172 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5173 if (is_error(ret)) {
5174 return ret;
5177 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5178 if (host_to_target_timespec(arg, &ts)) {
5179 return -TARGET_EFAULT;
5181 } else {
5182 if (host_to_target_timespec64(arg, &ts)) {
5183 return -TARGET_EFAULT;
5187 return ret;
5190 #ifdef TIOCGPTPEER
5191 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5192 int fd, int cmd, abi_long arg)
5194 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5195 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5197 #endif
5199 static IOCTLEntry ioctl_entries[] = {
5200 #define IOCTL(cmd, access, ...) \
5201 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5202 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5203 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5204 #define IOCTL_IGNORE(cmd) \
5205 { TARGET_ ## cmd, 0, #cmd },
5206 #include "ioctls.h"
5207 { 0, 0, },
5208 };
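/*
 * Expansion sketch (hypothetical entry; the real table is ioctls.h):
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * becomes { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 * { MK_PTR(MK_STRUCT(STRUCT_winsize)) } }, which do_ioctl() below
 * matches on target_cmd and converts via the generic TYPE_PTR path.
 */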
5210 /* ??? Implement proper locking for ioctls. */
5211 /* do_ioctl() Must return target values and target errnos. */
5212 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5214 const IOCTLEntry *ie;
5215 const argtype *arg_type;
5216 abi_long ret;
5217 uint8_t buf_temp[MAX_STRUCT_SIZE];
5218 int target_size;
5219 void *argptr;
5221 ie = ioctl_entries;
5222 for(;;) {
5223 if (ie->target_cmd == 0) {
5224 qemu_log_mask(
5225 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5226 return -TARGET_ENOSYS;
5228 if (ie->target_cmd == cmd)
5229 break;
5230 ie++;
5232 arg_type = ie->arg_type;
5233 if (ie->do_ioctl) {
5234 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5235 } else if (!ie->host_cmd) {
5236 /* Some architectures define BSD ioctls in their headers
5237 that are not implemented in Linux. */
5238 return -TARGET_ENOSYS;
5241 switch(arg_type[0]) {
5242 case TYPE_NULL:
5243 /* no argument */
5244 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5245 break;
5246 case TYPE_PTRVOID:
5247 case TYPE_INT:
5248 case TYPE_LONG:
5249 case TYPE_ULONG:
5250 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5251 break;
5252 case TYPE_PTR:
5253 arg_type++;
5254 target_size = thunk_type_size(arg_type, 0);
5255 switch(ie->access) {
5256 case IOC_R:
5257 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5258 if (!is_error(ret)) {
5259 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5260 if (!argptr)
5261 return -TARGET_EFAULT;
5262 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5263 unlock_user(argptr, arg, target_size);
5265 break;
5266 case IOC_W:
5267 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5268 if (!argptr)
5269 return -TARGET_EFAULT;
5270 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5271 unlock_user(argptr, arg, 0);
5272 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5273 break;
5274 default:
5275 case IOC_RW:
5276 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5277 if (!argptr)
5278 return -TARGET_EFAULT;
5279 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5280 unlock_user(argptr, arg, 0);
5281 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5282 if (!is_error(ret)) {
5283 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5284 if (!argptr)
5285 return -TARGET_EFAULT;
5286 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5287 unlock_user(argptr, arg, target_size);
5289 break;
5290 }
5291 break;
5292 default:
5293 qemu_log_mask(LOG_UNIMP,
5294 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5295 (long)cmd, arg_type[0]);
5296 ret = -TARGET_ENOSYS;
5297 break;
5298 }
5299 return ret;
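/* Each row in the bitmask_transtbl tables below reads { target_mask,
 * target_bits, host_mask, host_bits }: when the masked target field
 * equals target_bits, host_bits is OR'd into the result (and the same
 * in reverse for host-to-target). Multi-valued fields such as NLDLY or
 * CBAUD therefore need one row per permitted value. */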
5302 static const bitmask_transtbl iflag_tbl[] = {
5303 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5304 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5305 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5306 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5307 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5308 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5309 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5310 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5311 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5312 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5313 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5314 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5315 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5316 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5317 { 0, 0, 0, 0 }
5320 static const bitmask_transtbl oflag_tbl[] = {
5321 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5322 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5323 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5324 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5325 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5326 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5327 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5328 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5329 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5330 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5331 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5332 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5333 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5334 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5335 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5336 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5337 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5338 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5339 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5340 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5341 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5342 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5343 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5344 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5345 { 0, 0, 0, 0 }
5348 static const bitmask_transtbl cflag_tbl[] = {
5349 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5350 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5351 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5352 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5353 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5354 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5355 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5356 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5357 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5358 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5359 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5360 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5361 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5362 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5363 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5364 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5365 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5366 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5367 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5368 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5369 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5370 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5371 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5372 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5373 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5374 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5375 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5376 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5377 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5378 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5379 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5380 { 0, 0, 0, 0 }
5383 static const bitmask_transtbl lflag_tbl[] = {
5384 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5385 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5386 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5387 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5388 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5389 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5390 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5391 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5392 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5393 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5394 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5395 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5396 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5397 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5398 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5399 { 0, 0, 0, 0 }
5402 static void target_to_host_termios (void *dst, const void *src)
5404 struct host_termios *host = dst;
5405 const struct target_termios *target = src;
5407 host->c_iflag =
5408 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5409 host->c_oflag =
5410 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5411 host->c_cflag =
5412 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5413 host->c_lflag =
5414 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5415 host->c_line = target->c_line;
5417 memset(host->c_cc, 0, sizeof(host->c_cc));
5418 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5419 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5420 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5421 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5422 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5423 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5424 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5425 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5426 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5427 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5428 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5429 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5430 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5431 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5432 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5433 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5434 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5437 static void host_to_target_termios (void *dst, const void *src)
5439 struct target_termios *target = dst;
5440 const struct host_termios *host = src;
5442 target->c_iflag =
5443 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5444 target->c_oflag =
5445 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5446 target->c_cflag =
5447 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5448 target->c_lflag =
5449 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5450 target->c_line = host->c_line;
5452 memset(target->c_cc, 0, sizeof(target->c_cc));
5453 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5454 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5455 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5456 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5457 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5458 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5459 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5460 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5461 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5462 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5463 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5464 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5465 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5466 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5467 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5468 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5469 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5472 static const StructEntry struct_termios_def = {
5473 .convert = { host_to_target_termios, target_to_host_termios },
5474 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5475 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5476 };
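/* termios is special-cased (a STRUCT_SPECIAL() entry): instead of the
 * generic field walker, the two .convert callbacks above translate the
 * flag words through the bitmask tables and remap the c_cc indices. */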
5478 static bitmask_transtbl mmap_flags_tbl[] = {
5479 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5480 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5481 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5482 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5483 MAP_ANONYMOUS, MAP_ANONYMOUS },
5484 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5485 MAP_GROWSDOWN, MAP_GROWSDOWN },
5486 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5487 MAP_DENYWRITE, MAP_DENYWRITE },
5488 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5489 MAP_EXECUTABLE, MAP_EXECUTABLE },
5490 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5491 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5492 MAP_NORESERVE, MAP_NORESERVE },
5493 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5494 /* MAP_STACK had been ignored by the kernel for quite some time.
5495 Recognize it for the target insofar as we do not want to pass
5496 it through to the host. */
5497 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5498 { 0, 0, 0, 0 }
5501 #if defined(TARGET_I386)
5503 /* NOTE: there is really one LDT for all the threads */
5504 static uint8_t *ldt_table;
5506 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5508 int size;
5509 void *p;
5511 if (!ldt_table)
5512 return 0;
5513 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5514 if (size > bytecount)
5515 size = bytecount;
5516 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5517 if (!p)
5518 return -TARGET_EFAULT;
5519 /* ??? Should this by byteswapped? */
5520 memcpy(p, ldt_table, size);
5521 unlock_user(p, ptr, size);
5522 return size;
5525 /* XXX: add locking support */
5526 static abi_long write_ldt(CPUX86State *env,
5527 abi_ulong ptr, unsigned long bytecount, int oldmode)
5529 struct target_modify_ldt_ldt_s ldt_info;
5530 struct target_modify_ldt_ldt_s *target_ldt_info;
5531 int seg_32bit, contents, read_exec_only, limit_in_pages;
5532 int seg_not_present, useable, lm;
5533 uint32_t *lp, entry_1, entry_2;
5535 if (bytecount != sizeof(ldt_info))
5536 return -TARGET_EINVAL;
5537 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5538 return -TARGET_EFAULT;
5539 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5540 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5541 ldt_info.limit = tswap32(target_ldt_info->limit);
5542 ldt_info.flags = tswap32(target_ldt_info->flags);
5543 unlock_user_struct(target_ldt_info, ptr, 0);
5545 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5546 return -TARGET_EINVAL;
5547 seg_32bit = ldt_info.flags & 1;
5548 contents = (ldt_info.flags >> 1) & 3;
5549 read_exec_only = (ldt_info.flags >> 3) & 1;
5550 limit_in_pages = (ldt_info.flags >> 4) & 1;
5551 seg_not_present = (ldt_info.flags >> 5) & 1;
5552 useable = (ldt_info.flags >> 6) & 1;
5553 #ifdef TARGET_ABI32
5554 lm = 0;
5555 #else
5556 lm = (ldt_info.flags >> 7) & 1;
5557 #endif
5558 if (contents == 3) {
5559 if (oldmode)
5560 return -TARGET_EINVAL;
5561 if (seg_not_present == 0)
5562 return -TARGET_EINVAL;
5564 /* allocate the LDT */
5565 if (!ldt_table) {
5566 env->ldt.base = target_mmap(0,
5567 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5568 PROT_READ|PROT_WRITE,
5569 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5570 if (env->ldt.base == -1)
5571 return -TARGET_ENOMEM;
5572 memset(g2h(env->ldt.base), 0,
5573 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5574 env->ldt.limit = 0xffff;
5575 ldt_table = g2h(env->ldt.base);
5578 /* NOTE: same code as Linux kernel */
5579 /* Allow LDTs to be cleared by the user. */
5580 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5581 if (oldmode ||
5582 (contents == 0 &&
5583 read_exec_only == 1 &&
5584 seg_32bit == 0 &&
5585 limit_in_pages == 0 &&
5586 seg_not_present == 1 &&
5587 useable == 0 )) {
5588 entry_1 = 0;
5589 entry_2 = 0;
5590 goto install;
5594 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5595 (ldt_info.limit & 0x0ffff);
5596 entry_2 = (ldt_info.base_addr & 0xff000000) |
5597 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5598 (ldt_info.limit & 0xf0000) |
5599 ((read_exec_only ^ 1) << 9) |
5600 (contents << 10) |
5601 ((seg_not_present ^ 1) << 15) |
5602 (seg_32bit << 22) |
5603 (limit_in_pages << 23) |
5604 (lm << 21) |
5605 0x7000;
5606 if (!oldmode)
5607 entry_2 |= (useable << 20);
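/* Worked example (hypothetical request): base_addr = 0x12345678,
 * limit = 0xfffff, with seg_32bit = 1, limit_in_pages = 1 and all other
 * flag bits clear packs as
 *     entry_1 = 0x5678ffff   (base[15:0] << 16 | limit[15:0])
 *     entry_2 = 0x12cff234   (base[31:24] | G | D | limit[19:16] | P |
 *                             DPL=3 | S | writable | base[23:16])
 */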
5609 /* Install the new entry ... */
5610 install:
5611 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5612 lp[0] = tswap32(entry_1);
5613 lp[1] = tswap32(entry_2);
5614 return 0;
5617 /* specific and weird i386 syscalls */
5618 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5619 unsigned long bytecount)
5621 abi_long ret;
5623 switch (func) {
5624 case 0:
5625 ret = read_ldt(ptr, bytecount);
5626 break;
5627 case 1:
5628 ret = write_ldt(env, ptr, bytecount, 1);
5629 break;
5630 case 0x11:
5631 ret = write_ldt(env, ptr, bytecount, 0);
5632 break;
5633 default:
5634 ret = -TARGET_ENOSYS;
5635 break;
5637 return ret;
5640 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5641 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5643 uint64_t *gdt_table = g2h(env->gdt.base);
5644 struct target_modify_ldt_ldt_s ldt_info;
5645 struct target_modify_ldt_ldt_s *target_ldt_info;
5646 int seg_32bit, contents, read_exec_only, limit_in_pages;
5647 int seg_not_present, useable, lm;
5648 uint32_t *lp, entry_1, entry_2;
5649 int i;
5651 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5652 if (!target_ldt_info)
5653 return -TARGET_EFAULT;
5654 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5655 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5656 ldt_info.limit = tswap32(target_ldt_info->limit);
5657 ldt_info.flags = tswap32(target_ldt_info->flags);
5658 if (ldt_info.entry_number == -1) {
5659 for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
5660 if (gdt_table[i] == 0) {
5661 ldt_info.entry_number = i;
5662 target_ldt_info->entry_number = tswap32(i);
5663 break;
5667 unlock_user_struct(target_ldt_info, ptr, 1);
5669 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5670 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5671 return -TARGET_EINVAL;
5672 seg_32bit = ldt_info.flags & 1;
5673 contents = (ldt_info.flags >> 1) & 3;
5674 read_exec_only = (ldt_info.flags >> 3) & 1;
5675 limit_in_pages = (ldt_info.flags >> 4) & 1;
5676 seg_not_present = (ldt_info.flags >> 5) & 1;
5677 useable = (ldt_info.flags >> 6) & 1;
5678 #ifdef TARGET_ABI32
5679 lm = 0;
5680 #else
5681 lm = (ldt_info.flags >> 7) & 1;
5682 #endif
5684 if (contents == 3) {
5685 if (seg_not_present == 0)
5686 return -TARGET_EINVAL;
5689 /* NOTE: same code as Linux kernel */
5690 /* Allow LDTs to be cleared by the user. */
5691 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5692 if ((contents == 0 &&
5693 read_exec_only == 1 &&
5694 seg_32bit == 0 &&
5695 limit_in_pages == 0 &&
5696 seg_not_present == 1 &&
5697 useable == 0 )) {
5698 entry_1 = 0;
5699 entry_2 = 0;
5700 goto install;
5704 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5705 (ldt_info.limit & 0x0ffff);
5706 entry_2 = (ldt_info.base_addr & 0xff000000) |
5707 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5708 (ldt_info.limit & 0xf0000) |
5709 ((read_exec_only ^ 1) << 9) |
5710 (contents << 10) |
5711 ((seg_not_present ^ 1) << 15) |
5712 (seg_32bit << 22) |
5713 (limit_in_pages << 23) |
5714 (useable << 20) |
5715 (lm << 21) |
5716 0x7000;
5718 /* Install the new entry ... */
5719 install:
5720 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5721 lp[0] = tswap32(entry_1);
5722 lp[1] = tswap32(entry_2);
5723 return 0;
5726 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5728 struct target_modify_ldt_ldt_s *target_ldt_info;
5729 uint64_t *gdt_table = g2h(env->gdt.base);
5730 uint32_t base_addr, limit, flags;
5731 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5732 int seg_not_present, useable, lm;
5733 uint32_t *lp, entry_1, entry_2;
5735 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5736 if (!target_ldt_info)
5737 return -TARGET_EFAULT;
5738 idx = tswap32(target_ldt_info->entry_number);
5739 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5740 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5741 unlock_user_struct(target_ldt_info, ptr, 1);
5742 return -TARGET_EINVAL;
5744 lp = (uint32_t *)(gdt_table + idx);
5745 entry_1 = tswap32(lp[0]);
5746 entry_2 = tswap32(lp[1]);
5748 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5749 contents = (entry_2 >> 10) & 3;
5750 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5751 seg_32bit = (entry_2 >> 22) & 1;
5752 limit_in_pages = (entry_2 >> 23) & 1;
5753 useable = (entry_2 >> 20) & 1;
5754 #ifdef TARGET_ABI32
5755 lm = 0;
5756 #else
5757 lm = (entry_2 >> 21) & 1;
5758 #endif
5759 flags = (seg_32bit << 0) | (contents << 1) |
5760 (read_exec_only << 3) | (limit_in_pages << 4) |
5761 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5762 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5763 base_addr = (entry_1 >> 16) |
5764 (entry_2 & 0xff000000) |
5765 ((entry_2 & 0xff) << 16);
5766 target_ldt_info->base_addr = tswapal(base_addr);
5767 target_ldt_info->limit = tswap32(limit);
5768 target_ldt_info->flags = tswap32(flags);
5769 unlock_user_struct(target_ldt_info, ptr, 1);
5770 return 0;
5772 #endif /* TARGET_I386 && TARGET_ABI32 */
5774 #ifndef TARGET_ABI32
5775 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5777 abi_long ret = 0;
5778 abi_ulong val;
5779 int idx;
5781 switch(code) {
5782 case TARGET_ARCH_SET_GS:
5783 case TARGET_ARCH_SET_FS:
5784 if (code == TARGET_ARCH_SET_GS)
5785 idx = R_GS;
5786 else
5787 idx = R_FS;
5788 cpu_x86_load_seg(env, idx, 0);
5789 env->segs[idx].base = addr;
5790 break;
5791 case TARGET_ARCH_GET_GS:
5792 case TARGET_ARCH_GET_FS:
5793 if (code == TARGET_ARCH_GET_GS)
5794 idx = R_GS;
5795 else
5796 idx = R_FS;
5797 val = env->segs[idx].base;
5798 if (put_user(val, addr, abi_ulong))
5799 ret = -TARGET_EFAULT;
5800 break;
5801 default:
5802 ret = -TARGET_EINVAL;
5803 break;
5805 return ret;
5807 #endif
5809 #endif /* defined(TARGET_I386) */
5811 #define NEW_STACK_SIZE 0x40000
5814 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5815 typedef struct {
5816 CPUArchState *env;
5817 pthread_mutex_t mutex;
5818 pthread_cond_t cond;
5819 pthread_t thread;
5820 uint32_t tid;
5821 abi_ulong child_tidptr;
5822 abi_ulong parent_tidptr;
5823 sigset_t sigmask;
5824 } new_thread_info;
5826 static void *clone_func(void *arg)
5828 new_thread_info *info = arg;
5829 CPUArchState *env;
5830 CPUState *cpu;
5831 TaskState *ts;
5833 rcu_register_thread();
5834 tcg_register_thread();
5835 env = info->env;
5836 cpu = env_cpu(env);
5837 thread_cpu = cpu;
5838 ts = (TaskState *)cpu->opaque;
5839 info->tid = sys_gettid();
5840 task_settid(ts);
5841 if (info->child_tidptr)
5842 put_user_u32(info->tid, info->child_tidptr);
5843 if (info->parent_tidptr)
5844 put_user_u32(info->tid, info->parent_tidptr);
5845 qemu_guest_random_seed_thread_part2(cpu->random_seed);
5846 /* Enable signals. */
5847 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5848 /* Signal to the parent that we're ready. */
5849 pthread_mutex_lock(&info->mutex);
5850 pthread_cond_broadcast(&info->cond);
5851 pthread_mutex_unlock(&info->mutex);
5852 /* Wait until the parent has finished initializing the tls state. */
5853 pthread_mutex_lock(&clone_lock);
5854 pthread_mutex_unlock(&clone_lock);
5855 cpu_loop(env);
5856 /* never exits */
5857 return NULL;
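/*
 * The startup handshake between do_fork() and clone_func() above is a
 * standard condition-variable rendezvous; a minimal sketch of the
 * pattern (illustrative only, not built):
 */
#if 0
    pthread_mutex_lock(&info.mutex);             /* parent, before create */
    pthread_create(&info.thread, &attr, clone_func, &info);
    pthread_cond_wait(&info.cond, &info.mutex);  /* releases mutex, sleeps */
    /* ... the child locks the mutex, broadcasts info.cond and unlocks,
     * so the parent wakes only once the child has published its TID. */
    pthread_mutex_unlock(&info.mutex);
#endif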
5860 /* do_fork() must return host values and target errnos (unlike most
5861 do_*() functions). */
5862 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5863 abi_ulong parent_tidptr, target_ulong newtls,
5864 abi_ulong child_tidptr)
5866 CPUState *cpu = env_cpu(env);
5867 int ret;
5868 TaskState *ts;
5869 CPUState *new_cpu;
5870 CPUArchState *new_env;
5871 sigset_t sigmask;
5873 flags &= ~CLONE_IGNORED_FLAGS;
5875 /* Emulate vfork() with fork() */
5876 if (flags & CLONE_VFORK)
5877 flags &= ~(CLONE_VFORK | CLONE_VM);
5879 if (flags & CLONE_VM) {
5880 TaskState *parent_ts = (TaskState *)cpu->opaque;
5881 new_thread_info info;
5882 pthread_attr_t attr;
5884 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5885 (flags & CLONE_INVALID_THREAD_FLAGS)) {
5886 return -TARGET_EINVAL;
5889 ts = g_new0(TaskState, 1);
5890 init_task_state(ts);
5892 /* Grab a mutex so that thread setup appears atomic. */
5893 pthread_mutex_lock(&clone_lock);
5895 /* We create a new CPU instance. */
5896 new_env = cpu_copy(env);
5897 /* Init regs that differ from the parent. */
5898 cpu_clone_regs_child(new_env, newsp, flags);
5899 cpu_clone_regs_parent(env, flags);
5900 new_cpu = env_cpu(new_env);
5901 new_cpu->opaque = ts;
5902 ts->bprm = parent_ts->bprm;
5903 ts->info = parent_ts->info;
5904 ts->signal_mask = parent_ts->signal_mask;
5906 if (flags & CLONE_CHILD_CLEARTID) {
5907 ts->child_tidptr = child_tidptr;
5910 if (flags & CLONE_SETTLS) {
5911 cpu_set_tls (new_env, newtls);
5914 memset(&info, 0, sizeof(info));
5915 pthread_mutex_init(&info.mutex, NULL);
5916 pthread_mutex_lock(&info.mutex);
5917 pthread_cond_init(&info.cond, NULL);
5918 info.env = new_env;
5919 if (flags & CLONE_CHILD_SETTID) {
5920 info.child_tidptr = child_tidptr;
5922 if (flags & CLONE_PARENT_SETTID) {
5923 info.parent_tidptr = parent_tidptr;
5926 ret = pthread_attr_init(&attr);
5927 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5928 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5929 /* It is not safe to deliver signals until the child has finished
5930 initializing, so temporarily block all signals. */
5931 sigfillset(&sigmask);
5932 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5933 cpu->random_seed = qemu_guest_random_seed_thread_part1();
5935 /* If this is our first additional thread, we need to ensure we
5936 * generate code for parallel execution and flush old translations.
5938 if (!parallel_cpus) {
5939 parallel_cpus = true;
5940 tb_flush(cpu);
5943 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5944 /* TODO: Free new CPU state if thread creation failed. */
5946 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5947 pthread_attr_destroy(&attr);
5948 if (ret == 0) {
5949 /* Wait for the child to initialize. */
5950 pthread_cond_wait(&info.cond, &info.mutex);
5951 ret = info.tid;
5952 } else {
5953 ret = -1;
5955 pthread_mutex_unlock(&info.mutex);
5956 pthread_cond_destroy(&info.cond);
5957 pthread_mutex_destroy(&info.mutex);
5958 pthread_mutex_unlock(&clone_lock);
5959 } else {
5960 /* if no CLONE_VM, we consider it a fork */
5961 if (flags & CLONE_INVALID_FORK_FLAGS) {
5962 return -TARGET_EINVAL;
5965 /* We can't support custom termination signals */
5966 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5967 return -TARGET_EINVAL;
5970 if (block_signals()) {
5971 return -TARGET_ERESTARTSYS;
5974 fork_start();
5975 ret = fork();
5976 if (ret == 0) {
5977 /* Child Process. */
5978 cpu_clone_regs_child(env, newsp, flags);
5979 fork_end(1);
5980 /* There is a race condition here. The parent process could
5981 theoretically read the TID in the child process before the child
5982 tid is set. This would require using either ptrace
5983 (not implemented) or having *_tidptr point at a shared memory
5984 mapping. We can't repeat the spinlock hack used above because
5985 the child process gets its own copy of the lock. */
5986 if (flags & CLONE_CHILD_SETTID)
5987 put_user_u32(sys_gettid(), child_tidptr);
5988 if (flags & CLONE_PARENT_SETTID)
5989 put_user_u32(sys_gettid(), parent_tidptr);
5990 ts = (TaskState *)cpu->opaque;
5991 if (flags & CLONE_SETTLS)
5992 cpu_set_tls (env, newtls);
5993 if (flags & CLONE_CHILD_CLEARTID)
5994 ts->child_tidptr = child_tidptr;
5995 } else {
5996 cpu_clone_regs_parent(env, flags);
5997 fork_end(0);
6000 return ret;
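/*
 * For reference (glibc behaviour, not taken from this file): a guest
 * pthread_create() typically calls clone() with
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * and so takes the CLONE_VM branch above, while a plain fork() passes
 * only SIGCHLD and takes the fork() branch.
 */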
6003 /* Warning: doesn't handle Linux-specific flags... */
6004 static int target_to_host_fcntl_cmd(int cmd)
6006 int ret;
6008 switch(cmd) {
6009 case TARGET_F_DUPFD:
6010 case TARGET_F_GETFD:
6011 case TARGET_F_SETFD:
6012 case TARGET_F_GETFL:
6013 case TARGET_F_SETFL:
6014 ret = cmd;
6015 break;
6016 case TARGET_F_GETLK:
6017 ret = F_GETLK64;
6018 break;
6019 case TARGET_F_SETLK:
6020 ret = F_SETLK64;
6021 break;
6022 case TARGET_F_SETLKW:
6023 ret = F_SETLKW64;
6024 break;
6025 case TARGET_F_GETOWN:
6026 ret = F_GETOWN;
6027 break;
6028 case TARGET_F_SETOWN:
6029 ret = F_SETOWN;
6030 break;
6031 case TARGET_F_GETSIG:
6032 ret = F_GETSIG;
6033 break;
6034 case TARGET_F_SETSIG:
6035 ret = F_SETSIG;
6036 break;
6037 #if TARGET_ABI_BITS == 32
6038 case TARGET_F_GETLK64:
6039 ret = F_GETLK64;
6040 break;
6041 case TARGET_F_SETLK64:
6042 ret = F_SETLK64;
6043 break;
6044 case TARGET_F_SETLKW64:
6045 ret = F_SETLKW64;
6046 break;
6047 #endif
6048 case TARGET_F_SETLEASE:
6049 ret = F_SETLEASE;
6050 break;
6051 case TARGET_F_GETLEASE:
6052 ret = F_GETLEASE;
6053 break;
6054 #ifdef F_DUPFD_CLOEXEC
6055 case TARGET_F_DUPFD_CLOEXEC:
6056 ret = F_DUPFD_CLOEXEC;
6057 break;
6058 #endif
6059 case TARGET_F_NOTIFY:
6060 ret = F_NOTIFY;
6061 break;
6062 #ifdef F_GETOWN_EX
6063 case TARGET_F_GETOWN_EX:
6064 ret = F_GETOWN_EX;
6065 break;
6066 #endif
6067 #ifdef F_SETOWN_EX
6068 case TARGET_F_SETOWN_EX:
6069 ret = F_SETOWN_EX;
6070 break;
6071 #endif
6072 #ifdef F_SETPIPE_SZ
6073 case TARGET_F_SETPIPE_SZ:
6074 ret = F_SETPIPE_SZ;
6075 break;
6076 case TARGET_F_GETPIPE_SZ:
6077 ret = F_GETPIPE_SZ;
6078 break;
6079 #endif
6080 default:
6081 ret = -TARGET_EINVAL;
6082 break;
6085 #if defined(__powerpc64__)
6086 /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13 and 14,
6087 * which are not supported by the kernel. The glibc fcntl call actually adjusts
6088 * them to 5, 6 and 7 before making the syscall(). Since we make the
6089 * syscall directly, adjust to what is supported by the kernel.
6091 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6092 ret -= F_GETLK64 - 5;
6094 #endif
6096 return ret;
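/*
 * Worked example of the PPC64 fixup: with glibc defining F_GETLK64 as
 * 12, a guest TARGET_F_GETLK maps to 12 first and is then adjusted to
 * 12 - (12 - 5) == 5, which is what the kernel expects.
 */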
6099 #define FLOCK_TRANSTBL \
6100 switch (type) { \
6101 TRANSTBL_CONVERT(F_RDLCK); \
6102 TRANSTBL_CONVERT(F_WRLCK); \
6103 TRANSTBL_CONVERT(F_UNLCK); \
6104 TRANSTBL_CONVERT(F_EXLCK); \
6105 TRANSTBL_CONVERT(F_SHLCK); \
6108 static int target_to_host_flock(int type)
6110 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6111 FLOCK_TRANSTBL
6112 #undef TRANSTBL_CONVERT
6113 return -TARGET_EINVAL;
6116 static int host_to_target_flock(int type)
6118 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6119 FLOCK_TRANSTBL
6120 #undef TRANSTBL_CONVERT
6121 /* if we don't know how to convert the value coming
6122 * from the host, we copy it to the target field as-is
6124 return type;
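/*
 * The TRANSTBL_CONVERT X-macro expands each FLOCK_TRANSTBL entry into a
 * case label; e.g. the F_RDLCK line becomes
 *     case TARGET_F_RDLCK: return F_RDLCK;
 * in target_to_host_flock() and
 *     case F_RDLCK: return TARGET_F_RDLCK;
 * in host_to_target_flock().
 */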
6127 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6128 abi_ulong target_flock_addr)
6130 struct target_flock *target_fl;
6131 int l_type;
6133 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6134 return -TARGET_EFAULT;
6137 __get_user(l_type, &target_fl->l_type);
6138 l_type = target_to_host_flock(l_type);
6139 if (l_type < 0) {
6140 return l_type;
6142 fl->l_type = l_type;
6143 __get_user(fl->l_whence, &target_fl->l_whence);
6144 __get_user(fl->l_start, &target_fl->l_start);
6145 __get_user(fl->l_len, &target_fl->l_len);
6146 __get_user(fl->l_pid, &target_fl->l_pid);
6147 unlock_user_struct(target_fl, target_flock_addr, 0);
6148 return 0;
6151 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6152 const struct flock64 *fl)
6154 struct target_flock *target_fl;
6155 short l_type;
6157 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6158 return -TARGET_EFAULT;
6161 l_type = host_to_target_flock(fl->l_type);
6162 __put_user(l_type, &target_fl->l_type);
6163 __put_user(fl->l_whence, &target_fl->l_whence);
6164 __put_user(fl->l_start, &target_fl->l_start);
6165 __put_user(fl->l_len, &target_fl->l_len);
6166 __put_user(fl->l_pid, &target_fl->l_pid);
6167 unlock_user_struct(target_fl, target_flock_addr, 1);
6168 return 0;
6171 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6172 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6174 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6175 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6176 abi_ulong target_flock_addr)
6178 struct target_oabi_flock64 *target_fl;
6179 int l_type;
6181 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6182 return -TARGET_EFAULT;
6185 __get_user(l_type, &target_fl->l_type);
6186 l_type = target_to_host_flock(l_type);
6187 if (l_type < 0) {
6188 return l_type;
6190 fl->l_type = l_type;
6191 __get_user(fl->l_whence, &target_fl->l_whence);
6192 __get_user(fl->l_start, &target_fl->l_start);
6193 __get_user(fl->l_len, &target_fl->l_len);
6194 __get_user(fl->l_pid, &target_fl->l_pid);
6195 unlock_user_struct(target_fl, target_flock_addr, 0);
6196 return 0;
6199 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6200 const struct flock64 *fl)
6202 struct target_oabi_flock64 *target_fl;
6203 short l_type;
6205 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6206 return -TARGET_EFAULT;
6209 l_type = host_to_target_flock(fl->l_type);
6210 __put_user(l_type, &target_fl->l_type);
6211 __put_user(fl->l_whence, &target_fl->l_whence);
6212 __put_user(fl->l_start, &target_fl->l_start);
6213 __put_user(fl->l_len, &target_fl->l_len);
6214 __put_user(fl->l_pid, &target_fl->l_pid);
6215 unlock_user_struct(target_fl, target_flock_addr, 1);
6216 return 0;
6218 #endif
6220 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6221 abi_ulong target_flock_addr)
6223 struct target_flock64 *target_fl;
6224 int l_type;
6226 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6227 return -TARGET_EFAULT;
6230 __get_user(l_type, &target_fl->l_type);
6231 l_type = target_to_host_flock(l_type);
6232 if (l_type < 0) {
6233 return l_type;
6235 fl->l_type = l_type;
6236 __get_user(fl->l_whence, &target_fl->l_whence);
6237 __get_user(fl->l_start, &target_fl->l_start);
6238 __get_user(fl->l_len, &target_fl->l_len);
6239 __get_user(fl->l_pid, &target_fl->l_pid);
6240 unlock_user_struct(target_fl, target_flock_addr, 0);
6241 return 0;
6244 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6245 const struct flock64 *fl)
6247 struct target_flock64 *target_fl;
6248 short l_type;
6250 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6251 return -TARGET_EFAULT;
6254 l_type = host_to_target_flock(fl->l_type);
6255 __put_user(l_type, &target_fl->l_type);
6256 __put_user(fl->l_whence, &target_fl->l_whence);
6257 __put_user(fl->l_start, &target_fl->l_start);
6258 __put_user(fl->l_len, &target_fl->l_len);
6259 __put_user(fl->l_pid, &target_fl->l_pid);
6260 unlock_user_struct(target_fl, target_flock_addr, 1);
6261 return 0;
6264 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6266 struct flock64 fl64;
6267 #ifdef F_GETOWN_EX
6268 struct f_owner_ex fox;
6269 struct target_f_owner_ex *target_fox;
6270 #endif
6271 abi_long ret;
6272 int host_cmd = target_to_host_fcntl_cmd(cmd);
6274 if (host_cmd == -TARGET_EINVAL)
6275 return host_cmd;
6277 switch(cmd) {
6278 case TARGET_F_GETLK:
6279 ret = copy_from_user_flock(&fl64, arg);
6280 if (ret) {
6281 return ret;
6283 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6284 if (ret == 0) {
6285 ret = copy_to_user_flock(arg, &fl64);
6287 break;
6289 case TARGET_F_SETLK:
6290 case TARGET_F_SETLKW:
6291 ret = copy_from_user_flock(&fl64, arg);
6292 if (ret) {
6293 return ret;
6295 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6296 break;
6298 case TARGET_F_GETLK64:
6299 ret = copy_from_user_flock64(&fl64, arg);
6300 if (ret) {
6301 return ret;
6303 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6304 if (ret == 0) {
6305 ret = copy_to_user_flock64(arg, &fl64);
6307 break;
6308 case TARGET_F_SETLK64:
6309 case TARGET_F_SETLKW64:
6310 ret = copy_from_user_flock64(&fl64, arg);
6311 if (ret) {
6312 return ret;
6314 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6315 break;
6317 case TARGET_F_GETFL:
6318 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6319 if (ret >= 0) {
6320 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6322 break;
6324 case TARGET_F_SETFL:
6325 ret = get_errno(safe_fcntl(fd, host_cmd,
6326 target_to_host_bitmask(arg,
6327 fcntl_flags_tbl)));
6328 break;
6330 #ifdef F_GETOWN_EX
6331 case TARGET_F_GETOWN_EX:
6332 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6333 if (ret >= 0) {
6334 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6335 return -TARGET_EFAULT;
6336 target_fox->type = tswap32(fox.type);
6337 target_fox->pid = tswap32(fox.pid);
6338 unlock_user_struct(target_fox, arg, 1);
6340 break;
6341 #endif
6343 #ifdef F_SETOWN_EX
6344 case TARGET_F_SETOWN_EX:
6345 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6346 return -TARGET_EFAULT;
6347 fox.type = tswap32(target_fox->type);
6348 fox.pid = tswap32(target_fox->pid);
6349 unlock_user_struct(target_fox, arg, 0);
6350 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6351 break;
6352 #endif
6354 case TARGET_F_SETOWN:
6355 case TARGET_F_GETOWN:
6356 case TARGET_F_SETSIG:
6357 case TARGET_F_GETSIG:
6358 case TARGET_F_SETLEASE:
6359 case TARGET_F_GETLEASE:
6360 case TARGET_F_SETPIPE_SZ:
6361 case TARGET_F_GETPIPE_SZ:
6362 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6363 break;
6365 default:
6366 ret = get_errno(safe_fcntl(fd, cmd, arg));
6367 break;
6369 return ret;
6372 #ifdef USE_UID16
6374 static inline int high2lowuid(int uid)
6376 if (uid > 65535)
6377 return 65534;
6378 else
6379 return uid;
6382 static inline int high2lowgid(int gid)
6384 if (gid > 65535)
6385 return 65534;
6386 else
6387 return gid;
6390 static inline int low2highuid(int uid)
6392 if ((int16_t)uid == -1)
6393 return -1;
6394 else
6395 return uid;
6398 static inline int low2highgid(int gid)
6400 if ((int16_t)gid == -1)
6401 return -1;
6402 else
6403 return gid;
6405 static inline int tswapid(int id)
6407 return tswap16(id);
6410 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6412 #else /* !USE_UID16 */
6413 static inline int high2lowuid(int uid)
6415 return uid;
6417 static inline int high2lowgid(int gid)
6419 return gid;
6421 static inline int low2highuid(int uid)
6423 return uid;
6425 static inline int low2highgid(int gid)
6427 return gid;
6429 static inline int tswapid(int id)
6431 return tswap32(id);
6434 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6436 #endif /* USE_UID16 */
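/*
 * Example: under USE_UID16, high2lowuid(100000) returns 65534 (the
 * traditional overflowuid) because 100000 does not fit in 16 bits,
 * while low2highuid() special-cases 0xffff, which must stay -1 since
 * it means "leave unchanged" to calls like chown() and setreuid().
 */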
6438 /* We must do direct syscalls for setting UID/GID, because we want to
6439 * implement the Linux system call semantics of "change only for this thread",
6440 * not the libc/POSIX semantics of "change for all threads in process".
6441 * (See http://ewontfix.com/17/ for more details.)
6442 * We use the 32-bit version of the syscalls if present; if it is not
6443 * then either the host architecture supports 32-bit UIDs natively with
6444 * the standard syscall, or the 16-bit UID is the best we can do.
6446 #ifdef __NR_setuid32
6447 #define __NR_sys_setuid __NR_setuid32
6448 #else
6449 #define __NR_sys_setuid __NR_setuid
6450 #endif
6451 #ifdef __NR_setgid32
6452 #define __NR_sys_setgid __NR_setgid32
6453 #else
6454 #define __NR_sys_setgid __NR_setgid
6455 #endif
6456 #ifdef __NR_setresuid32
6457 #define __NR_sys_setresuid __NR_setresuid32
6458 #else
6459 #define __NR_sys_setresuid __NR_setresuid
6460 #endif
6461 #ifdef __NR_setresgid32
6462 #define __NR_sys_setresgid __NR_setresgid32
6463 #else
6464 #define __NR_sys_setresgid __NR_setresgid
6465 #endif
6467 _syscall1(int, sys_setuid, uid_t, uid)
6468 _syscall1(int, sys_setgid, gid_t, gid)
6469 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6470 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
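/*
 * A minimal sketch (not built) of the per-thread semantics the raw
 * syscalls above preserve: sys_setuid() changes only the calling
 * thread's credentials, whereas glibc's setuid() signals all threads
 * so that the whole process changes together.
 */
#if 0
static void *drop_in_one_thread(void *arg)
{
    sys_setuid(65534);   /* requires privilege; affects only this host thread */
    return NULL;         /* other threads keep their original UID */
}
#endif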
6472 void syscall_init(void)
6474 IOCTLEntry *ie;
6475 const argtype *arg_type;
6476 int size;
6477 int i;
6479 thunk_init(STRUCT_MAX);
6481 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6482 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6483 #include "syscall_types.h"
6484 #undef STRUCT
6485 #undef STRUCT_SPECIAL
6487 /* Build the target_to_host_errno_table[] from
6488 * host_to_target_errno_table[]. */
6489 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6490 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6493 /* We patch the ioctl size if necessary. We rely on the fact that
6494 no ioctl has all bits set to '1' in the size field */
6495 ie = ioctl_entries;
6496 while (ie->target_cmd != 0) {
6497 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6498 TARGET_IOC_SIZEMASK) {
6499 arg_type = ie->arg_type;
6500 if (arg_type[0] != TYPE_PTR) {
6501 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6502 ie->target_cmd);
6503 exit(1);
6505 arg_type++;
6506 size = thunk_type_size(arg_type, 0);
6507 ie->target_cmd = (ie->target_cmd &
6508 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6509 (size << TARGET_IOC_SIZESHIFT);
6512 /* automatic consistency check when host and target arch are the same */
6513 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6514 (defined(__x86_64__) && defined(TARGET_X86_64))
6515 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6516 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6517 ie->name, ie->target_cmd, ie->host_cmd);
6519 #endif
6520 ie++;
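/*
 * For reference, a Linux ioctl number packs four fields, on most
 * architectures dir:2 | size:14 | type:8 | nr:8, so e.g.
 * _IOW('f', 1, struct foo) carries sizeof(struct foo) in the size
 * field. The loop above rewrites an all-ones size field with the
 * thunk's computed size of the argument structure.
 */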
6524 #if TARGET_ABI_BITS == 32
6525 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6527 #ifdef TARGET_WORDS_BIGENDIAN
6528 return ((uint64_t)word0 << 32) | word1;
6529 #else
6530 return ((uint64_t)word1 << 32) | word0;
6531 #endif
6533 #else /* TARGET_ABI_BITS == 32 */
6534 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6536 return word0;
6538 #endif /* TARGET_ABI_BITS != 32 */
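/*
 * Example: on a 32-bit big-endian target, a 64-bit offset of
 * 0x100000000 arrives as word0 = 0x00000001 (high half) and
 * word1 = 0x00000000 (low half), and target_offset64() returns
 * ((uint64_t)word0 << 32) | word1 == 0x100000000.
 */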
6540 #ifdef TARGET_NR_truncate64
6541 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6542 abi_long arg2,
6543 abi_long arg3,
6544 abi_long arg4)
6546 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6547 arg2 = arg3;
6548 arg3 = arg4;
6550 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6552 #endif
6554 #ifdef TARGET_NR_ftruncate64
6555 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6556 abi_long arg2,
6557 abi_long arg3,
6558 abi_long arg4)
6560 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6561 arg2 = arg3;
6562 arg3 = arg4;
6564 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6566 #endif
6568 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6569 abi_ulong target_addr)
6571 struct target_itimerspec *target_itspec;
6573 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6574 return -TARGET_EFAULT;
6577 host_itspec->it_interval.tv_sec =
6578 tswapal(target_itspec->it_interval.tv_sec);
6579 host_itspec->it_interval.tv_nsec =
6580 tswapal(target_itspec->it_interval.tv_nsec);
6581 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6582 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6584 unlock_user_struct(target_itspec, target_addr, 1);
6585 return 0;
6588 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6589 struct itimerspec *host_its)
6591 struct target_itimerspec *target_itspec;
6593 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6594 return -TARGET_EFAULT;
6597 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6598 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6600 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6601 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6603 unlock_user_struct(target_itspec, target_addr, 0);
6604 return 0;
6607 static inline abi_long target_to_host_timex(struct timex *host_tx,
6608 abi_long target_addr)
6610 struct target_timex *target_tx;
6612 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6613 return -TARGET_EFAULT;
6616 __get_user(host_tx->modes, &target_tx->modes);
6617 __get_user(host_tx->offset, &target_tx->offset);
6618 __get_user(host_tx->freq, &target_tx->freq);
6619 __get_user(host_tx->maxerror, &target_tx->maxerror);
6620 __get_user(host_tx->esterror, &target_tx->esterror);
6621 __get_user(host_tx->status, &target_tx->status);
6622 __get_user(host_tx->constant, &target_tx->constant);
6623 __get_user(host_tx->precision, &target_tx->precision);
6624 __get_user(host_tx->tolerance, &target_tx->tolerance);
6625 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6626 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6627 __get_user(host_tx->tick, &target_tx->tick);
6628 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6629 __get_user(host_tx->jitter, &target_tx->jitter);
6630 __get_user(host_tx->shift, &target_tx->shift);
6631 __get_user(host_tx->stabil, &target_tx->stabil);
6632 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6633 __get_user(host_tx->calcnt, &target_tx->calcnt);
6634 __get_user(host_tx->errcnt, &target_tx->errcnt);
6635 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6636 __get_user(host_tx->tai, &target_tx->tai);
6638 unlock_user_struct(target_tx, target_addr, 0);
6639 return 0;
6642 static inline abi_long host_to_target_timex(abi_long target_addr,
6643 struct timex *host_tx)
6645 struct target_timex *target_tx;
6647 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6648 return -TARGET_EFAULT;
6651 __put_user(host_tx->modes, &target_tx->modes);
6652 __put_user(host_tx->offset, &target_tx->offset);
6653 __put_user(host_tx->freq, &target_tx->freq);
6654 __put_user(host_tx->maxerror, &target_tx->maxerror);
6655 __put_user(host_tx->esterror, &target_tx->esterror);
6656 __put_user(host_tx->status, &target_tx->status);
6657 __put_user(host_tx->constant, &target_tx->constant);
6658 __put_user(host_tx->precision, &target_tx->precision);
6659 __put_user(host_tx->tolerance, &target_tx->tolerance);
6660 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6661 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6662 __put_user(host_tx->tick, &target_tx->tick);
6663 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6664 __put_user(host_tx->jitter, &target_tx->jitter);
6665 __put_user(host_tx->shift, &target_tx->shift);
6666 __put_user(host_tx->stabil, &target_tx->stabil);
6667 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6668 __put_user(host_tx->calcnt, &target_tx->calcnt);
6669 __put_user(host_tx->errcnt, &target_tx->errcnt);
6670 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6671 __put_user(host_tx->tai, &target_tx->tai);
6673 unlock_user_struct(target_tx, target_addr, 1);
6674 return 0;
6678 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6679 abi_ulong target_addr)
6681 struct target_sigevent *target_sevp;
6683 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6684 return -TARGET_EFAULT;
6687 /* This union is awkward on 64 bit systems because it has a 32 bit
6688 * integer and a pointer in it; we follow the conversion approach
6689 * used for handling sigval types in signal.c so the guest should get
6690 * the correct value back even if we did a 64 bit byteswap and it's
6691 * using the 32 bit integer.
6693 host_sevp->sigev_value.sival_ptr =
6694 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6695 host_sevp->sigev_signo =
6696 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6697 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6698 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6700 unlock_user_struct(target_sevp, target_addr, 1);
6701 return 0;
6704 #if defined(TARGET_NR_mlockall)
6705 static inline int target_to_host_mlockall_arg(int arg)
6707 int result = 0;
6709 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6710 result |= MCL_CURRENT;
6712 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6713 result |= MCL_FUTURE;
6715 return result;
6717 #endif
6719 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6720 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6721 defined(TARGET_NR_newfstatat))
6722 static inline abi_long host_to_target_stat64(void *cpu_env,
6723 abi_ulong target_addr,
6724 struct stat *host_st)
6726 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6727 if (((CPUARMState *)cpu_env)->eabi) {
6728 struct target_eabi_stat64 *target_st;
6730 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6731 return -TARGET_EFAULT;
6732 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6733 __put_user(host_st->st_dev, &target_st->st_dev);
6734 __put_user(host_st->st_ino, &target_st->st_ino);
6735 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6736 __put_user(host_st->st_ino, &target_st->__st_ino);
6737 #endif
6738 __put_user(host_st->st_mode, &target_st->st_mode);
6739 __put_user(host_st->st_nlink, &target_st->st_nlink);
6740 __put_user(host_st->st_uid, &target_st->st_uid);
6741 __put_user(host_st->st_gid, &target_st->st_gid);
6742 __put_user(host_st->st_rdev, &target_st->st_rdev);
6743 __put_user(host_st->st_size, &target_st->st_size);
6744 __put_user(host_st->st_blksize, &target_st->st_blksize);
6745 __put_user(host_st->st_blocks, &target_st->st_blocks);
6746 __put_user(host_st->st_atime, &target_st->target_st_atime);
6747 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6748 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6749 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6750 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6751 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6752 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6753 #endif
6754 unlock_user_struct(target_st, target_addr, 1);
6755 } else
6756 #endif
6758 #if defined(TARGET_HAS_STRUCT_STAT64)
6759 struct target_stat64 *target_st;
6760 #else
6761 struct target_stat *target_st;
6762 #endif
6764 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6765 return -TARGET_EFAULT;
6766 memset(target_st, 0, sizeof(*target_st));
6767 __put_user(host_st->st_dev, &target_st->st_dev);
6768 __put_user(host_st->st_ino, &target_st->st_ino);
6769 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6770 __put_user(host_st->st_ino, &target_st->__st_ino);
6771 #endif
6772 __put_user(host_st->st_mode, &target_st->st_mode);
6773 __put_user(host_st->st_nlink, &target_st->st_nlink);
6774 __put_user(host_st->st_uid, &target_st->st_uid);
6775 __put_user(host_st->st_gid, &target_st->st_gid);
6776 __put_user(host_st->st_rdev, &target_st->st_rdev);
6777 /* XXX: better use of kernel struct */
6778 __put_user(host_st->st_size, &target_st->st_size);
6779 __put_user(host_st->st_blksize, &target_st->st_blksize);
6780 __put_user(host_st->st_blocks, &target_st->st_blocks);
6781 __put_user(host_st->st_atime, &target_st->target_st_atime);
6782 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6783 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6784 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6785 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6786 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6787 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6788 #endif
6789 unlock_user_struct(target_st, target_addr, 1);
6792 return 0;
6794 #endif
6796 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6797 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6798 abi_ulong target_addr)
6800 struct target_statx *target_stx;
6802 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
6803 return -TARGET_EFAULT;
6805 memset(target_stx, 0, sizeof(*target_stx));
6807 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6808 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6809 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6810 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6811 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6812 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6813 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6814 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6815 __put_user(host_stx->stx_size, &target_stx->stx_size);
6816 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6817 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6818 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6819 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6820 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6821 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6822 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6823 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6824 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6825 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6826 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6827 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6828 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6829 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6831 unlock_user_struct(target_stx, target_addr, 1);
6833 return 0;
6835 #endif
6838 /* ??? Using host futex calls even when target atomic operations
6839 are not really atomic probably breaks things. However, implementing
6840 futexes locally would make futexes shared between multiple processes
6841 tricky. They are probably useless anyway, because guest atomic
6842 operations won't work either. */
6843 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6844 target_ulong uaddr2, int val3)
6846 struct timespec ts, *pts;
6847 int base_op;
6849 /* ??? We assume FUTEX_* constants are the same on both host
6850 and target. */
6851 #ifdef FUTEX_CMD_MASK
6852 base_op = op & FUTEX_CMD_MASK;
6853 #else
6854 base_op = op;
6855 #endif
6856 switch (base_op) {
6857 case FUTEX_WAIT:
6858 case FUTEX_WAIT_BITSET:
6859 if (timeout) {
6860 pts = &ts;
6861 target_to_host_timespec(pts, timeout);
6862 } else {
6863 pts = NULL;
6865 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6866 pts, NULL, val3));
6867 case FUTEX_WAKE:
6868 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6869 case FUTEX_FD:
6870 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6871 case FUTEX_REQUEUE:
6872 case FUTEX_CMP_REQUEUE:
6873 case FUTEX_WAKE_OP:
6874 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6875 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6876 But the prototype takes a `struct timespec *'; insert casts
6877 to satisfy the compiler. We do not need to tswap TIMEOUT
6878 since it's not compared to guest memory. */
6879 pts = (struct timespec *)(uintptr_t) timeout;
6880 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6881 g2h(uaddr2),
6882 (base_op == FUTEX_CMP_REQUEUE
6883 ? tswap32(val3)
6884 : val3)));
6885 default:
6886 return -TARGET_ENOSYS;
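/*
 * Example of the timeout reinterpretation above: for FUTEX_WAKE_OP the
 * fourth argument is really val2 (a wake count for uaddr2), so a guest
 * call such as
 *     futex(uaddr, FUTEX_WAKE_OP, 1, (void *)2, uaddr2, op)
 * passes the 2 through the "timeout" slot unmodified.
 */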
6889 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6890 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6891 abi_long handle, abi_long mount_id,
6892 abi_long flags)
6894 struct file_handle *target_fh;
6895 struct file_handle *fh;
6896 int mid = 0;
6897 abi_long ret;
6898 char *name;
6899 unsigned int size, total_size;
6901 if (get_user_s32(size, handle)) {
6902 return -TARGET_EFAULT;
6905 name = lock_user_string(pathname);
6906 if (!name) {
6907 return -TARGET_EFAULT;
6910 total_size = sizeof(struct file_handle) + size;
6911 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6912 if (!target_fh) {
6913 unlock_user(name, pathname, 0);
6914 return -TARGET_EFAULT;
6917 fh = g_malloc0(total_size);
6918 fh->handle_bytes = size;
6920 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6921 unlock_user(name, pathname, 0);
6923 /* man name_to_handle_at(2):
6924 * Other than the use of the handle_bytes field, the caller should treat
6925 * the file_handle structure as an opaque data type
6928 memcpy(target_fh, fh, total_size);
6929 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6930 target_fh->handle_type = tswap32(fh->handle_type);
6931 g_free(fh);
6932 unlock_user(target_fh, handle, total_size);
6934 if (put_user_s32(mid, mount_id)) {
6935 return -TARGET_EFAULT;
6938 return ret;
6941 #endif
6943 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6944 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6945 abi_long flags)
6947 struct file_handle *target_fh;
6948 struct file_handle *fh;
6949 unsigned int size, total_size;
6950 abi_long ret;
6952 if (get_user_s32(size, handle)) {
6953 return -TARGET_EFAULT;
6956 total_size = sizeof(struct file_handle) + size;
6957 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6958 if (!target_fh) {
6959 return -TARGET_EFAULT;
6962 fh = g_memdup(target_fh, total_size);
6963 fh->handle_bytes = size;
6964 fh->handle_type = tswap32(target_fh->handle_type);
6966 ret = get_errno(open_by_handle_at(mount_fd, fh,
6967 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6969 g_free(fh);
6971 unlock_user(target_fh, handle, total_size);
6973 return ret;
6975 #endif
6977 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6979 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6981 int host_flags;
6982 target_sigset_t *target_mask;
6983 sigset_t host_mask;
6984 abi_long ret;
6986 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6987 return -TARGET_EINVAL;
6989 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6990 return -TARGET_EFAULT;
6993 target_to_host_sigset(&host_mask, target_mask);
6995 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6997 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6998 if (ret >= 0) {
6999 fd_trans_register(ret, &target_signalfd_trans);
7002 unlock_user_struct(target_mask, mask, 0);
7004 return ret;
7006 #endif
7008 /* Map host to target signal numbers for the wait family of syscalls.
7009 Assume all other status bits are the same. */
7010 int host_to_target_waitstatus(int status)
7012 if (WIFSIGNALED(status)) {
7013 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7015 if (WIFSTOPPED(status)) {
7016 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7017 | (status & 0xff);
7019 return status;
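/*
 * Wait status layout assumed here (the standard Linux encoding): a
 * killed-by-signal status keeps the signal in bits 0-6, so only
 * WTERMSIG needs renumbering; a stopped status is (sig << 8) | 0x7f,
 * so the signal in bits 8-15 is renumbered and the low byte is kept.
 * E.g. a host SIGUSR1 stop, 0x0a7f on hosts where SIGUSR1 is 10,
 * becomes (host_to_target_signal(10) << 8) | 0x7f for the guest.
 */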
7022 static int open_self_cmdline(void *cpu_env, int fd)
7024 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7025 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7026 int i;
7028 for (i = 0; i < bprm->argc; i++) {
7029 size_t len = strlen(bprm->argv[i]) + 1;
7031 if (write(fd, bprm->argv[i], len) != len) {
7032 return -1;
7036 return 0;
7039 static int open_self_maps(void *cpu_env, int fd)
7041 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7042 TaskState *ts = cpu->opaque;
7043 FILE *fp;
7044 char *line = NULL;
7045 size_t len = 0;
7046 ssize_t read;
7048 fp = fopen("/proc/self/maps", "r");
7049 if (fp == NULL) {
7050 return -1;
7053 while ((read = getline(&line, &len, fp)) != -1) {
7054 int fields, dev_maj, dev_min, inode;
7055 uint64_t min, max, offset;
7056 char flag_r, flag_w, flag_x, flag_p;
7057 char path[512] = "";
7058 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7059 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7060 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7062 if ((fields < 10) || (fields > 11)) {
7063 continue;
7065 if (h2g_valid(min)) {
7066 int flags = page_get_flags(h2g(min));
7067 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7068 if (page_check_range(h2g(min), max - min, flags) == -1) {
7069 continue;
7071 if (h2g(min) == ts->info->stack_limit) {
7072 pstrcpy(path, sizeof(path), " [stack]");
7074 dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7075 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7076 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7077 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7078 path[0] ? " " : "", path);
7082 free(line);
7083 fclose(fp);
7085 return 0;
7088 static int open_self_stat(void *cpu_env, int fd)
7090 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7091 TaskState *ts = cpu->opaque;
7092 abi_ulong start_stack = ts->info->start_stack;
7093 int i;
7095 for (i = 0; i < 44; i++) {
7096 char buf[128];
7097 int len;
7098 uint64_t val = 0;
7100 if (i == 0) {
7101 /* pid */
7102 val = getpid();
7103 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7104 } else if (i == 1) {
7105 /* app name */
7106 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7107 } else if (i == 27) {
7108 /* stack bottom */
7109 val = start_stack;
7110 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7111 } else {
7112 /* for the rest, there is MasterCard */
7113 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7116 len = strlen(buf);
7117 if (write(fd, buf, len) != len) {
7118 return -1;
7122 return 0;
7125 static int open_self_auxv(void *cpu_env, int fd)
7127 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7128 TaskState *ts = cpu->opaque;
7129 abi_ulong auxv = ts->info->saved_auxv;
7130 abi_ulong len = ts->info->auxv_len;
7131 char *ptr;
7134 * The auxiliary vector is stored on the target process's stack.
7135 * Read in the whole auxv vector and copy it to the file.
7137 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7138 if (ptr != NULL) {
7139 while (len > 0) {
7140 ssize_t r;
7141 r = write(fd, ptr, len);
7142 if (r <= 0) {
7143 break;
7145 len -= r;
7146 ptr += r;
7148 lseek(fd, 0, SEEK_SET);
7149 unlock_user(ptr, auxv, len);
7152 return 0;
7155 static int is_proc_myself(const char *filename, const char *entry)
7157 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7158 filename += strlen("/proc/");
7159 if (!strncmp(filename, "self/", strlen("self/"))) {
7160 filename += strlen("self/");
7161 } else if (*filename >= '1' && *filename <= '9') {
7162 char myself[80];
7163 snprintf(myself, sizeof(myself), "%d/", getpid());
7164 if (!strncmp(filename, myself, strlen(myself))) {
7165 filename += strlen(myself);
7166 } else {
7167 return 0;
7169 } else {
7170 return 0;
7172 if (!strcmp(filename, entry)) {
7173 return 1;
7176 return 0;
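/*
 * Usage examples: is_proc_myself("/proc/self/maps", "maps") returns 1,
 * as does is_proc_myself("/proc/1234/maps", "maps") when getpid() is
 * 1234; any other PID, or a path outside /proc, returns 0.
 */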
7179 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7180 defined(TARGET_SPARC) || defined(TARGET_M68K)
7181 static int is_proc(const char *filename, const char *entry)
7183 return strcmp(filename, entry) == 0;
7185 #endif
7187 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7188 static int open_net_route(void *cpu_env, int fd)
7190 FILE *fp;
7191 char *line = NULL;
7192 size_t len = 0;
7193 ssize_t read;
7195 fp = fopen("/proc/net/route", "r");
7196 if (fp == NULL) {
7197 return -1;
7200 /* read header */
7202 read = getline(&line, &len, fp);
7203 dprintf(fd, "%s", line);
7205 /* read routes */
7207 while ((read = getline(&line, &len, fp)) != -1) {
7208 char iface[16];
7209 uint32_t dest, gw, mask;
7210 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7211 int fields;
7213 fields = sscanf(line,
7214 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7215 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7216 &mask, &mtu, &window, &irtt);
7217 if (fields != 11) {
7218 continue;
7220 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7221 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7222 metric, tswap32(mask), mtu, window, irtt);
7225 free(line);
7226 fclose(fp);
7228 return 0;
7230 #endif
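/*
 * Example of the byteswap above: /proc/net/route prints addresses in
 * host byte order, so 127.0.0.1 reads as 0100007F on a little-endian
 * host; tswap32() re-presents it as 7F000001 for a cross-endian guest,
 * matching what that guest's native kernel would print.
 */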
7232 #if defined(TARGET_SPARC)
7233 static int open_cpuinfo(void *cpu_env, int fd)
7235 dprintf(fd, "type\t\t: sun4u\n");
7236 return 0;
7238 #endif
7240 #if defined(TARGET_M68K)
7241 static int open_hardware(void *cpu_env, int fd)
7243 dprintf(fd, "Model:\t\tqemu-m68k\n");
7244 return 0;
7246 #endif
7248 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7250 struct fake_open {
7251 const char *filename;
7252 int (*fill)(void *cpu_env, int fd);
7253 int (*cmp)(const char *s1, const char *s2);
7255 const struct fake_open *fake_open;
7256 static const struct fake_open fakes[] = {
7257 { "maps", open_self_maps, is_proc_myself },
7258 { "stat", open_self_stat, is_proc_myself },
7259 { "auxv", open_self_auxv, is_proc_myself },
7260 { "cmdline", open_self_cmdline, is_proc_myself },
7261 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7262 { "/proc/net/route", open_net_route, is_proc },
7263 #endif
7264 #if defined(TARGET_SPARC)
7265 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7266 #endif
7267 #if defined(TARGET_M68K)
7268 { "/proc/hardware", open_hardware, is_proc },
7269 #endif
7270 { NULL, NULL, NULL }
7273 if (is_proc_myself(pathname, "exe")) {
7274 int execfd = qemu_getauxval(AT_EXECFD);
7275 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7278 for (fake_open = fakes; fake_open->filename; fake_open++) {
7279 if (fake_open->cmp(pathname, fake_open->filename)) {
7280 break;
7284 if (fake_open->filename) {
7285 const char *tmpdir;
7286 char filename[PATH_MAX];
7287 int fd, r;
7289 /* create a temporary file to back the fake /proc entry */
7290 tmpdir = getenv("TMPDIR");
7291 if (!tmpdir)
7292 tmpdir = "/tmp";
7293 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7294 fd = mkstemp(filename);
7295 if (fd < 0) {
7296 return fd;
7298 unlink(filename);
7300 if ((r = fake_open->fill(cpu_env, fd))) {
7301 int e = errno;
7302 close(fd);
7303 errno = e;
7304 return r;
7306 lseek(fd, 0, SEEK_SET);
7308 return fd;
7311 return safe_openat(dirfd, path(pathname), flags, mode);
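/*
 * Note on the fake-open path above: mkstemp() followed by an immediate
 * unlink() yields an anonymous temporary file; fill() writes the
 * synthesized contents into it and the guest then reads them back
 * through an ordinary fd, never touching the host's real /proc entry.
 */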
7314 #define TIMER_MAGIC 0x0caf0000
7315 #define TIMER_MAGIC_MASK 0xffff0000
7317 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7318 static target_timer_t get_timer_id(abi_long arg)
7320 target_timer_t timerid = arg;
7322 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7323 return -TARGET_EINVAL;
7326 timerid &= 0xffff;
7328 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7329 return -TARGET_EINVAL;
7332 return timerid;
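/*
 * Example: a timer ID handed out as (TIMER_MAGIC | 3) == 0x0caf0003
 * maps back to index 3; a value without the 0x0caf tag, or an index
 * past the end of g_posix_timers[], yields -TARGET_EINVAL.
 */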
7335 static int target_to_host_cpu_mask(unsigned long *host_mask,
7336 size_t host_size,
7337 abi_ulong target_addr,
7338 size_t target_size)
7340 unsigned target_bits = sizeof(abi_ulong) * 8;
7341 unsigned host_bits = sizeof(*host_mask) * 8;
7342 abi_ulong *target_mask;
7343 unsigned i, j;
7345 assert(host_size >= target_size);
7347 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7348 if (!target_mask) {
7349 return -TARGET_EFAULT;
7351 memset(host_mask, 0, host_size);
7353 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7354 unsigned bit = i * target_bits;
7355 abi_ulong val;
7357 __get_user(val, &target_mask[i]);
7358 for (j = 0; j < target_bits; j++, bit++) {
7359 if (val & (1UL << j)) {
7360 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7365 unlock_user(target_mask, target_addr, 0);
7366 return 0;
7369 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7370 size_t host_size,
7371 abi_ulong target_addr,
7372 size_t target_size)
7374 unsigned target_bits = sizeof(abi_ulong) * 8;
7375 unsigned host_bits = sizeof(*host_mask) * 8;
7376 abi_ulong *target_mask;
7377 unsigned i, j;
7379 assert(host_size >= target_size);
7381 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7382 if (!target_mask) {
7383 return -TARGET_EFAULT;
7386 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7387 unsigned bit = i * target_bits;
7388 abi_ulong val = 0;
7390 for (j = 0; j < target_bits; j++, bit++) {
7391 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7392 val |= 1UL << j;
7395 __put_user(val, &target_mask[i]);
7398 unlock_user(target_mask, target_addr, target_size);
7399 return 0;
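/*
 * Example of the remapping above: with 32-bit abi_ulong target words
 * and 64-bit unsigned long host words, bit 0 of target word 1 is
 * global bit 32 and therefore bit 32 of host word 0; the same
 * arithmetic splits a wide word across several narrow ones in the
 * opposite configuration.
 */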
7402 /* This is an internal helper for do_syscall so that it is easier
7403 * to have a single return point, allowing actions such as logging
7404 * of syscall results to be performed.
7405 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7407 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7408 abi_long arg2, abi_long arg3, abi_long arg4,
7409 abi_long arg5, abi_long arg6, abi_long arg7,
7410 abi_long arg8)
7412 CPUState *cpu = env_cpu(cpu_env);
7413 abi_long ret;
7414 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7415 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7416 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7417 || defined(TARGET_NR_statx)
7418 struct stat st;
7419 #endif
7420 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7421 || defined(TARGET_NR_fstatfs)
7422 struct statfs stfs;
7423 #endif
7424 void *p;
7426 switch(num) {
7427 case TARGET_NR_exit:
7428 /* In old applications this may be used to implement _exit(2).
7429 However, in threaded applications it is used for thread termination,
7430 and _exit_group is used for application termination.
7431 Do thread termination if we have more than one thread. */
7433 if (block_signals()) {
7434 return -TARGET_ERESTARTSYS;
7437 cpu_list_lock();
7439 if (CPU_NEXT(first_cpu)) {
7440 TaskState *ts;
7442 /* Remove the CPU from the list. */
7443 QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7445 cpu_list_unlock();
7447 ts = cpu->opaque;
7448 if (ts->child_tidptr) {
7449 put_user_u32(0, ts->child_tidptr);
7450 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7451 NULL, NULL, 0);
7453 thread_cpu = NULL;
7454 object_unref(OBJECT(cpu));
7455 g_free(ts);
7456 rcu_unregister_thread();
7457 pthread_exit(NULL);
7460 cpu_list_unlock();
7461 preexit_cleanup(cpu_env, arg1);
7462 _exit(arg1);
7463 return 0; /* avoid warning */
7464 case TARGET_NR_read:
7465 if (arg2 == 0 && arg3 == 0) {
7466 return get_errno(safe_read(arg1, 0, 0));
7467 } else {
7468 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7469 return -TARGET_EFAULT;
7470 ret = get_errno(safe_read(arg1, p, arg3));
7471 if (ret >= 0 &&
7472 fd_trans_host_to_target_data(arg1)) {
7473 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7475 unlock_user(p, arg2, ret);
7477 return ret;
7478 case TARGET_NR_write:
7479 if (arg2 == 0 && arg3 == 0) {
7480 return get_errno(safe_write(arg1, 0, 0));
7482 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7483 return -TARGET_EFAULT;
7484 if (fd_trans_target_to_host_data(arg1)) {
7485 void *copy = g_malloc(arg3);
7486 memcpy(copy, p, arg3);
7487 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7488 if (ret >= 0) {
7489 ret = get_errno(safe_write(arg1, copy, ret));
7491 g_free(copy);
7492 } else {
7493 ret = get_errno(safe_write(arg1, p, arg3));
7495 unlock_user(p, arg2, 0);
7496 return ret;
7498 #ifdef TARGET_NR_open
7499 case TARGET_NR_open:
7500 if (!(p = lock_user_string(arg1)))
7501 return -TARGET_EFAULT;
7502 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7503 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7504 arg3));
7505 fd_trans_unregister(ret);
7506 unlock_user(p, arg1, 0);
7507 return ret;
7508 #endif
7509 case TARGET_NR_openat:
7510 if (!(p = lock_user_string(arg2)))
7511 return -TARGET_EFAULT;
7512 ret = get_errno(do_openat(cpu_env, arg1, p,
7513 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7514 arg4));
7515 fd_trans_unregister(ret);
7516 unlock_user(p, arg2, 0);
7517 return ret;
7518 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7519 case TARGET_NR_name_to_handle_at:
7520 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7521 return ret;
7522 #endif
7523 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7524 case TARGET_NR_open_by_handle_at:
7525 ret = do_open_by_handle_at(arg1, arg2, arg3);
7526 fd_trans_unregister(ret);
7527 return ret;
7528 #endif
7529 case TARGET_NR_close:
7530 fd_trans_unregister(arg1);
7531 return get_errno(close(arg1));
7533 case TARGET_NR_brk:
7534 return do_brk(arg1);
7535 #ifdef TARGET_NR_fork
7536 case TARGET_NR_fork:
7537 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7538 #endif
7539 #ifdef TARGET_NR_waitpid
7540 case TARGET_NR_waitpid:
7542 int status;
7543 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7544 if (!is_error(ret) && arg2 && ret
7545 && put_user_s32(host_to_target_waitstatus(status), arg2))
7546 return -TARGET_EFAULT;
7548 return ret;
7549 #endif
7550 #ifdef TARGET_NR_waitid
7551 case TARGET_NR_waitid:
7553 siginfo_t info;
7554 info.si_pid = 0;
7555 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7556 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7557 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7558 return -TARGET_EFAULT;
7559 host_to_target_siginfo(p, &info);
7560 unlock_user(p, arg3, sizeof(target_siginfo_t));
7563 return ret;
7564 #endif
7565 #ifdef TARGET_NR_creat /* not on alpha */
7566 case TARGET_NR_creat:
7567 if (!(p = lock_user_string(arg1)))
7568 return -TARGET_EFAULT;
7569 ret = get_errno(creat(p, arg2));
7570 fd_trans_unregister(ret);
7571 unlock_user(p, arg1, 0);
7572 return ret;
7573 #endif
7574 #ifdef TARGET_NR_link
7575 case TARGET_NR_link:
7577 void * p2;
7578 p = lock_user_string(arg1);
7579 p2 = lock_user_string(arg2);
7580 if (!p || !p2)
7581 ret = -TARGET_EFAULT;
7582 else
7583 ret = get_errno(link(p, p2));
7584 unlock_user(p2, arg2, 0);
7585 unlock_user(p, arg1, 0);
7587 return ret;
7588 #endif
7589 #if defined(TARGET_NR_linkat)
7590 case TARGET_NR_linkat:
7592 void * p2 = NULL;
7593 if (!arg2 || !arg4)
7594 return -TARGET_EFAULT;
7595 p = lock_user_string(arg2);
7596 p2 = lock_user_string(arg4);
7597 if (!p || !p2)
7598 ret = -TARGET_EFAULT;
7599 else
7600 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7601 unlock_user(p, arg2, 0);
7602 unlock_user(p2, arg4, 0);
7604 return ret;
7605 #endif
7606 #ifdef TARGET_NR_unlink
7607 case TARGET_NR_unlink:
7608 if (!(p = lock_user_string(arg1)))
7609 return -TARGET_EFAULT;
7610 ret = get_errno(unlink(p));
7611 unlock_user(p, arg1, 0);
7612 return ret;
7613 #endif
7614 #if defined(TARGET_NR_unlinkat)
7615 case TARGET_NR_unlinkat:
7616 if (!(p = lock_user_string(arg2)))
7617 return -TARGET_EFAULT;
7618 ret = get_errno(unlinkat(arg1, p, arg3));
7619 unlock_user(p, arg2, 0);
7620 return ret;
7621 #endif
7622 case TARGET_NR_execve:
7624 char **argp, **envp;
7625 int argc, envc;
7626 abi_ulong gp;
7627 abi_ulong guest_argp;
7628 abi_ulong guest_envp;
7629 abi_ulong addr;
7630 char **q;
7631 int total_size = 0;
7633 argc = 0;
7634 guest_argp = arg2;
7635 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7636 if (get_user_ual(addr, gp))
7637 return -TARGET_EFAULT;
7638 if (!addr)
7639 break;
7640 argc++;
7642 envc = 0;
7643 guest_envp = arg3;
7644 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7645 if (get_user_ual(addr, gp))
7646 return -TARGET_EFAULT;
7647 if (!addr)
7648 break;
7649 envc++;
7652 argp = g_new0(char *, argc + 1);
7653 envp = g_new0(char *, envc + 1);
7655 for (gp = guest_argp, q = argp; gp;
7656 gp += sizeof(abi_ulong), q++) {
7657 if (get_user_ual(addr, gp))
7658 goto execve_efault;
7659 if (!addr)
7660 break;
7661 if (!(*q = lock_user_string(addr)))
7662 goto execve_efault;
7663 total_size += strlen(*q) + 1;
7665 *q = NULL;
7667 for (gp = guest_envp, q = envp; gp;
7668 gp += sizeof(abi_ulong), q++) {
7669 if (get_user_ual(addr, gp))
7670 goto execve_efault;
7671 if (!addr)
7672 break;
7673 if (!(*q = lock_user_string(addr)))
7674 goto execve_efault;
7675 total_size += strlen(*q) + 1;
7677 *q = NULL;
7679 if (!(p = lock_user_string(arg1)))
7680 goto execve_efault;
7681 /* Although execve() is not an interruptible syscall, it is
7682 * a special case where we must use the safe_syscall wrapper:
7683 * if we allow a signal to happen before we make the host
7684 * syscall then we will 'lose' it, because at the point of
7685 * execve the process leaves QEMU's control. So we use the
7686 * safe syscall wrapper to ensure that we either take the
7687 * signal as a guest signal, or else it does not happen
7688 * before the execve completes and makes it the other
7689 * program's problem.
7691 ret = get_errno(safe_execve(p, argp, envp));
7692 unlock_user(p, arg1, 0);
7694 goto execve_end;
7696 execve_efault:
7697 ret = -TARGET_EFAULT;
7699 execve_end:
7700 for (gp = guest_argp, q = argp; *q;
7701 gp += sizeof(abi_ulong), q++) {
7702 if (get_user_ual(addr, gp)
7703 || !addr)
7704 break;
7705 unlock_user(*q, addr, 0);
7707 for (gp = guest_envp, q = envp; *q;
7708 gp += sizeof(abi_ulong), q++) {
7709 if (get_user_ual(addr, gp)
7710 || !addr)
7711 break;
7712 unlock_user(*q, addr, 0);
7715 g_free(argp);
7716 g_free(envp);
7718 return ret;
7719 case TARGET_NR_chdir:
7720 if (!(p = lock_user_string(arg1)))
7721 return -TARGET_EFAULT;
7722 ret = get_errno(chdir(p));
7723 unlock_user(p, arg1, 0);
7724 return ret;
7725 #ifdef TARGET_NR_time
7726 case TARGET_NR_time:
7728 time_t host_time;
7729 ret = get_errno(time(&host_time));
7730 if (!is_error(ret)
7731 && arg1
7732 && put_user_sal(host_time, arg1))
7733 return -TARGET_EFAULT;
7735 return ret;
7736 #endif
7737 #ifdef TARGET_NR_mknod
7738 case TARGET_NR_mknod:
7739 if (!(p = lock_user_string(arg1)))
7740 return -TARGET_EFAULT;
7741 ret = get_errno(mknod(p, arg2, arg3));
7742 unlock_user(p, arg1, 0);
7743 return ret;
7744 #endif
7745 #if defined(TARGET_NR_mknodat)
7746 case TARGET_NR_mknodat:
7747 if (!(p = lock_user_string(arg2)))
7748 return -TARGET_EFAULT;
7749 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7750 unlock_user(p, arg2, 0);
7751 return ret;
7752 #endif
7753 #ifdef TARGET_NR_chmod
7754 case TARGET_NR_chmod:
7755 if (!(p = lock_user_string(arg1)))
7756 return -TARGET_EFAULT;
7757 ret = get_errno(chmod(p, arg2));
7758 unlock_user(p, arg1, 0);
7759 return ret;
7760 #endif
7761 #ifdef TARGET_NR_lseek
7762 case TARGET_NR_lseek:
7763 return get_errno(lseek(arg1, arg2, arg3));
7764 #endif
7765 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7766 /* Alpha specific */
7767 case TARGET_NR_getxpid:
7768 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7769 return get_errno(getpid());
7770 #endif
7771 #ifdef TARGET_NR_getpid
7772 case TARGET_NR_getpid:
7773 return get_errno(getpid());
7774 #endif
7775 case TARGET_NR_mount:
7777 /* need to look at the data field */
7778 void *p2, *p3;
7780 if (arg1) {
7781 p = lock_user_string(arg1);
7782 if (!p) {
7783 return -TARGET_EFAULT;
7785 } else {
7786 p = NULL;
7789 p2 = lock_user_string(arg2);
7790 if (!p2) {
7791 if (arg1) {
7792 unlock_user(p, arg1, 0);
7794 return -TARGET_EFAULT;
7797 if (arg3) {
7798 p3 = lock_user_string(arg3);
7799 if (!p3) {
7800 if (arg1) {
7801 unlock_user(p, arg1, 0);
7803 unlock_user(p2, arg2, 0);
7804 return -TARGET_EFAULT;
7806 } else {
7807 p3 = NULL;
7810 /* FIXME - arg5 should be locked, but it isn't clear how to
7811 * do that since it's not guaranteed to be a NULL-terminated
7812 * string.
7813 */
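/* Best effort: hand the host kernel the raw pointer via g2h() and let
 * it interpret the filesystem-specific data block in place.
 */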
7814 if (!arg5) {
7815 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7816 } else {
7817 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7819 ret = get_errno(ret);
7821 if (arg1) {
7822 unlock_user(p, arg1, 0);
7824 unlock_user(p2, arg2, 0);
7825 if (arg3) {
7826 unlock_user(p3, arg3, 0);
7829 return ret;
7830 #ifdef TARGET_NR_umount
7831 case TARGET_NR_umount:
7832 if (!(p = lock_user_string(arg1)))
7833 return -TARGET_EFAULT;
7834 ret = get_errno(umount(p));
7835 unlock_user(p, arg1, 0);
7836 return ret;
7837 #endif
7838 #ifdef TARGET_NR_stime /* not on alpha */
7839 case TARGET_NR_stime:
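/* The host stime() call is obsolete; build a timespec from the
 * guest's seconds value and use the equivalent
 * clock_settime(CLOCK_REALTIME) instead.
 */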
7841 struct timespec ts;
7842 ts.tv_nsec = 0;
7843 if (get_user_sal(ts.tv_sec, arg1)) {
7844 return -TARGET_EFAULT;
7846 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7848 #endif
7849 #ifdef TARGET_NR_alarm /* not on alpha */
7850 case TARGET_NR_alarm:
7851 return alarm(arg1);
7852 #endif
7853 #ifdef TARGET_NR_pause /* not on alpha */
7854 case TARGET_NR_pause:
7855 if (!block_signals()) {
7856 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7858 return -TARGET_EINTR;
7859 #endif
7860 #ifdef TARGET_NR_utime
7861 case TARGET_NR_utime:
7863 struct utimbuf tbuf, *host_tbuf;
7864 struct target_utimbuf *target_tbuf;
7865 if (arg2) {
7866 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7867 return -TARGET_EFAULT;
7868 tbuf.actime = tswapal(target_tbuf->actime);
7869 tbuf.modtime = tswapal(target_tbuf->modtime);
7870 unlock_user_struct(target_tbuf, arg2, 0);
7871 host_tbuf = &tbuf;
7872 } else {
7873 host_tbuf = NULL;
7875 if (!(p = lock_user_string(arg1)))
7876 return -TARGET_EFAULT;
7877 ret = get_errno(utime(p, host_tbuf));
7878 unlock_user(p, arg1, 0);
7880 return ret;
7881 #endif
7882 #ifdef TARGET_NR_utimes
7883 case TARGET_NR_utimes:
7885 struct timeval *tvp, tv[2];
7886 if (arg2) {
7887 if (copy_from_user_timeval(&tv[0], arg2)
7888 || copy_from_user_timeval(&tv[1],
7889 arg2 + sizeof(struct target_timeval)))
7890 return -TARGET_EFAULT;
7891 tvp = tv;
7892 } else {
7893 tvp = NULL;
7895 if (!(p = lock_user_string(arg1)))
7896 return -TARGET_EFAULT;
7897 ret = get_errno(utimes(p, tvp));
7898 unlock_user(p, arg1, 0);
7900 return ret;
7901 #endif
7902 #if defined(TARGET_NR_futimesat)
7903 case TARGET_NR_futimesat:
7905 struct timeval *tvp, tv[2];
7906 if (arg3) {
7907 if (copy_from_user_timeval(&tv[0], arg3)
7908 || copy_from_user_timeval(&tv[1],
7909 arg3 + sizeof(struct target_timeval)))
7910 return -TARGET_EFAULT;
7911 tvp = tv;
7912 } else {
7913 tvp = NULL;
7915 if (!(p = lock_user_string(arg2))) {
7916 return -TARGET_EFAULT;
7918 ret = get_errno(futimesat(arg1, path(p), tvp));
7919 unlock_user(p, arg2, 0);
7921 return ret;
7922 #endif
7923 #ifdef TARGET_NR_access
7924 case TARGET_NR_access:
7925 if (!(p = lock_user_string(arg1))) {
7926 return -TARGET_EFAULT;
7928 ret = get_errno(access(path(p), arg2));
7929 unlock_user(p, arg1, 0);
7930 return ret;
7931 #endif
7932 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7933 case TARGET_NR_faccessat:
7934 if (!(p = lock_user_string(arg2))) {
7935 return -TARGET_EFAULT;
7937 ret = get_errno(faccessat(arg1, p, arg3, 0));
7938 unlock_user(p, arg2, 0);
7939 return ret;
7940 #endif
7941 #ifdef TARGET_NR_nice /* not on alpha */
7942 case TARGET_NR_nice:
7943 return get_errno(nice(arg1));
7944 #endif
7945 case TARGET_NR_sync:
7946 sync();
7947 return 0;
7948 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7949 case TARGET_NR_syncfs:
7950 return get_errno(syncfs(arg1));
7951 #endif
7952 case TARGET_NR_kill:
7953 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7954 #ifdef TARGET_NR_rename
7955 case TARGET_NR_rename:
7957 void *p2;
7958 p = lock_user_string(arg1);
7959 p2 = lock_user_string(arg2);
7960 if (!p || !p2)
7961 ret = -TARGET_EFAULT;
7962 else
7963 ret = get_errno(rename(p, p2));
7964 unlock_user(p2, arg2, 0);
7965 unlock_user(p, arg1, 0);
7967 return ret;
7968 #endif
7969 #if defined(TARGET_NR_renameat)
7970 case TARGET_NR_renameat:
7972 void *p2;
7973 p = lock_user_string(arg2);
7974 p2 = lock_user_string(arg4);
7975 if (!p || !p2)
7976 ret = -TARGET_EFAULT;
7977 else
7978 ret = get_errno(renameat(arg1, p, arg3, p2));
7979 unlock_user(p2, arg4, 0);
7980 unlock_user(p, arg2, 0);
7982 return ret;
7983 #endif
7984 #if defined(TARGET_NR_renameat2)
7985 case TARGET_NR_renameat2:
7987 void *p2;
7988 p = lock_user_string(arg2);
7989 p2 = lock_user_string(arg4);
7990 if (!p || !p2) {
7991 ret = -TARGET_EFAULT;
7992 } else {
7993 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7995 unlock_user(p2, arg4, 0);
7996 unlock_user(p, arg2, 0);
7998 return ret;
7999 #endif
8000 #ifdef TARGET_NR_mkdir
8001 case TARGET_NR_mkdir:
8002 if (!(p = lock_user_string(arg1)))
8003 return -TARGET_EFAULT;
8004 ret = get_errno(mkdir(p, arg2));
8005 unlock_user(p, arg1, 0);
8006 return ret;
8007 #endif
8008 #if defined(TARGET_NR_mkdirat)
8009 case TARGET_NR_mkdirat:
8010 if (!(p = lock_user_string(arg2)))
8011 return -TARGET_EFAULT;
8012 ret = get_errno(mkdirat(arg1, p, arg3));
8013 unlock_user(p, arg2, 0);
8014 return ret;
8015 #endif
8016 #ifdef TARGET_NR_rmdir
8017 case TARGET_NR_rmdir:
8018 if (!(p = lock_user_string(arg1)))
8019 return -TARGET_EFAULT;
8020 ret = get_errno(rmdir(p));
8021 unlock_user(p, arg1, 0);
8022 return ret;
8023 #endif
8024 case TARGET_NR_dup:
8025 ret = get_errno(dup(arg1));
8026 if (ret >= 0) {
8027 fd_trans_dup(arg1, ret);
8029 return ret;
8030 #ifdef TARGET_NR_pipe
8031 case TARGET_NR_pipe:
8032 return do_pipe(cpu_env, arg1, 0, 0);
8033 #endif
8034 #ifdef TARGET_NR_pipe2
8035 case TARGET_NR_pipe2:
8036 return do_pipe(cpu_env, arg1,
8037 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8038 #endif
8039 case TARGET_NR_times:
8041 struct target_tms *tmsp;
8042 struct tms tms;
8043 ret = get_errno(times(&tms));
8044 if (arg1) {
8045 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8046 if (!tmsp)
8047 return -TARGET_EFAULT;
8048 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8049 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8050 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8051 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8053 if (!is_error(ret))
8054 ret = host_to_target_clock_t(ret);
8056 return ret;
8057 case TARGET_NR_acct:
8058 if (arg1 == 0) {
8059 ret = get_errno(acct(NULL));
8060 } else {
8061 if (!(p = lock_user_string(arg1))) {
8062 return -TARGET_EFAULT;
8064 ret = get_errno(acct(path(p)));
8065 unlock_user(p, arg1, 0);
8067 return ret;
8068 #ifdef TARGET_NR_umount2
8069 case TARGET_NR_umount2:
8070 if (!(p = lock_user_string(arg1)))
8071 return -TARGET_EFAULT;
8072 ret = get_errno(umount2(p, arg2));
8073 unlock_user(p, arg1, 0);
8074 return ret;
8075 #endif
8076 case TARGET_NR_ioctl:
8077 return do_ioctl(arg1, arg2, arg3);
8078 #ifdef TARGET_NR_fcntl
8079 case TARGET_NR_fcntl:
8080 return do_fcntl(arg1, arg2, arg3);
8081 #endif
8082 case TARGET_NR_setpgid:
8083 return get_errno(setpgid(arg1, arg2));
8084 case TARGET_NR_umask:
8085 return get_errno(umask(arg1));
8086 case TARGET_NR_chroot:
8087 if (!(p = lock_user_string(arg1)))
8088 return -TARGET_EFAULT;
8089 ret = get_errno(chroot(p));
8090 unlock_user(p, arg1, 0);
8091 return ret;
8092 #ifdef TARGET_NR_dup2
8093 case TARGET_NR_dup2:
8094 ret = get_errno(dup2(arg1, arg2));
8095 if (ret >= 0) {
8096 fd_trans_dup(arg1, arg2);
8098 return ret;
8099 #endif
8100 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8101 case TARGET_NR_dup3:
8103 int host_flags;
8105 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8106 return -TARGET_EINVAL;
8108 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8109 ret = get_errno(dup3(arg1, arg2, host_flags));
8110 if (ret >= 0) {
8111 fd_trans_dup(arg1, arg2);
8113 return ret;
8115 #endif
8116 #ifdef TARGET_NR_getppid /* not on alpha */
8117 case TARGET_NR_getppid:
8118 return get_errno(getppid());
8119 #endif
8120 #ifdef TARGET_NR_getpgrp
8121 case TARGET_NR_getpgrp:
8122 return get_errno(getpgrp());
8123 #endif
8124 case TARGET_NR_setsid:
8125 return get_errno(setsid());
8126 #ifdef TARGET_NR_sigaction
8127 case TARGET_NR_sigaction:
8129 #if defined(TARGET_ALPHA)
8130 struct target_sigaction act, oact, *pact = 0;
8131 struct target_old_sigaction *old_act;
8132 if (arg2) {
8133 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8134 return -TARGET_EFAULT;
8135 act._sa_handler = old_act->_sa_handler;
8136 target_siginitset(&act.sa_mask, old_act->sa_mask);
8137 act.sa_flags = old_act->sa_flags;
8138 act.sa_restorer = 0;
8139 unlock_user_struct(old_act, arg2, 0);
8140 pact = &act;
8142 ret = get_errno(do_sigaction(arg1, pact, &oact));
8143 if (!is_error(ret) && arg3) {
8144 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8145 return -TARGET_EFAULT;
8146 old_act->_sa_handler = oact._sa_handler;
8147 old_act->sa_mask = oact.sa_mask.sig[0];
8148 old_act->sa_flags = oact.sa_flags;
8149 unlock_user_struct(old_act, arg3, 1);
8151 #elif defined(TARGET_MIPS)
8152 struct target_sigaction act, oact, *pact, *old_act;
8154 if (arg2) {
8155 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8156 return -TARGET_EFAULT;
8157 act._sa_handler = old_act->_sa_handler;
8158 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8159 act.sa_flags = old_act->sa_flags;
8160 unlock_user_struct(old_act, arg2, 0);
8161 pact = &act;
8162 } else {
8163 pact = NULL;
8166 ret = get_errno(do_sigaction(arg1, pact, &oact));
8168 if (!is_error(ret) && arg3) {
8169 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8170 return -TARGET_EFAULT;
8171 old_act->_sa_handler = oact._sa_handler;
8172 old_act->sa_flags = oact.sa_flags;
8173 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8174 old_act->sa_mask.sig[1] = 0;
8175 old_act->sa_mask.sig[2] = 0;
8176 old_act->sa_mask.sig[3] = 0;
8177 unlock_user_struct(old_act, arg3, 1);
8179 #else
8180 struct target_old_sigaction *old_act;
8181 struct target_sigaction act, oact, *pact;
8182 if (arg2) {
8183 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8184 return -TARGET_EFAULT;
8185 act._sa_handler = old_act->_sa_handler;
8186 target_siginitset(&act.sa_mask, old_act->sa_mask);
8187 act.sa_flags = old_act->sa_flags;
8188 act.sa_restorer = old_act->sa_restorer;
8189 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8190 act.ka_restorer = 0;
8191 #endif
8192 unlock_user_struct(old_act, arg2, 0);
8193 pact = &act;
8194 } else {
8195 pact = NULL;
8197 ret = get_errno(do_sigaction(arg1, pact, &oact));
8198 if (!is_error(ret) && arg3) {
8199 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8200 return -TARGET_EFAULT;
8201 old_act->_sa_handler = oact._sa_handler;
8202 old_act->sa_mask = oact.sa_mask.sig[0];
8203 old_act->sa_flags = oact.sa_flags;
8204 old_act->sa_restorer = oact.sa_restorer;
8205 unlock_user_struct(old_act, arg3, 1);
8207 #endif
8209 return ret;
8210 #endif
8211 case TARGET_NR_rt_sigaction:
8213 #if defined(TARGET_ALPHA)
8214 /* For Alpha and SPARC this is a 5 argument syscall, with
8215 * a 'restorer' parameter which must be copied into the
8216 * sa_restorer field of the sigaction struct.
8217 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8218 * and arg5 is the sigsetsize.
8219 * Alpha also has a separate rt_sigaction struct that it uses
8220 * here; SPARC uses the usual sigaction struct.
8221 */
8222 struct target_rt_sigaction *rt_act;
8223 struct target_sigaction act, oact, *pact = 0;
8225 if (arg4 != sizeof(target_sigset_t)) {
8226 return -TARGET_EINVAL;
8228 if (arg2) {
8229 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8230 return -TARGET_EFAULT;
8231 act._sa_handler = rt_act->_sa_handler;
8232 act.sa_mask = rt_act->sa_mask;
8233 act.sa_flags = rt_act->sa_flags;
8234 act.sa_restorer = arg5;
8235 unlock_user_struct(rt_act, arg2, 0);
8236 pact = &act;
8238 ret = get_errno(do_sigaction(arg1, pact, &oact));
8239 if (!is_error(ret) && arg3) {
8240 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8241 return -TARGET_EFAULT;
8242 rt_act->_sa_handler = oact._sa_handler;
8243 rt_act->sa_mask = oact.sa_mask;
8244 rt_act->sa_flags = oact.sa_flags;
8245 unlock_user_struct(rt_act, arg3, 1);
8247 #else
8248 #ifdef TARGET_SPARC
8249 target_ulong restorer = arg4;
8250 target_ulong sigsetsize = arg5;
8251 #else
8252 target_ulong sigsetsize = arg4;
8253 #endif
8254 struct target_sigaction *act;
8255 struct target_sigaction *oact;
8257 if (sigsetsize != sizeof(target_sigset_t)) {
8258 return -TARGET_EINVAL;
8260 if (arg2) {
8261 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8262 return -TARGET_EFAULT;
8264 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8265 act->ka_restorer = restorer;
8266 #endif
8267 } else {
8268 act = NULL;
8270 if (arg3) {
8271 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8272 ret = -TARGET_EFAULT;
8273 goto rt_sigaction_fail;
8275 } else
8276 oact = NULL;
8277 ret = get_errno(do_sigaction(arg1, act, oact));
8278 rt_sigaction_fail:
8279 if (act)
8280 unlock_user_struct(act, arg2, 0);
8281 if (oact)
8282 unlock_user_struct(oact, arg3, 1);
8283 #endif
8285 return ret;
8286 #ifdef TARGET_NR_sgetmask /* not on alpha */
8287 case TARGET_NR_sgetmask:
8289 sigset_t cur_set;
8290 abi_ulong target_set;
8291 ret = do_sigprocmask(0, NULL, &cur_set);
8292 if (!ret) {
8293 host_to_target_old_sigset(&target_set, &cur_set);
8294 ret = target_set;
8297 return ret;
8298 #endif
8299 #ifdef TARGET_NR_ssetmask /* not on alpha */
8300 case TARGET_NR_ssetmask:
8302 sigset_t set, oset;
8303 abi_ulong target_set = arg1;
8304 target_to_host_old_sigset(&set, &target_set);
8305 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8306 if (!ret) {
8307 host_to_target_old_sigset(&target_set, &oset);
8308 ret = target_set;
8311 return ret;
8312 #endif
8313 #ifdef TARGET_NR_sigprocmask
8314 case TARGET_NR_sigprocmask:
8316 #if defined(TARGET_ALPHA)
8317 sigset_t set, oldset;
8318 abi_ulong mask;
8319 int how;
8321 switch (arg1) {
8322 case TARGET_SIG_BLOCK:
8323 how = SIG_BLOCK;
8324 break;
8325 case TARGET_SIG_UNBLOCK:
8326 how = SIG_UNBLOCK;
8327 break;
8328 case TARGET_SIG_SETMASK:
8329 how = SIG_SETMASK;
8330 break;
8331 default:
8332 return -TARGET_EINVAL;
8334 mask = arg2;
8335 target_to_host_old_sigset(&set, &mask);
8337 ret = do_sigprocmask(how, &set, &oldset);
8338 if (!is_error(ret)) {
8339 host_to_target_old_sigset(&mask, &oldset);
8340 ret = mask;
8341 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8343 #else
8344 sigset_t set, oldset, *set_ptr;
8345 int how;
8347 if (arg2) {
8348 switch (arg1) {
8349 case TARGET_SIG_BLOCK:
8350 how = SIG_BLOCK;
8351 break;
8352 case TARGET_SIG_UNBLOCK:
8353 how = SIG_UNBLOCK;
8354 break;
8355 case TARGET_SIG_SETMASK:
8356 how = SIG_SETMASK;
8357 break;
8358 default:
8359 return -TARGET_EINVAL;
8361 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8362 return -TARGET_EFAULT;
8363 target_to_host_old_sigset(&set, p);
8364 unlock_user(p, arg2, 0);
8365 set_ptr = &set;
8366 } else {
8367 how = 0;
8368 set_ptr = NULL;
8370 ret = do_sigprocmask(how, set_ptr, &oldset);
8371 if (!is_error(ret) && arg3) {
8372 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8373 return -TARGET_EFAULT;
8374 host_to_target_old_sigset(p, &oldset);
8375 unlock_user(p, arg3, sizeof(target_sigset_t));
8377 #endif
8379 return ret;
8380 #endif
8381 case TARGET_NR_rt_sigprocmask:
8383 int how = arg1;
8384 sigset_t set, oldset, *set_ptr;
8386 if (arg4 != sizeof(target_sigset_t)) {
8387 return -TARGET_EINVAL;
8390 if (arg2) {
8391 switch (how) {
8392 case TARGET_SIG_BLOCK:
8393 how = SIG_BLOCK;
8394 break;
8395 case TARGET_SIG_UNBLOCK:
8396 how = SIG_UNBLOCK;
8397 break;
8398 case TARGET_SIG_SETMASK:
8399 how = SIG_SETMASK;
8400 break;
8401 default:
8402 return -TARGET_EINVAL;
8404 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8405 return -TARGET_EFAULT;
8406 target_to_host_sigset(&set, p);
8407 unlock_user(p, arg2, 0);
8408 set_ptr = &set;
8409 } else {
8410 how = 0;
8411 set_ptr = NULL;
8413 ret = do_sigprocmask(how, set_ptr, &oldset);
8414 if (!is_error(ret) && arg3) {
8415 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8416 return -TARGET_EFAULT;
8417 host_to_target_sigset(p, &oldset);
8418 unlock_user(p, arg3, sizeof(target_sigset_t));
8421 return ret;
8422 #ifdef TARGET_NR_sigpending
8423 case TARGET_NR_sigpending:
8425 sigset_t set;
8426 ret = get_errno(sigpending(&set));
8427 if (!is_error(ret)) {
8428 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8429 return -TARGET_EFAULT;
8430 host_to_target_old_sigset(p, &set);
8431 unlock_user(p, arg1, sizeof(target_sigset_t));
8434 return ret;
8435 #endif
8436 case TARGET_NR_rt_sigpending:
8438 sigset_t set;
8440 /* Yes, this check is >, not != like most. We follow the kernel's
8441 * logic and it does it like this because it implements
8442 * NR_sigpending through the same code path, and in that case
8443 * the old_sigset_t is smaller in size.
8444 */
8445 if (arg2 > sizeof(target_sigset_t)) {
8446 return -TARGET_EINVAL;
8449 ret = get_errno(sigpending(&set));
8450 if (!is_error(ret)) {
8451 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8452 return -TARGET_EFAULT;
8453 host_to_target_sigset(p, &set);
8454 unlock_user(p, arg1, sizeof(target_sigset_t));
8457 return ret;
8458 #ifdef TARGET_NR_sigsuspend
8459 case TARGET_NR_sigsuspend:
8461 TaskState *ts = cpu->opaque;
8462 #if defined(TARGET_ALPHA)
8463 abi_ulong mask = arg1;
8464 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8465 #else
8466 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8467 return -TARGET_EFAULT;
8468 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8469 unlock_user(p, arg1, 0);
8470 #endif
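/* in_sigsuspend is presumably what tells the signal-delivery path to
 * restore the pre-suspend mask once a pending guest signal has been
 * handled; it is left unset when the syscall is to be restarted.
 */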
8471 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8472 SIGSET_T_SIZE));
8473 if (ret != -TARGET_ERESTARTSYS) {
8474 ts->in_sigsuspend = 1;
8477 return ret;
8478 #endif
8479 case TARGET_NR_rt_sigsuspend:
8481 TaskState *ts = cpu->opaque;
8483 if (arg2 != sizeof(target_sigset_t)) {
8484 return -TARGET_EINVAL;
8486 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8487 return -TARGET_EFAULT;
8488 target_to_host_sigset(&ts->sigsuspend_mask, p);
8489 unlock_user(p, arg1, 0);
8490 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8491 SIGSET_T_SIZE));
8492 if (ret != -TARGET_ERESTARTSYS) {
8493 ts->in_sigsuspend = 1;
8496 return ret;
8497 case TARGET_NR_rt_sigtimedwait:
8499 sigset_t set;
8500 struct timespec uts, *puts;
8501 siginfo_t uinfo;
8503 if (arg4 != sizeof(target_sigset_t)) {
8504 return -TARGET_EINVAL;
8507 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8508 return -TARGET_EFAULT;
8509 target_to_host_sigset(&set, p);
8510 unlock_user(p, arg1, 0);
8511 if (arg3) {
8512 puts = &uts;
8513 target_to_host_timespec(puts, arg3);
8514 } else {
8515 puts = NULL;
8517 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8518 SIGSET_T_SIZE));
8519 if (!is_error(ret)) {
8520 if (arg2) {
8521 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8523 if (!p) {
8524 return -TARGET_EFAULT;
8526 host_to_target_siginfo(p, &uinfo);
8527 unlock_user(p, arg2, sizeof(target_siginfo_t));
8529 ret = host_to_target_signal(ret);
8532 return ret;
8533 case TARGET_NR_rt_sigqueueinfo:
8535 siginfo_t uinfo;
8537 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8538 if (!p) {
8539 return -TARGET_EFAULT;
8541 target_to_host_siginfo(&uinfo, p);
8542 unlock_user(p, arg3, 0);
8543 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8545 return ret;
8546 case TARGET_NR_rt_tgsigqueueinfo:
8548 siginfo_t uinfo;
8550 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8551 if (!p) {
8552 return -TARGET_EFAULT;
8554 target_to_host_siginfo(&uinfo, p);
8555 unlock_user(p, arg4, 0);
8556 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8558 return ret;
8559 #ifdef TARGET_NR_sigreturn
8560 case TARGET_NR_sigreturn:
8561 if (block_signals()) {
8562 return -TARGET_ERESTARTSYS;
8564 return do_sigreturn(cpu_env);
8565 #endif
8566 case TARGET_NR_rt_sigreturn:
8567 if (block_signals()) {
8568 return -TARGET_ERESTARTSYS;
8570 return do_rt_sigreturn(cpu_env);
8571 case TARGET_NR_sethostname:
8572 if (!(p = lock_user_string(arg1)))
8573 return -TARGET_EFAULT;
8574 ret = get_errno(sethostname(p, arg2));
8575 unlock_user(p, arg1, 0);
8576 return ret;
8577 #ifdef TARGET_NR_setrlimit
8578 case TARGET_NR_setrlimit:
8580 int resource = target_to_host_resource(arg1);
8581 struct target_rlimit *target_rlim;
8582 struct rlimit rlim;
8583 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8584 return -TARGET_EFAULT;
8585 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8586 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8587 unlock_user_struct(target_rlim, arg2, 0);
8588 /*
8589 * If we just passed through resource limit settings for memory then
8590 * they would also apply to QEMU's own allocations, and QEMU will
8591 * crash or hang or die if its allocations fail. Ideally we would
8592 * track the guest allocations in QEMU and apply the limits ourselves.
8593 * For now, just tell the guest the call succeeded but don't actually
8594 * limit anything.
8595 */
8596 if (resource != RLIMIT_AS &&
8597 resource != RLIMIT_DATA &&
8598 resource != RLIMIT_STACK) {
8599 return get_errno(setrlimit(resource, &rlim));
8600 } else {
8601 return 0;
8604 #endif
8605 #ifdef TARGET_NR_getrlimit
8606 case TARGET_NR_getrlimit:
8608 int resource = target_to_host_resource(arg1);
8609 struct target_rlimit *target_rlim;
8610 struct rlimit rlim;
8612 ret = get_errno(getrlimit(resource, &rlim));
8613 if (!is_error(ret)) {
8614 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8615 return -TARGET_EFAULT;
8616 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8617 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8618 unlock_user_struct(target_rlim, arg2, 1);
8621 return ret;
8622 #endif
8623 case TARGET_NR_getrusage:
8625 struct rusage rusage;
8626 ret = get_errno(getrusage(arg1, &rusage));
8627 if (!is_error(ret)) {
8628 ret = host_to_target_rusage(arg2, &rusage);
8631 return ret;
8632 case TARGET_NR_gettimeofday:
8634 struct timeval tv;
8635 ret = get_errno(gettimeofday(&tv, NULL));
8636 if (!is_error(ret)) {
8637 if (copy_to_user_timeval(arg1, &tv))
8638 return -TARGET_EFAULT;
8641 return ret;
8642 case TARGET_NR_settimeofday:
8644 struct timeval tv, *ptv = NULL;
8645 struct timezone tz, *ptz = NULL;
8647 if (arg1) {
8648 if (copy_from_user_timeval(&tv, arg1)) {
8649 return -TARGET_EFAULT;
8651 ptv = &tv;
8654 if (arg2) {
8655 if (copy_from_user_timezone(&tz, arg2)) {
8656 return -TARGET_EFAULT;
8658 ptz = &tz;
8661 return get_errno(settimeofday(ptv, ptz));
8663 #if defined(TARGET_NR_select)
8664 case TARGET_NR_select:
8665 #if defined(TARGET_WANT_NI_OLD_SELECT)
8666 /* some architectures used to have old_select here
8667  * but now return ENOSYS for it.
8668  */
8669 ret = -TARGET_ENOSYS;
8670 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8671 ret = do_old_select(arg1);
8672 #else
8673 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8674 #endif
8675 return ret;
8676 #endif
8677 #ifdef TARGET_NR_pselect6
8678 case TARGET_NR_pselect6:
8680 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8681 fd_set rfds, wfds, efds;
8682 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8683 struct timespec ts, *ts_ptr;
8685 /*
8686 * The 6th arg is actually two args smashed together,
8687 * so we cannot use the C library.
8688 */
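/* Concretely, arg6 points at two consecutive abi_ulongs holding the
 * sigset address and its size; they are unpacked further below.
 */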
8689 sigset_t set;
8690 struct {
8691 sigset_t *set;
8692 size_t size;
8693 } sig, *sig_ptr;
8695 abi_ulong arg_sigset, arg_sigsize, *arg7;
8696 target_sigset_t *target_sigset;
8698 n = arg1;
8699 rfd_addr = arg2;
8700 wfd_addr = arg3;
8701 efd_addr = arg4;
8702 ts_addr = arg5;
8704 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8705 if (ret) {
8706 return ret;
8708 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8709 if (ret) {
8710 return ret;
8712 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8713 if (ret) {
8714 return ret;
8717 /*
8718 * This takes a timespec, and not a timeval, so we cannot
8719 * use the do_select() helper ...
8720 */
8721 if (ts_addr) {
8722 if (target_to_host_timespec(&ts, ts_addr)) {
8723 return -TARGET_EFAULT;
8725 ts_ptr = &ts;
8726 } else {
8727 ts_ptr = NULL;
8730 /* Extract the two packed args for the sigset */
8731 if (arg6) {
8732 sig_ptr = &sig;
8733 sig.size = SIGSET_T_SIZE;
8735 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8736 if (!arg7) {
8737 return -TARGET_EFAULT;
8739 arg_sigset = tswapal(arg7[0]);
8740 arg_sigsize = tswapal(arg7[1]);
8741 unlock_user(arg7, arg6, 0);
8743 if (arg_sigset) {
8744 sig.set = &set;
8745 if (arg_sigsize != sizeof(*target_sigset)) {
8746 /* Like the kernel, we enforce correct size sigsets */
8747 return -TARGET_EINVAL;
8749 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8750 sizeof(*target_sigset), 1);
8751 if (!target_sigset) {
8752 return -TARGET_EFAULT;
8754 target_to_host_sigset(&set, target_sigset);
8755 unlock_user(target_sigset, arg_sigset, 0);
8756 } else {
8757 sig.set = NULL;
8759 } else {
8760 sig_ptr = NULL;
8763 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8764 ts_ptr, sig_ptr));
8766 if (!is_error(ret)) {
8767 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8768 return -TARGET_EFAULT;
8769 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8770 return -TARGET_EFAULT;
8771 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8772 return -TARGET_EFAULT;
8774 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8775 return -TARGET_EFAULT;
8778 return ret;
8779 #endif
8780 #ifdef TARGET_NR_symlink
8781 case TARGET_NR_symlink:
8783 void *p2;
8784 p = lock_user_string(arg1);
8785 p2 = lock_user_string(arg2);
8786 if (!p || !p2)
8787 ret = -TARGET_EFAULT;
8788 else
8789 ret = get_errno(symlink(p, p2));
8790 unlock_user(p2, arg2, 0);
8791 unlock_user(p, arg1, 0);
8793 return ret;
8794 #endif
8795 #if defined(TARGET_NR_symlinkat)
8796 case TARGET_NR_symlinkat:
8798 void *p2;
8799 p = lock_user_string(arg1);
8800 p2 = lock_user_string(arg3);
8801 if (!p || !p2)
8802 ret = -TARGET_EFAULT;
8803 else
8804 ret = get_errno(symlinkat(p, arg2, p2));
8805 unlock_user(p2, arg3, 0);
8806 unlock_user(p, arg1, 0);
8808 return ret;
8809 #endif
8810 #ifdef TARGET_NR_readlink
8811 case TARGET_NR_readlink:
8813 void *p2;
8814 p = lock_user_string(arg1);
8815 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8816 if (!p || !p2) {
8817 ret = -TARGET_EFAULT;
8818 } else if (!arg3) {
8819 /* Short circuit this for the magic exe check. */
8820 ret = -TARGET_EINVAL;
8821 } else if (is_proc_myself((const char *)p, "exe")) {
8822 char real[PATH_MAX], *temp;
8823 temp = realpath(exec_path, real);
8824 /* Return value is # of bytes that we wrote to the buffer. */
8825 if (temp == NULL) {
8826 ret = get_errno(-1);
8827 } else {
8828 /* Don't worry about sign mismatch as earlier mapping
8829 * logic would have thrown a bad address error. */
8830 ret = MIN(strlen(real), arg3);
8831 /* We cannot NUL terminate the string. */
8832 memcpy(p2, real, ret);
8834 } else {
8835 ret = get_errno(readlink(path(p), p2, arg3));
8837 unlock_user(p2, arg2, ret);
8838 unlock_user(p, arg1, 0);
8840 return ret;
8841 #endif
8842 #if defined(TARGET_NR_readlinkat)
8843 case TARGET_NR_readlinkat:
8845 void *p2;
8846 p = lock_user_string(arg2);
8847 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8848 if (!p || !p2) {
8849 ret = -TARGET_EFAULT;
8850 } else if (is_proc_myself((const char *)p, "exe")) {
8851 char real[PATH_MAX], *temp;
8852 temp = realpath(exec_path, real);
9853 ret = temp == NULL ? get_errno(-1) : strlen(real);
8854 snprintf((char *)p2, arg4, "%s", real);
8855 } else {
8856 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8858 unlock_user(p2, arg3, ret);
8859 unlock_user(p, arg2, 0);
8861 return ret;
8862 #endif
8863 #ifdef TARGET_NR_swapon
8864 case TARGET_NR_swapon:
8865 if (!(p = lock_user_string(arg1)))
8866 return -TARGET_EFAULT;
8867 ret = get_errno(swapon(p, arg2));
8868 unlock_user(p, arg1, 0);
8869 return ret;
8870 #endif
8871 case TARGET_NR_reboot:
8872 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8873 /* arg4 must be ignored in all other cases */
8874 p = lock_user_string(arg4);
8875 if (!p) {
8876 return -TARGET_EFAULT;
8878 ret = get_errno(reboot(arg1, arg2, arg3, p));
8879 unlock_user(p, arg4, 0);
8880 } else {
8881 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8883 return ret;
8884 #ifdef TARGET_NR_mmap
8885 case TARGET_NR_mmap:
8886 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8887 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8888 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8889 || defined(TARGET_S390X)
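/* These ABIs use the old-style mmap, where arg1 points at a block of
 * six arguments in guest memory rather than passing them in registers.
 */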
8891 abi_ulong *v;
8892 abi_ulong v1, v2, v3, v4, v5, v6;
8893 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8894 return -TARGET_EFAULT;
8895 v1 = tswapal(v[0]);
8896 v2 = tswapal(v[1]);
8897 v3 = tswapal(v[2]);
8898 v4 = tswapal(v[3]);
8899 v5 = tswapal(v[4]);
8900 v6 = tswapal(v[5]);
8901 unlock_user(v, arg1, 0);
8902 ret = get_errno(target_mmap(v1, v2, v3,
8903 target_to_host_bitmask(v4, mmap_flags_tbl),
8904 v5, v6));
8906 #else
8907 ret = get_errno(target_mmap(arg1, arg2, arg3,
8908 target_to_host_bitmask(arg4, mmap_flags_tbl),
8909 arg5,
8910 arg6));
8911 #endif
8912 return ret;
8913 #endif
8914 #ifdef TARGET_NR_mmap2
8915 case TARGET_NR_mmap2:
8916 #ifndef MMAP_SHIFT
8917 #define MMAP_SHIFT 12
8918 #endif
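/* mmap2 passes the file offset in 4096-byte units rather than in
 * bytes, so scale it before handing it to target_mmap().
 */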
8919 ret = target_mmap(arg1, arg2, arg3,
8920 target_to_host_bitmask(arg4, mmap_flags_tbl),
8921 arg5, arg6 << MMAP_SHIFT);
8922 return get_errno(ret);
8923 #endif
8924 case TARGET_NR_munmap:
8925 return get_errno(target_munmap(arg1, arg2));
8926 case TARGET_NR_mprotect:
8928 TaskState *ts = cpu->opaque;
8929 /* Special hack to detect libc making the stack executable. */
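/* The flag is handled here rather than passed through, presumably
 * because the guest stack need not be a grows-down mapping on the
 * host: the range is extended down to the stack limit by hand and
 * PROT_GROWSDOWN dropped before calling target_mprotect().
 */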
8930 if ((arg3 & PROT_GROWSDOWN)
8931 && arg1 >= ts->info->stack_limit
8932 && arg1 <= ts->info->start_stack) {
8933 arg3 &= ~PROT_GROWSDOWN;
8934 arg2 = arg2 + arg1 - ts->info->stack_limit;
8935 arg1 = ts->info->stack_limit;
8938 return get_errno(target_mprotect(arg1, arg2, arg3));
8939 #ifdef TARGET_NR_mremap
8940 case TARGET_NR_mremap:
8941 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8942 #endif
8943 /* ??? msync/mlock/munlock are broken for softmmu. */
8944 #ifdef TARGET_NR_msync
8945 case TARGET_NR_msync:
8946 return get_errno(msync(g2h(arg1), arg2, arg3));
8947 #endif
8948 #ifdef TARGET_NR_mlock
8949 case TARGET_NR_mlock:
8950 return get_errno(mlock(g2h(arg1), arg2));
8951 #endif
8952 #ifdef TARGET_NR_munlock
8953 case TARGET_NR_munlock:
8954 return get_errno(munlock(g2h(arg1), arg2));
8955 #endif
8956 #ifdef TARGET_NR_mlockall
8957 case TARGET_NR_mlockall:
8958 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8959 #endif
8960 #ifdef TARGET_NR_munlockall
8961 case TARGET_NR_munlockall:
8962 return get_errno(munlockall());
8963 #endif
8964 #ifdef TARGET_NR_truncate
8965 case TARGET_NR_truncate:
8966 if (!(p = lock_user_string(arg1)))
8967 return -TARGET_EFAULT;
8968 ret = get_errno(truncate(p, arg2));
8969 unlock_user(p, arg1, 0);
8970 return ret;
8971 #endif
8972 #ifdef TARGET_NR_ftruncate
8973 case TARGET_NR_ftruncate:
8974 return get_errno(ftruncate(arg1, arg2));
8975 #endif
8976 case TARGET_NR_fchmod:
8977 return get_errno(fchmod(arg1, arg2));
8978 #if defined(TARGET_NR_fchmodat)
8979 case TARGET_NR_fchmodat:
8980 if (!(p = lock_user_string(arg2)))
8981 return -TARGET_EFAULT;
8982 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8983 unlock_user(p, arg2, 0);
8984 return ret;
8985 #endif
8986 case TARGET_NR_getpriority:
8987 /* Note that negative values are valid for getpriority, so we must
8988 differentiate based on errno settings. */
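/* The host libc returns the unbiased nice value here, but the guest
 * expects raw syscall semantics, where the result is biased as
 * 20 - nice so it can never look like an error (nice -20 => 40,
 * nice 19 => 1); Alpha instead takes the unbiased value and clears
 * its error flag.
 */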
8989 errno = 0;
8990 ret = getpriority(arg1, arg2);
8991 if (ret == -1 && errno != 0) {
8992 return -host_to_target_errno(errno);
8994 #ifdef TARGET_ALPHA
8995 /* Return value is the unbiased priority. Signal no error. */
8996 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8997 #else
8998 /* Return value is a biased priority to avoid negative numbers. */
8999 ret = 20 - ret;
9000 #endif
9001 return ret;
9002 case TARGET_NR_setpriority:
9003 return get_errno(setpriority(arg1, arg2, arg3));
9004 #ifdef TARGET_NR_statfs
9005 case TARGET_NR_statfs:
9006 if (!(p = lock_user_string(arg1))) {
9007 return -TARGET_EFAULT;
9009 ret = get_errno(statfs(path(p), &stfs));
9010 unlock_user(p, arg1, 0);
9011 convert_statfs:
9012 if (!is_error(ret)) {
9013 struct target_statfs *target_stfs;
9015 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9016 return -TARGET_EFAULT;
9017 __put_user(stfs.f_type, &target_stfs->f_type);
9018 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9019 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9020 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9021 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9022 __put_user(stfs.f_files, &target_stfs->f_files);
9023 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9024 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9025 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9026 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9027 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9028 #ifdef _STATFS_F_FLAGS
9029 __put_user(stfs.f_flags, &target_stfs->f_flags);
9030 #else
9031 __put_user(0, &target_stfs->f_flags);
9032 #endif
9033 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9034 unlock_user_struct(target_stfs, arg2, 1);
9036 return ret;
9037 #endif
9038 #ifdef TARGET_NR_fstatfs
9039 case TARGET_NR_fstatfs:
9040 ret = get_errno(fstatfs(arg1, &stfs));
9041 goto convert_statfs;
9042 #endif
9043 #ifdef TARGET_NR_statfs64
9044 case TARGET_NR_statfs64:
9045 if (!(p = lock_user_string(arg1))) {
9046 return -TARGET_EFAULT;
9048 ret = get_errno(statfs(path(p), &stfs));
9049 unlock_user(p, arg1, 0);
9050 convert_statfs64:
9051 if (!is_error(ret)) {
9052 struct target_statfs64 *target_stfs;
9054 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9055 return -TARGET_EFAULT;
9056 __put_user(stfs.f_type, &target_stfs->f_type);
9057 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9058 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9059 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9060 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9061 __put_user(stfs.f_files, &target_stfs->f_files);
9062 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9063 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9064 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9065 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9066 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9067 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9068 unlock_user_struct(target_stfs, arg3, 1);
9070 return ret;
9071 case TARGET_NR_fstatfs64:
9072 ret = get_errno(fstatfs(arg1, &stfs));
9073 goto convert_statfs64;
9074 #endif
9075 #ifdef TARGET_NR_socketcall
9076 case TARGET_NR_socketcall:
9077 return do_socketcall(arg1, arg2);
9078 #endif
9079 #ifdef TARGET_NR_accept
9080 case TARGET_NR_accept:
9081 return do_accept4(arg1, arg2, arg3, 0);
9082 #endif
9083 #ifdef TARGET_NR_accept4
9084 case TARGET_NR_accept4:
9085 return do_accept4(arg1, arg2, arg3, arg4);
9086 #endif
9087 #ifdef TARGET_NR_bind
9088 case TARGET_NR_bind:
9089 return do_bind(arg1, arg2, arg3);
9090 #endif
9091 #ifdef TARGET_NR_connect
9092 case TARGET_NR_connect:
9093 return do_connect(arg1, arg2, arg3);
9094 #endif
9095 #ifdef TARGET_NR_getpeername
9096 case TARGET_NR_getpeername:
9097 return do_getpeername(arg1, arg2, arg3);
9098 #endif
9099 #ifdef TARGET_NR_getsockname
9100 case TARGET_NR_getsockname:
9101 return do_getsockname(arg1, arg2, arg3);
9102 #endif
9103 #ifdef TARGET_NR_getsockopt
9104 case TARGET_NR_getsockopt:
9105 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9106 #endif
9107 #ifdef TARGET_NR_listen
9108 case TARGET_NR_listen:
9109 return get_errno(listen(arg1, arg2));
9110 #endif
9111 #ifdef TARGET_NR_recv
9112 case TARGET_NR_recv:
9113 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9114 #endif
9115 #ifdef TARGET_NR_recvfrom
9116 case TARGET_NR_recvfrom:
9117 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9118 #endif
9119 #ifdef TARGET_NR_recvmsg
9120 case TARGET_NR_recvmsg:
9121 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9122 #endif
9123 #ifdef TARGET_NR_send
9124 case TARGET_NR_send:
9125 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9126 #endif
9127 #ifdef TARGET_NR_sendmsg
9128 case TARGET_NR_sendmsg:
9129 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9130 #endif
9131 #ifdef TARGET_NR_sendmmsg
9132 case TARGET_NR_sendmmsg:
9133 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9134 case TARGET_NR_recvmmsg:
9135 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9136 #endif
9137 #ifdef TARGET_NR_sendto
9138 case TARGET_NR_sendto:
9139 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9140 #endif
9141 #ifdef TARGET_NR_shutdown
9142 case TARGET_NR_shutdown:
9143 return get_errno(shutdown(arg1, arg2));
9144 #endif
9145 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9146 case TARGET_NR_getrandom:
9147 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9148 if (!p) {
9149 return -TARGET_EFAULT;
9151 ret = get_errno(getrandom(p, arg2, arg3));
9152 unlock_user(p, arg1, ret);
9153 return ret;
9154 #endif
9155 #ifdef TARGET_NR_socket
9156 case TARGET_NR_socket:
9157 return do_socket(arg1, arg2, arg3);
9158 #endif
9159 #ifdef TARGET_NR_socketpair
9160 case TARGET_NR_socketpair:
9161 return do_socketpair(arg1, arg2, arg3, arg4);
9162 #endif
9163 #ifdef TARGET_NR_setsockopt
9164 case TARGET_NR_setsockopt:
9165 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9166 #endif
9167 #if defined(TARGET_NR_syslog)
9168 case TARGET_NR_syslog:
9170 int len = arg3;
9172 switch (arg1) {
9173 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9174 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9175 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9176 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9177 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9178 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9179 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9180 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9181 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9182 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9183 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9184 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9186 if (len < 0) {
9187 return -TARGET_EINVAL;
9189 if (len == 0) {
9190 return 0;
9192 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9193 if (!p) {
9194 return -TARGET_EFAULT;
9196 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9197 unlock_user(p, arg2, arg3);
9199 return ret;
9200 default:
9201 return -TARGET_EINVAL;
9204 break;
9205 #endif
9206 case TARGET_NR_setitimer:
9208 struct itimerval value, ovalue, *pvalue;
9210 if (arg2) {
9211 pvalue = &value;
9212 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9213 || copy_from_user_timeval(&pvalue->it_value,
9214 arg2 + sizeof(struct target_timeval)))
9215 return -TARGET_EFAULT;
9216 } else {
9217 pvalue = NULL;
9219 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9220 if (!is_error(ret) && arg3) {
9221 if (copy_to_user_timeval(arg3,
9222 &ovalue.it_interval)
9223 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9224 &ovalue.it_value))
9225 return -TARGET_EFAULT;
9228 return ret;
9229 case TARGET_NR_getitimer:
9231 struct itimerval value;
9233 ret = get_errno(getitimer(arg1, &value));
9234 if (!is_error(ret) && arg2) {
9235 if (copy_to_user_timeval(arg2,
9236 &value.it_interval)
9237 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9238 &value.it_value))
9239 return -TARGET_EFAULT;
9242 return ret;
9243 #ifdef TARGET_NR_stat
9244 case TARGET_NR_stat:
9245 if (!(p = lock_user_string(arg1))) {
9246 return -TARGET_EFAULT;
9248 ret = get_errno(stat(path(p), &st));
9249 unlock_user(p, arg1, 0);
9250 goto do_stat;
9251 #endif
9252 #ifdef TARGET_NR_lstat
9253 case TARGET_NR_lstat:
9254 if (!(p = lock_user_string(arg1))) {
9255 return -TARGET_EFAULT;
9257 ret = get_errno(lstat(path(p), &st));
9258 unlock_user(p, arg1, 0);
9259 goto do_stat;
9260 #endif
9261 #ifdef TARGET_NR_fstat
9262 case TARGET_NR_fstat:
9264 ret = get_errno(fstat(arg1, &st));
9265 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9266 do_stat:
9267 #endif
9268 if (!is_error(ret)) {
9269 struct target_stat *target_st;
9271 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9272 return -TARGET_EFAULT;
9273 memset(target_st, 0, sizeof(*target_st));
9274 __put_user(st.st_dev, &target_st->st_dev);
9275 __put_user(st.st_ino, &target_st->st_ino);
9276 __put_user(st.st_mode, &target_st->st_mode);
9277 __put_user(st.st_uid, &target_st->st_uid);
9278 __put_user(st.st_gid, &target_st->st_gid);
9279 __put_user(st.st_nlink, &target_st->st_nlink);
9280 __put_user(st.st_rdev, &target_st->st_rdev);
9281 __put_user(st.st_size, &target_st->st_size);
9282 __put_user(st.st_blksize, &target_st->st_blksize);
9283 __put_user(st.st_blocks, &target_st->st_blocks);
9284 __put_user(st.st_atime, &target_st->target_st_atime);
9285 __put_user(st.st_mtime, &target_st->target_st_mtime);
9286 __put_user(st.st_ctime, &target_st->target_st_ctime);
9287 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9288 defined(TARGET_STAT_HAVE_NSEC)
9289 __put_user(st.st_atim.tv_nsec,
9290 &target_st->target_st_atime_nsec);
9291 __put_user(st.st_mtim.tv_nsec,
9292 &target_st->target_st_mtime_nsec);
9293 __put_user(st.st_ctim.tv_nsec,
9294 &target_st->target_st_ctime_nsec);
9295 #endif
9296 unlock_user_struct(target_st, arg2, 1);
9299 return ret;
9300 #endif
9301 case TARGET_NR_vhangup:
9302 return get_errno(vhangup());
9303 #ifdef TARGET_NR_syscall
9304 case TARGET_NR_syscall:
9305 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9306 arg6, arg7, arg8, 0);
9307 #endif
9308 case TARGET_NR_wait4:
9310 int status;
9311 abi_long status_ptr = arg2;
9312 struct rusage rusage, *rusage_ptr;
9313 abi_ulong target_rusage = arg4;
9314 abi_long rusage_err;
9315 if (target_rusage)
9316 rusage_ptr = &rusage;
9317 else
9318 rusage_ptr = NULL;
9319 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9320 if (!is_error(ret)) {
9321 if (status_ptr && ret) {
9322 status = host_to_target_waitstatus(status);
9323 if (put_user_s32(status, status_ptr))
9324 return -TARGET_EFAULT;
9326 if (target_rusage) {
9327 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9328 if (rusage_err) {
9329 ret = rusage_err;
9334 return ret;
9335 #ifdef TARGET_NR_swapoff
9336 case TARGET_NR_swapoff:
9337 if (!(p = lock_user_string(arg1)))
9338 return -TARGET_EFAULT;
9339 ret = get_errno(swapoff(p));
9340 unlock_user(p, arg1, 0);
9341 return ret;
9342 #endif
9343 case TARGET_NR_sysinfo:
9345 struct target_sysinfo *target_value;
9346 struct sysinfo value;
9347 ret = get_errno(sysinfo(&value));
9348 if (!is_error(ret) && arg1)
9350 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9351 return -TARGET_EFAULT;
9352 __put_user(value.uptime, &target_value->uptime);
9353 __put_user(value.loads[0], &target_value->loads[0]);
9354 __put_user(value.loads[1], &target_value->loads[1]);
9355 __put_user(value.loads[2], &target_value->loads[2]);
9356 __put_user(value.totalram, &target_value->totalram);
9357 __put_user(value.freeram, &target_value->freeram);
9358 __put_user(value.sharedram, &target_value->sharedram);
9359 __put_user(value.bufferram, &target_value->bufferram);
9360 __put_user(value.totalswap, &target_value->totalswap);
9361 __put_user(value.freeswap, &target_value->freeswap);
9362 __put_user(value.procs, &target_value->procs);
9363 __put_user(value.totalhigh, &target_value->totalhigh);
9364 __put_user(value.freehigh, &target_value->freehigh);
9365 __put_user(value.mem_unit, &target_value->mem_unit);
9366 unlock_user_struct(target_value, arg1, 1);
9369 return ret;
9370 #ifdef TARGET_NR_ipc
9371 case TARGET_NR_ipc:
9372 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9373 #endif
9374 #ifdef TARGET_NR_semget
9375 case TARGET_NR_semget:
9376 return get_errno(semget(arg1, arg2, arg3));
9377 #endif
9378 #ifdef TARGET_NR_semop
9379 case TARGET_NR_semop:
9380 return do_semop(arg1, arg2, arg3);
9381 #endif
9382 #ifdef TARGET_NR_semctl
9383 case TARGET_NR_semctl:
9384 return do_semctl(arg1, arg2, arg3, arg4);
9385 #endif
9386 #ifdef TARGET_NR_msgctl
9387 case TARGET_NR_msgctl:
9388 return do_msgctl(arg1, arg2, arg3);
9389 #endif
9390 #ifdef TARGET_NR_msgget
9391 case TARGET_NR_msgget:
9392 return get_errno(msgget(arg1, arg2));
9393 #endif
9394 #ifdef TARGET_NR_msgrcv
9395 case TARGET_NR_msgrcv:
9396 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9397 #endif
9398 #ifdef TARGET_NR_msgsnd
9399 case TARGET_NR_msgsnd:
9400 return do_msgsnd(arg1, arg2, arg3, arg4);
9401 #endif
9402 #ifdef TARGET_NR_shmget
9403 case TARGET_NR_shmget:
9404 return get_errno(shmget(arg1, arg2, arg3));
9405 #endif
9406 #ifdef TARGET_NR_shmctl
9407 case TARGET_NR_shmctl:
9408 return do_shmctl(arg1, arg2, arg3);
9409 #endif
9410 #ifdef TARGET_NR_shmat
9411 case TARGET_NR_shmat:
9412 return do_shmat(cpu_env, arg1, arg2, arg3);
9413 #endif
9414 #ifdef TARGET_NR_shmdt
9415 case TARGET_NR_shmdt:
9416 return do_shmdt(arg1);
9417 #endif
9418 case TARGET_NR_fsync:
9419 return get_errno(fsync(arg1));
9420 case TARGET_NR_clone:
9421 /* Linux manages to have three different orderings for its
9422 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9423 * match the kernel's CONFIG_CLONE_* settings.
9424 * Microblaze is further special in that it uses a sixth
9425 * implicit argument to clone for the TLS pointer.
9426 */
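/* As a sketch, assuming do_fork() takes (env, flags, newsp,
 * parent_tidptr, tls, child_tidptr), the guest argument orders
 * handled below are:
 *   default:    clone(flags, newsp, ptid, ctid, tls)
 *   BACKWARDS:  clone(flags, newsp, ptid, tls,  ctid)
 *   BACKWARDS2: clone(newsp, flags, ptid, ctid, tls)
 */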
9427 #if defined(TARGET_MICROBLAZE)
9428 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9429 #elif defined(TARGET_CLONE_BACKWARDS)
9430 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9431 #elif defined(TARGET_CLONE_BACKWARDS2)
9432 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9433 #else
9434 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9435 #endif
9436 return ret;
9437 #ifdef __NR_exit_group
9438 /* new thread calls */
9439 case TARGET_NR_exit_group:
9440 preexit_cleanup(cpu_env, arg1);
9441 return get_errno(exit_group(arg1));
9442 #endif
9443 case TARGET_NR_setdomainname:
9444 if (!(p = lock_user_string(arg1)))
9445 return -TARGET_EFAULT;
9446 ret = get_errno(setdomainname(p, arg2));
9447 unlock_user(p, arg1, 0);
9448 return ret;
9449 case TARGET_NR_uname:
9450 /* no need to transcode because we use the linux syscall */
9452 struct new_utsname * buf;
9454 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9455 return -TARGET_EFAULT;
9456 ret = get_errno(sys_uname(buf));
9457 if (!is_error(ret)) {
9458 /* Overwrite the native machine name with whatever is being
9459 emulated. */
9460 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9461 sizeof(buf->machine));
9462 /* Allow the user to override the reported release. */
9463 if (qemu_uname_release && *qemu_uname_release) {
9464 g_strlcpy(buf->release, qemu_uname_release,
9465 sizeof(buf->release));
9468 unlock_user_struct(buf, arg1, 1);
9470 return ret;
9471 #ifdef TARGET_I386
9472 case TARGET_NR_modify_ldt:
9473 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9474 #if !defined(TARGET_X86_64)
9475 case TARGET_NR_vm86:
9476 return do_vm86(cpu_env, arg1, arg2);
9477 #endif
9478 #endif
9479 case TARGET_NR_adjtimex:
9481 struct timex host_buf;
9483 if (target_to_host_timex(&host_buf, arg1) != 0) {
9484 return -TARGET_EFAULT;
9486 ret = get_errno(adjtimex(&host_buf));
9487 if (!is_error(ret)) {
9488 if (host_to_target_timex(arg1, &host_buf) != 0) {
9489 return -TARGET_EFAULT;
9493 return ret;
9494 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9495 case TARGET_NR_clock_adjtime:
9497 struct timex htx, *phtx = &htx;
9499 if (target_to_host_timex(phtx, arg2) != 0) {
9500 return -TARGET_EFAULT;
9502 ret = get_errno(clock_adjtime(arg1, phtx));
9503 if (!is_error(ret) && phtx) {
9504 if (host_to_target_timex(arg2, phtx) != 0) {
9505 return -TARGET_EFAULT;
9509 return ret;
9510 #endif
9511 case TARGET_NR_getpgid:
9512 return get_errno(getpgid(arg1));
9513 case TARGET_NR_fchdir:
9514 return get_errno(fchdir(arg1));
9515 case TARGET_NR_personality:
9516 return get_errno(personality(arg1));
9517 #ifdef TARGET_NR__llseek /* Not on alpha */
9518 case TARGET_NR__llseek:
9520 int64_t res;
9521 #if !defined(__NR_llseek)
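/* Hosts without a separate llseek syscall are 64-bit, so we can
 * combine the guest's two 32-bit halves into one offset and use a
 * plain lseek() directly.
 */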
9522 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9523 if (res == -1) {
9524 ret = get_errno(res);
9525 } else {
9526 ret = 0;
9528 #else
9529 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9530 #endif
9531 if ((ret == 0) && put_user_s64(res, arg4)) {
9532 return -TARGET_EFAULT;
9535 return ret;
9536 #endif
9537 #ifdef TARGET_NR_getdents
9538 case TARGET_NR_getdents:
9539 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9540 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9542 struct target_dirent *target_dirp;
9543 struct linux_dirent *dirp;
9544 abi_long count = arg3;
9546 dirp = g_try_malloc(count);
9547 if (!dirp) {
9548 return -TARGET_ENOMEM;
9551 ret = get_errno(sys_getdents(arg1, dirp, count));
9552 if (!is_error(ret)) {
9553 struct linux_dirent *de;
9554 struct target_dirent *tde;
9555 int len = ret;
9556 int reclen, treclen;
9557 int count1, tnamelen;
9559 count1 = 0;
9560 de = dirp;
9561 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9562 return -TARGET_EFAULT;
9563 tde = target_dirp;
9564 while (len > 0) {
9565 reclen = de->d_reclen;
9566 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9567 assert(tnamelen >= 0);
9568 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9569 assert(count1 + treclen <= count);
9570 tde->d_reclen = tswap16(treclen);
9571 tde->d_ino = tswapal(de->d_ino);
9572 tde->d_off = tswapal(de->d_off);
9573 memcpy(tde->d_name, de->d_name, tnamelen);
9574 de = (struct linux_dirent *)((char *)de + reclen);
9575 len -= reclen;
9576 tde = (struct target_dirent *)((char *)tde + treclen);
9577 count1 += treclen;
9579 ret = count1;
9580 unlock_user(target_dirp, arg2, ret);
9582 g_free(dirp);
9584 #else
9586 struct linux_dirent *dirp;
9587 abi_long count = arg3;
9589 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9590 return -TARGET_EFAULT;
9591 ret = get_errno(sys_getdents(arg1, dirp, count));
9592 if (!is_error(ret)) {
9593 struct linux_dirent *de;
9594 int len = ret;
9595 int reclen;
9596 de = dirp;
9597 while (len > 0) {
9598 reclen = de->d_reclen;
9599 if (reclen > len)
9600 break;
9601 de->d_reclen = tswap16(reclen);
9602 tswapls(&de->d_ino);
9603 tswapls(&de->d_off);
9604 de = (struct linux_dirent *)((char *)de + reclen);
9605 len -= reclen;
9608 unlock_user(dirp, arg2, ret);
9610 #endif
9611 #else
9612 /* Implement getdents in terms of getdents64 */
9614 struct linux_dirent64 *dirp;
9615 abi_long count = arg3;
9617 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9618 if (!dirp) {
9619 return -TARGET_EFAULT;
9621 ret = get_errno(sys_getdents64(arg1, dirp, count));
9622 if (!is_error(ret)) {
9623 /* Convert the dirent64 structs to target dirent. We do this
9624 * in-place, since we can guarantee that a target_dirent is no
9625 * larger than a dirent64; however this means we have to be
9626 * careful to read everything before writing in the new format.
9627 */
9628 struct linux_dirent64 *de;
9629 struct target_dirent *tde;
9630 int len = ret;
9631 int tlen = 0;
9633 de = dirp;
9634 tde = (struct target_dirent *)dirp;
9635 while (len > 0) {
9636 int namelen, treclen;
9637 int reclen = de->d_reclen;
9638 uint64_t ino = de->d_ino;
9639 int64_t off = de->d_off;
9640 uint8_t type = de->d_type;
9642 namelen = strlen(de->d_name);
9643 treclen = offsetof(struct target_dirent, d_name)
9644 + namelen + 2;
9645 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
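/* The record is the dirent header plus the name, its trailing NUL
 * and one byte for d_type (stored last, see below), aligned up to
 * an abi_long boundary.
 */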
9647 memmove(tde->d_name, de->d_name, namelen + 1);
9648 tde->d_ino = tswapal(ino);
9649 tde->d_off = tswapal(off);
9650 tde->d_reclen = tswap16(treclen);
9651 /* The target_dirent type is in what was formerly a padding
9652 * byte at the end of the structure:
9653 */
9654 *(((char *)tde) + treclen - 1) = type;
9656 de = (struct linux_dirent64 *)((char *)de + reclen);
9657 tde = (struct target_dirent *)((char *)tde + treclen);
9658 len -= reclen;
9659 tlen += treclen;
9661 ret = tlen;
9663 unlock_user(dirp, arg2, ret);
9665 #endif
9666 return ret;
9667 #endif /* TARGET_NR_getdents */
9668 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9669 case TARGET_NR_getdents64:
9671 struct linux_dirent64 *dirp;
9672 abi_long count = arg3;
9673 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9674 return -TARGET_EFAULT;
9675 ret = get_errno(sys_getdents64(arg1, dirp, count));
9676 if (!is_error(ret)) {
9677 struct linux_dirent64 *de;
9678 int len = ret;
9679 int reclen;
9680 de = dirp;
9681 while (len > 0) {
9682 reclen = de->d_reclen;
9683 if (reclen > len)
9684 break;
9685 de->d_reclen = tswap16(reclen);
9686 tswap64s((uint64_t *)&de->d_ino);
9687 tswap64s((uint64_t *)&de->d_off);
9688 de = (struct linux_dirent64 *)((char *)de + reclen);
9689 len -= reclen;
9692 unlock_user(dirp, arg2, ret);
9694 return ret;
9695 #endif /* TARGET_NR_getdents64 */
9696 #if defined(TARGET_NR__newselect)
9697 case TARGET_NR__newselect:
9698 return do_select(arg1, arg2, arg3, arg4, arg5);
9699 #endif
9700 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9701 # ifdef TARGET_NR_poll
9702 case TARGET_NR_poll:
9703 # endif
9704 # ifdef TARGET_NR_ppoll
9705 case TARGET_NR_ppoll:
9706 # endif
9708 struct target_pollfd *target_pfd;
9709 unsigned int nfds = arg2;
9710 struct pollfd *pfd;
9711 unsigned int i;
9713 pfd = NULL;
9714 target_pfd = NULL;
9715 if (nfds) {
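/* Bound nfds first so that the size computations below cannot
 * overflow. */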
9716 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9717 return -TARGET_EINVAL;
9720 target_pfd = lock_user(VERIFY_WRITE, arg1,
9721 sizeof(struct target_pollfd) * nfds, 1);
9722 if (!target_pfd) {
9723 return -TARGET_EFAULT;
9726 pfd = alloca(sizeof(struct pollfd) * nfds);
9727 for (i = 0; i < nfds; i++) {
9728 pfd[i].fd = tswap32(target_pfd[i].fd);
9729 pfd[i].events = tswap16(target_pfd[i].events);
9733 switch (num) {
9734 # ifdef TARGET_NR_ppoll
9735 case TARGET_NR_ppoll:
9737 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9738 target_sigset_t *target_set;
9739 sigset_t _set, *set = &_set;
9741 if (arg3) {
9742 if (target_to_host_timespec(timeout_ts, arg3)) {
9743 unlock_user(target_pfd, arg1, 0);
9744 return -TARGET_EFAULT;
9746 } else {
9747 timeout_ts = NULL;
9750 if (arg4) {
9751 if (arg5 != sizeof(target_sigset_t)) {
9752 unlock_user(target_pfd, arg1, 0);
9753 return -TARGET_EINVAL;
9756 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9757 if (!target_set) {
9758 unlock_user(target_pfd, arg1, 0);
9759 return -TARGET_EFAULT;
9761 target_to_host_sigset(set, target_set);
9762 } else {
9763 set = NULL;
9766 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9767 set, SIGSET_T_SIZE));
9769 if (!is_error(ret) && arg3) {
9770 host_to_target_timespec(arg3, timeout_ts);
9772 if (arg4) {
9773 unlock_user(target_set, arg4, 0);
9775 break;
9777 # endif
9778 # ifdef TARGET_NR_poll
9779 case TARGET_NR_poll:
9781 struct timespec ts, *pts;
9783 if (arg3 >= 0) {
9784 /* Convert ms to secs, ns */
9785 ts.tv_sec = arg3 / 1000;
9786 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9787 pts = &ts;
9788 } else {
9789 /* -ve poll() timeout means "infinite" */
9790 pts = NULL;
9792 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9793 break;
9795 # endif
9796 default:
9797 g_assert_not_reached();
9800 if (!is_error(ret)) {
9801 for (i = 0; i < nfds; i++) {
9802 target_pfd[i].revents = tswap16(pfd[i].revents);
9805 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9807 return ret;
9808 #endif
9809 case TARGET_NR_flock:
9810 /* NOTE: the flock constant seems to be the same for every
9811 Linux platform */
9812 return get_errno(safe_flock(arg1, arg2));
9813 case TARGET_NR_readv:
9815 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9816 if (vec != NULL) {
9817 ret = get_errno(safe_readv(arg1, vec, arg3));
9818 unlock_iovec(vec, arg2, arg3, 1);
9819 } else {
9820 ret = -host_to_target_errno(errno);
9823 return ret;
9824 case TARGET_NR_writev:
9826 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9827 if (vec != NULL) {
9828 ret = get_errno(safe_writev(arg1, vec, arg3));
9829 unlock_iovec(vec, arg2, arg3, 0);
9830 } else {
9831 ret = -host_to_target_errno(errno);
9834 return ret;
9835 #if defined(TARGET_NR_preadv)
9836 case TARGET_NR_preadv:
9838 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9839 if (vec != NULL) {
9840 unsigned long low, high;
9842 target_to_host_low_high(arg4, arg5, &low, &high);
9843 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9844 unlock_iovec(vec, arg2, arg3, 1);
9845 } else {
9846 ret = -host_to_target_errno(errno);
9849 return ret;
9850 #endif
9851 #if defined(TARGET_NR_pwritev)
9852 case TARGET_NR_pwritev:
9854 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9855 if (vec != NULL) {
9856 unsigned long low, high;
9858 target_to_host_low_high(arg4, arg5, &low, &high);
9859 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9860 unlock_iovec(vec, arg2, arg3, 0);
9861 } else {
9862 ret = -host_to_target_errno(errno);
9865 return ret;
9866 #endif
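/*
 * preadv/pwritev note: on 32-bit ABIs the 64-bit file offset arrives
 * split across two guest registers; target_to_host_low_high() produces
 * the low/high halves in the order the safe_preadv()/safe_pwritev()
 * wrappers (and ultimately the host syscall's pos_l/pos_h arguments)
 * expect.
 */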
9867 case TARGET_NR_getsid:
9868 return get_errno(getsid(arg1));
9869 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9870 case TARGET_NR_fdatasync:
9871 return get_errno(fdatasync(arg1));
9872 #endif
9873 #ifdef TARGET_NR__sysctl
9874 case TARGET_NR__sysctl:
9875 /* We don't implement this, but ENOTDIR is always a safe
9876 return value. */
9877 return -TARGET_ENOTDIR;
9878 #endif
9879 case TARGET_NR_sched_getaffinity:
9881 unsigned int mask_size;
9882 unsigned long *mask;
9884 /*
9885 * sched_getaffinity needs multiples of ulong, so need to take
9886 * care of mismatches between target ulong and host ulong sizes.
9887 */
9888 if (arg2 & (sizeof(abi_ulong) - 1)) {
9889 return -TARGET_EINVAL;
9891 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9893 mask = alloca(mask_size);
9894 memset(mask, 0, mask_size);
9895 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9897 if (!is_error(ret)) {
9898 if (ret > arg2) {
9899 /* More data returned than the caller's buffer will fit.
9900 * This only happens if sizeof(abi_long) < sizeof(long)
9901 * and the caller passed us a buffer holding an odd number
9902 * of abi_longs. If the host kernel is actually using the
9903 * extra 4 bytes then fail EINVAL; otherwise we can just
9904 * ignore them and only copy the interesting part.
9905 */
9906 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9907 if (numcpus > arg2 * 8) {
9908 return -TARGET_EINVAL;
9910 ret = arg2;
9913 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9914 return -TARGET_EFAULT;
9918 return ret;
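/*
 * Worked example for the rounding above, assuming an 8-byte host long
 * and a guest buffer of 12 bytes (an odd number of 32-bit abi_ulongs):
 *
 *     mask_size = (12 + 7) & ~7;   // -> 16 bytes, i.e. two host words
 *
 * The kernel then reports how many bytes it wrote; if that exceeds what
 * the guest asked for, the code above checks whether any configured
 * CPUs could live in the tail before choosing truncation or EINVAL.
 */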
9919 case TARGET_NR_sched_setaffinity:
9921 unsigned int mask_size;
9922 unsigned long *mask;
9924 /*
9925 * sched_setaffinity needs multiples of ulong, so need to take
9926 * care of mismatches between target ulong and host ulong sizes.
9927 */
9928 if (arg2 & (sizeof(abi_ulong) - 1)) {
9929 return -TARGET_EINVAL;
9931 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9932 mask = alloca(mask_size);
9934 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9935 if (ret) {
9936 return ret;
9939 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9941 case TARGET_NR_getcpu:
9943 unsigned cpu, node;
9944 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9945 arg2 ? &node : NULL,
9946 NULL));
9947 if (is_error(ret)) {
9948 return ret;
9950 if (arg1 && put_user_u32(cpu, arg1)) {
9951 return -TARGET_EFAULT;
9953 if (arg2 && put_user_u32(node, arg2)) {
9954 return -TARGET_EFAULT;
9957 return ret;
9958 case TARGET_NR_sched_setparam:
9960 struct sched_param *target_schp;
9961 struct sched_param schp;
9963 if (arg2 == 0) {
9964 return -TARGET_EINVAL;
9966 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9967 return -TARGET_EFAULT;
9968 schp.sched_priority = tswap32(target_schp->sched_priority);
9969 unlock_user_struct(target_schp, arg2, 0);
9970 return get_errno(sched_setparam(arg1, &schp));
9972 case TARGET_NR_sched_getparam:
9974 struct sched_param *target_schp;
9975 struct sched_param schp;
9977 if (arg2 == 0) {
9978 return -TARGET_EINVAL;
9980 ret = get_errno(sched_getparam(arg1, &schp));
9981 if (!is_error(ret)) {
9982 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9983 return -TARGET_EFAULT;
9984 target_schp->sched_priority = tswap32(schp.sched_priority);
9985 unlock_user_struct(target_schp, arg2, 1);
9988 return ret;
9989 case TARGET_NR_sched_setscheduler:
9991 struct sched_param *target_schp;
9992 struct sched_param schp;
9993 if (arg3 == 0) {
9994 return -TARGET_EINVAL;
9996 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9997 return -TARGET_EFAULT;
9998 schp.sched_priority = tswap32(target_schp->sched_priority);
9999 unlock_user_struct(target_schp, arg3, 0);
10000 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10002 case TARGET_NR_sched_getscheduler:
10003 return get_errno(sched_getscheduler(arg1));
10004 case TARGET_NR_sched_yield:
10005 return get_errno(sched_yield());
10006 case TARGET_NR_sched_get_priority_max:
10007 return get_errno(sched_get_priority_max(arg1));
10008 case TARGET_NR_sched_get_priority_min:
10009 return get_errno(sched_get_priority_min(arg1));
10010 case TARGET_NR_sched_rr_get_interval:
10012 struct timespec ts;
10013 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10014 if (!is_error(ret)) {
10015 ret = host_to_target_timespec(arg2, &ts);
10018 return ret;
10019 case TARGET_NR_nanosleep:
10021 struct timespec req, rem;
10022 target_to_host_timespec(&req, arg1);
10023 ret = get_errno(safe_nanosleep(&req, &rem));
10024 if (is_error(ret) && arg2) {
10025 host_to_target_timespec(arg2, &rem);
10028 return ret;
10029 case TARGET_NR_prctl:
10030 switch (arg1) {
10031 case PR_GET_PDEATHSIG:
10033 int deathsig;
10034 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10035 if (!is_error(ret) && arg2
10036 && put_user_ual(deathsig, arg2)) {
10037 return -TARGET_EFAULT;
10039 return ret;
10041 #ifdef PR_GET_NAME
10042 case PR_GET_NAME:
10044 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10045 if (!name) {
10046 return -TARGET_EFAULT;
10048 ret = get_errno(prctl(arg1, (unsigned long)name,
10049 arg3, arg4, arg5));
10050 unlock_user(name, arg2, 16);
10051 return ret;
10053 case PR_SET_NAME:
10055 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10056 if (!name) {
10057 return -TARGET_EFAULT;
10059 ret = get_errno(prctl(arg1, (unsigned long)name,
10060 arg3, arg4, arg5));
10061 unlock_user(name, arg2, 0);
10062 return ret;
10064 #endif
10065 #ifdef TARGET_MIPS
10066 case TARGET_PR_GET_FP_MODE:
10068 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10069 ret = 0;
10070 if (env->CP0_Status & (1 << CP0St_FR)) {
10071 ret |= TARGET_PR_FP_MODE_FR;
10073 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10074 ret |= TARGET_PR_FP_MODE_FRE;
10076 return ret;
10078 case TARGET_PR_SET_FP_MODE:
10080 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10081 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10082 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10083 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10084 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10086 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10087 TARGET_PR_FP_MODE_FRE;
10089 /* If nothing to change, return right away, successfully. */
10090 if (old_fr == new_fr && old_fre == new_fre) {
10091 return 0;
10093 /* Check the value is valid */
10094 if (arg2 & ~known_bits) {
10095 return -TARGET_EOPNOTSUPP;
10097 /* Setting FRE without FR is not supported. */
10098 if (new_fre && !new_fr) {
10099 return -TARGET_EOPNOTSUPP;
10101 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10102 /* FR1 is not supported */
10103 return -TARGET_EOPNOTSUPP;
10105 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10106 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10107 /* cannot set FR=0 */
10108 return -TARGET_EOPNOTSUPP;
10110 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10111 /* Cannot set FRE=1 */
10112 return -TARGET_EOPNOTSUPP;
10115 int i;
10116 fpr_t *fpr = env->active_fpu.fpr;
10117 for (i = 0; i < 32 ; i += 2) {
10118 if (!old_fr && new_fr) {
10119 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10120 } else if (old_fr && !new_fr) {
10121 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10125 if (new_fr) {
10126 env->CP0_Status |= (1 << CP0St_FR);
10127 env->hflags |= MIPS_HFLAG_F64;
10128 } else {
10129 env->CP0_Status &= ~(1 << CP0St_FR);
10130 env->hflags &= ~MIPS_HFLAG_F64;
10132 if (new_fre) {
10133 env->CP0_Config5 |= (1 << CP0C5_FRE);
10134 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10135 env->hflags |= MIPS_HFLAG_FRE;
10137 } else {
10138 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10139 env->hflags &= ~MIPS_HFLAG_FRE;
10142 return 0;
10144 #endif /* MIPS */
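/*
 * Note on TARGET_PR_SET_FP_MODE above: when the FR bit changes, the
 * 64-bit FPU registers are repacked so pairs of 32-bit values keep
 * their architectural meaning. With FR=0 an odd-numbered single lives
 * in the high half of the preceding even register, which is why the
 * loop copies fpr[i].w[!FP_ENDIAN_IDX] to/from fpr[i + 1].w[FP_ENDIAN_IDX]
 * for every even i.
 */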
10145 #ifdef TARGET_AARCH64
10146 case TARGET_PR_SVE_SET_VL:
10147 /*
10148 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10149 * PR_SVE_VL_INHERIT. Note the kernel definition
10150 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10151 * even though the current architectural maximum is VQ=16.
10152 */
10153 ret = -TARGET_EINVAL;
10154 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10155 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10156 CPUARMState *env = cpu_env;
10157 ARMCPU *cpu = env_archcpu(env);
10158 uint32_t vq, old_vq;
10160 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10161 vq = MAX(arg2 / 16, 1);
10162 vq = MIN(vq, cpu->sve_max_vq);
10164 if (vq < old_vq) {
10165 aarch64_sve_narrow_vq(env, vq);
10167 env->vfp.zcr_el[1] = vq - 1;
10168 arm_rebuild_hflags(env);
10169 ret = vq * 16;
10171 return ret;
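/*
 * SVE vector-length bookkeeping, as used above: ZCR_EL1 stores VQ-1,
 * where one VQ unit is 16 bytes (128 bits) of vector length. For a
 * guest requesting VL = 64 bytes:
 *
 *     vq = MAX(64 / 16, 1);        // -> 4
 *     env->vfp.zcr_el[1] = vq - 1; // -> 3
 *     ret = vq * 16;               // -> 64, the granted VL in bytes
 *
 * Shrinking VQ also narrows the live register state via
 * aarch64_sve_narrow_vq().
 */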
10172 case TARGET_PR_SVE_GET_VL:
10173 ret = -TARGET_EINVAL;
10175 ARMCPU *cpu = env_archcpu(cpu_env);
10176 if (cpu_isar_feature(aa64_sve, cpu)) {
10177 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10180 return ret;
10181 case TARGET_PR_PAC_RESET_KEYS:
10183 CPUARMState *env = cpu_env;
10184 ARMCPU *cpu = env_archcpu(env);
10186 if (arg3 || arg4 || arg5) {
10187 return -TARGET_EINVAL;
10189 if (cpu_isar_feature(aa64_pauth, cpu)) {
10190 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10191 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10192 TARGET_PR_PAC_APGAKEY);
10193 int ret = 0;
10194 Error *err = NULL;
10196 if (arg2 == 0) {
10197 arg2 = all;
10198 } else if (arg2 & ~all) {
10199 return -TARGET_EINVAL;
10201 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10202 ret |= qemu_guest_getrandom(&env->keys.apia,
10203 sizeof(ARMPACKey), &err);
10205 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10206 ret |= qemu_guest_getrandom(&env->keys.apib,
10207 sizeof(ARMPACKey), &err);
10209 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10210 ret |= qemu_guest_getrandom(&env->keys.apda,
10211 sizeof(ARMPACKey), &err);
10213 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10214 ret |= qemu_guest_getrandom(&env->keys.apdb,
10215 sizeof(ARMPACKey), &err);
10217 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10218 ret |= qemu_guest_getrandom(&env->keys.apga,
10219 sizeof(ARMPACKey), &err);
10221 if (ret != 0) {
10222 /*
10223 * Some unknown failure in the crypto. The best
10224 * we can do is log it and fail the syscall.
10225 * The real syscall cannot fail this way.
10226 */
10227 qemu_log_mask(LOG_UNIMP,
10228 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10229 error_get_pretty(err));
10230 error_free(err);
10231 return -TARGET_EIO;
10233 return 0;
10236 return -TARGET_EINVAL;
10237 #endif /* AARCH64 */
10238 case PR_GET_SECCOMP:
10239 case PR_SET_SECCOMP:
10240 /* Disable seccomp to prevent the target from disabling syscalls
10241 * that we ourselves need. */
10242 return -TARGET_EINVAL;
10243 default:
10244 /* Most prctl options have no pointer arguments */
10245 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10247 break;
10248 #ifdef TARGET_NR_arch_prctl
10249 case TARGET_NR_arch_prctl:
10250 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10251 return do_arch_prctl(cpu_env, arg1, arg2);
10252 #else
10253 #error unreachable
10254 #endif
10255 #endif
10256 #ifdef TARGET_NR_pread64
10257 case TARGET_NR_pread64:
10258 if (regpairs_aligned(cpu_env, num)) {
10259 arg4 = arg5;
10260 arg5 = arg6;
10262 if (arg2 == 0 && arg3 == 0) {
10263 /* Special-case NULL buffer and zero length, which should succeed */
10264 p = 0;
10265 } else {
10266 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10267 if (!p) {
10268 return -TARGET_EFAULT;
10271 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10272 unlock_user(p, arg2, ret);
10273 return ret;
10274 case TARGET_NR_pwrite64:
10275 if (regpairs_aligned(cpu_env, num)) {
10276 arg4 = arg5;
10277 arg5 = arg6;
10279 if (arg2 == 0 && arg3 == 0) {
10280 /* Special-case NULL buffer and zero length, which should succeed */
10281 p = 0;
10282 } else {
10283 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10284 if (!p) {
10285 return -TARGET_EFAULT;
10288 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10289 unlock_user(p, arg2, 0);
10290 return ret;
10291 #endif
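/*
 * pread64/pwrite64 note: on 32-bit ABIs the 64-bit offset is passed in
 * a register pair, and some targets require that pair to start on an
 * even register (regpairs_aligned), which inserts one unused argument
 * and shifts everything down - hence the arg4 = arg5 / arg5 = arg6
 * dance above. target_offset64() then recombines the two 32-bit halves
 * into the host's 64-bit off_t.
 */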
10292 case TARGET_NR_getcwd:
10293 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10294 return -TARGET_EFAULT;
10295 ret = get_errno(sys_getcwd1(p, arg2));
10296 unlock_user(p, arg1, ret);
10297 return ret;
10298 case TARGET_NR_capget:
10299 case TARGET_NR_capset:
10301 struct target_user_cap_header *target_header;
10302 struct target_user_cap_data *target_data = NULL;
10303 struct __user_cap_header_struct header;
10304 struct __user_cap_data_struct data[2];
10305 struct __user_cap_data_struct *dataptr = NULL;
10306 int i, target_datalen;
10307 int data_items = 1;
10309 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10310 return -TARGET_EFAULT;
10312 header.version = tswap32(target_header->version);
10313 header.pid = tswap32(target_header->pid);
10315 if (header.version != _LINUX_CAPABILITY_VERSION) {
10316 /* Versions 2 and up take a pointer to two user_data structs */
10317 data_items = 2;
10320 target_datalen = sizeof(*target_data) * data_items;
10322 if (arg2) {
10323 if (num == TARGET_NR_capget) {
10324 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10325 } else {
10326 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10328 if (!target_data) {
10329 unlock_user_struct(target_header, arg1, 0);
10330 return -TARGET_EFAULT;
10333 if (num == TARGET_NR_capset) {
10334 for (i = 0; i < data_items; i++) {
10335 data[i].effective = tswap32(target_data[i].effective);
10336 data[i].permitted = tswap32(target_data[i].permitted);
10337 data[i].inheritable = tswap32(target_data[i].inheritable);
10341 dataptr = data;
10344 if (num == TARGET_NR_capget) {
10345 ret = get_errno(capget(&header, dataptr));
10346 } else {
10347 ret = get_errno(capset(&header, dataptr));
10350 /* The kernel always updates version for both capget and capset */
10351 target_header->version = tswap32(header.version);
10352 unlock_user_struct(target_header, arg1, 1);
10354 if (arg2) {
10355 if (num == TARGET_NR_capget) {
10356 for (i = 0; i < data_items; i++) {
10357 target_data[i].effective = tswap32(data[i].effective);
10358 target_data[i].permitted = tswap32(data[i].permitted);
10359 target_data[i].inheritable = tswap32(data[i].inheritable);
10361 unlock_user(target_data, arg2, target_datalen);
10362 } else {
10363 unlock_user(target_data, arg2, 0);
10366 return ret;
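/*
 * capget/capset sizing note: only the original _LINUX_CAPABILITY_VERSION
 * (v1) header uses a single 32-bit __user_cap_data_struct; any newer
 * version is treated above as carrying two of them (64 capability
 * bits), and the kernel-updated version field is always copied back so
 * the guest can probe what the kernel supports.
 */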
10368 case TARGET_NR_sigaltstack:
10369 return do_sigaltstack(arg1, arg2,
10370 get_sp_from_cpustate((CPUArchState *)cpu_env));
10372 #ifdef CONFIG_SENDFILE
10373 #ifdef TARGET_NR_sendfile
10374 case TARGET_NR_sendfile:
10376 off_t *offp = NULL;
10377 off_t off;
10378 if (arg3) {
10379 ret = get_user_sal(off, arg3);
10380 if (is_error(ret)) {
10381 return ret;
10383 offp = &off;
10385 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10386 if (!is_error(ret) && arg3) {
10387 abi_long ret2 = put_user_sal(off, arg3);
10388 if (is_error(ret2)) {
10389 ret = ret2;
10392 return ret;
10394 #endif
10395 #ifdef TARGET_NR_sendfile64
10396 case TARGET_NR_sendfile64:
10398 off_t *offp = NULL;
10399 off_t off;
10400 if (arg3) {
10401 ret = get_user_s64(off, arg3);
10402 if (is_error(ret)) {
10403 return ret;
10405 offp = &off;
10407 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10408 if (!is_error(ret) && arg3) {
10409 abi_long ret2 = put_user_s64(off, arg3);
10410 if (is_error(ret2)) {
10411 ret = ret2;
10414 return ret;
10416 #endif
10417 #endif
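/*
 * sendfile/sendfile64 note: both variants call the host sendfile(),
 * differing only in how the optional offset is copied - get_user_sal/
 * put_user_sal move an abi_long for sendfile, get_user_s64/put_user_s64
 * a 64-bit value for sendfile64. The updated offset is written back to
 * the guest only when the host call itself succeeded.
 */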
10418 #ifdef TARGET_NR_vfork
10419 case TARGET_NR_vfork:
10420 return get_errno(do_fork(cpu_env,
10421 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10422 0, 0, 0, 0));
10423 #endif
10424 #ifdef TARGET_NR_ugetrlimit
10425 case TARGET_NR_ugetrlimit:
10427 struct rlimit rlim;
10428 int resource = target_to_host_resource(arg1);
10429 ret = get_errno(getrlimit(resource, &rlim));
10430 if (!is_error(ret)) {
10431 struct target_rlimit *target_rlim;
10432 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10433 return -TARGET_EFAULT;
10434 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10435 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10436 unlock_user_struct(target_rlim, arg2, 1);
10438 return ret;
10440 #endif
10441 #ifdef TARGET_NR_truncate64
10442 case TARGET_NR_truncate64:
10443 if (!(p = lock_user_string(arg1)))
10444 return -TARGET_EFAULT;
10445 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10446 unlock_user(p, arg1, 0);
10447 return ret;
10448 #endif
10449 #ifdef TARGET_NR_ftruncate64
10450 case TARGET_NR_ftruncate64:
10451 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10452 #endif
10453 #ifdef TARGET_NR_stat64
10454 case TARGET_NR_stat64:
10455 if (!(p = lock_user_string(arg1))) {
10456 return -TARGET_EFAULT;
10458 ret = get_errno(stat(path(p), &st));
10459 unlock_user(p, arg1, 0);
10460 if (!is_error(ret))
10461 ret = host_to_target_stat64(cpu_env, arg2, &st);
10462 return ret;
10463 #endif
10464 #ifdef TARGET_NR_lstat64
10465 case TARGET_NR_lstat64:
10466 if (!(p = lock_user_string(arg1))) {
10467 return -TARGET_EFAULT;
10469 ret = get_errno(lstat(path(p), &st));
10470 unlock_user(p, arg1, 0);
10471 if (!is_error(ret))
10472 ret = host_to_target_stat64(cpu_env, arg2, &st);
10473 return ret;
10474 #endif
10475 #ifdef TARGET_NR_fstat64
10476 case TARGET_NR_fstat64:
10477 ret = get_errno(fstat(arg1, &st));
10478 if (!is_error(ret))
10479 ret = host_to_target_stat64(cpu_env, arg2, &st);
10480 return ret;
10481 #endif
10482 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10483 #ifdef TARGET_NR_fstatat64
10484 case TARGET_NR_fstatat64:
10485 #endif
10486 #ifdef TARGET_NR_newfstatat
10487 case TARGET_NR_newfstatat:
10488 #endif
10489 if (!(p = lock_user_string(arg2))) {
10490 return -TARGET_EFAULT;
10492 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10493 unlock_user(p, arg2, 0);
10494 if (!is_error(ret))
10495 ret = host_to_target_stat64(cpu_env, arg3, &st);
10496 return ret;
10497 #endif
10498 #if defined(TARGET_NR_statx)
10499 case TARGET_NR_statx:
10501 struct target_statx *target_stx;
10502 int dirfd = arg1;
10503 int flags = arg3;
10505 p = lock_user_string(arg2);
10506 if (p == NULL) {
10507 return -TARGET_EFAULT;
10509 #if defined(__NR_statx)
10511 /*
10512 * It is assumed that struct statx is architecture independent.
10513 */
10514 struct target_statx host_stx;
10515 int mask = arg4;
10517 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10518 if (!is_error(ret)) {
10519 if (host_to_target_statx(&host_stx, arg5) != 0) {
10520 unlock_user(p, arg2, 0);
10521 return -TARGET_EFAULT;
10525 if (ret != -TARGET_ENOSYS) {
10526 unlock_user(p, arg2, 0);
10527 return ret;
10530 #endif
10531 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10532 unlock_user(p, arg2, 0);
10534 if (!is_error(ret)) {
10535 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10536 return -TARGET_EFAULT;
10538 memset(target_stx, 0, sizeof(*target_stx));
10539 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10540 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10541 __put_user(st.st_ino, &target_stx->stx_ino);
10542 __put_user(st.st_mode, &target_stx->stx_mode);
10543 __put_user(st.st_uid, &target_stx->stx_uid);
10544 __put_user(st.st_gid, &target_stx->stx_gid);
10545 __put_user(st.st_nlink, &target_stx->stx_nlink);
10546 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10547 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10548 __put_user(st.st_size, &target_stx->stx_size);
10549 __put_user(st.st_blksize, &target_stx->stx_blksize);
10550 __put_user(st.st_blocks, &target_stx->stx_blocks);
10551 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10552 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10553 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10554 unlock_user_struct(target_stx, arg5, 1);
10557 return ret;
10558 #endif
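/*
 * statx fallback, as implemented above: if the host has __NR_statx the
 * call is forwarded directly; when the host kernel answers ENOSYS (or
 * the syscall is not compiled in), the emulation degrades to fstatat()
 * and synthesizes a struct target_statx from the resulting struct
 * stat, filling only the fields fstatat() can provide.
 */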
10559 #ifdef TARGET_NR_lchown
10560 case TARGET_NR_lchown:
10561 if (!(p = lock_user_string(arg1)))
10562 return -TARGET_EFAULT;
10563 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10564 unlock_user(p, arg1, 0);
10565 return ret;
10566 #endif
10567 #ifdef TARGET_NR_getuid
10568 case TARGET_NR_getuid:
10569 return get_errno(high2lowuid(getuid()));
10570 #endif
10571 #ifdef TARGET_NR_getgid
10572 case TARGET_NR_getgid:
10573 return get_errno(high2lowgid(getgid()));
10574 #endif
10575 #ifdef TARGET_NR_geteuid
10576 case TARGET_NR_geteuid:
10577 return get_errno(high2lowuid(geteuid()));
10578 #endif
10579 #ifdef TARGET_NR_getegid
10580 case TARGET_NR_getegid:
10581 return get_errno(high2lowgid(getegid()));
10582 #endif
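/*
 * Legacy ID-syscall note: getuid/getgid and the other calls in this
 * stretch are the old narrow variants, so host IDs pass through
 * high2lowuid/high2lowgid (which, on 16-bit-uid targets, typically
 * clamp values that do not fit to the overflow ID), while the *32
 * variants further down pass full 32-bit IDs straight through.
 */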
10583 case TARGET_NR_setreuid:
10584 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10585 case TARGET_NR_setregid:
10586 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10587 case TARGET_NR_getgroups:
10589 int gidsetsize = arg1;
10590 target_id *target_grouplist;
10591 gid_t *grouplist;
10592 int i;
10594 grouplist = alloca(gidsetsize * sizeof(gid_t));
10595 ret = get_errno(getgroups(gidsetsize, grouplist));
10596 if (gidsetsize == 0)
10597 return ret;
10598 if (!is_error(ret)) {
10599 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10600 if (!target_grouplist)
10601 return -TARGET_EFAULT;
10602 for (i = 0; i < ret; i++)
10603 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10604 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10607 return ret;
10608 case TARGET_NR_setgroups:
10610 int gidsetsize = arg1;
10611 target_id *target_grouplist;
10612 gid_t *grouplist = NULL;
10613 int i;
10614 if (gidsetsize) {
10615 grouplist = alloca(gidsetsize * sizeof(gid_t));
10616 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10617 if (!target_grouplist) {
10618 return -TARGET_EFAULT;
10620 for (i = 0; i < gidsetsize; i++) {
10621 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10623 unlock_user(target_grouplist, arg2, 0);
10625 return get_errno(setgroups(gidsetsize, grouplist));
10627 case TARGET_NR_fchown:
10628 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10629 #if defined(TARGET_NR_fchownat)
10630 case TARGET_NR_fchownat:
10631 if (!(p = lock_user_string(arg2)))
10632 return -TARGET_EFAULT;
10633 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10634 low2highgid(arg4), arg5));
10635 unlock_user(p, arg2, 0);
10636 return ret;
10637 #endif
10638 #ifdef TARGET_NR_setresuid
10639 case TARGET_NR_setresuid:
10640 return get_errno(sys_setresuid(low2highuid(arg1),
10641 low2highuid(arg2),
10642 low2highuid(arg3)));
10643 #endif
10644 #ifdef TARGET_NR_getresuid
10645 case TARGET_NR_getresuid:
10647 uid_t ruid, euid, suid;
10648 ret = get_errno(getresuid(&ruid, &euid, &suid));
10649 if (!is_error(ret)) {
10650 if (put_user_id(high2lowuid(ruid), arg1)
10651 || put_user_id(high2lowuid(euid), arg2)
10652 || put_user_id(high2lowuid(suid), arg3))
10653 return -TARGET_EFAULT;
10656 return ret;
10657 #endif
10658 #ifdef TARGET_NR_getresgid
10659 case TARGET_NR_setresgid:
10660 return get_errno(sys_setresgid(low2highgid(arg1),
10661 low2highgid(arg2),
10662 low2highgid(arg3)));
10663 #endif
10664 #ifdef TARGET_NR_getresgid
10665 case TARGET_NR_getresgid:
10667 gid_t rgid, egid, sgid;
10668 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10669 if (!is_error(ret)) {
10670 if (put_user_id(high2lowgid(rgid), arg1)
10671 || put_user_id(high2lowgid(egid), arg2)
10672 || put_user_id(high2lowgid(sgid), arg3))
10673 return -TARGET_EFAULT;
10676 return ret;
10677 #endif
10678 #ifdef TARGET_NR_chown
10679 case TARGET_NR_chown:
10680 if (!(p = lock_user_string(arg1)))
10681 return -TARGET_EFAULT;
10682 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10683 unlock_user(p, arg1, 0);
10684 return ret;
10685 #endif
10686 case TARGET_NR_setuid:
10687 return get_errno(sys_setuid(low2highuid(arg1)));
10688 case TARGET_NR_setgid:
10689 return get_errno(sys_setgid(low2highgid(arg1)));
10690 case TARGET_NR_setfsuid:
10691 return get_errno(setfsuid(arg1));
10692 case TARGET_NR_setfsgid:
10693 return get_errno(setfsgid(arg1));
10695 #ifdef TARGET_NR_lchown32
10696 case TARGET_NR_lchown32:
10697 if (!(p = lock_user_string(arg1)))
10698 return -TARGET_EFAULT;
10699 ret = get_errno(lchown(p, arg2, arg3));
10700 unlock_user(p, arg1, 0);
10701 return ret;
10702 #endif
10703 #ifdef TARGET_NR_getuid32
10704 case TARGET_NR_getuid32:
10705 return get_errno(getuid());
10706 #endif
10708 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10709 /* Alpha specific */
10710 case TARGET_NR_getxuid:
10712 uid_t euid;
10713 euid = geteuid();
10714 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10716 return get_errno(getuid());
10717 #endif
10718 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10719 /* Alpha specific */
10720 case TARGET_NR_getxgid:
10722 gid_t egid;
10723 egid = getegid();
10724 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10726 return get_errno(getgid());
10727 #endif
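/*
 * Alpha dual-result convention: these OSF-style syscalls return a
 * second value in register a4 (IR_A4), so getxuid/getxgid above store
 * the effective ID there while the normal return value carries the
 * real ID, matching the native Linux/alpha ABI.
 */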
10728 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10729 /* Alpha specific */
10730 case TARGET_NR_osf_getsysinfo:
10731 ret = -TARGET_EOPNOTSUPP;
10732 switch (arg1) {
10733 case TARGET_GSI_IEEE_FP_CONTROL:
10735 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10736 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10738 swcr &= ~SWCR_STATUS_MASK;
10739 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10741 if (put_user_u64 (swcr, arg2))
10742 return -TARGET_EFAULT;
10743 ret = 0;
10745 break;
10747 /* case GSI_IEEE_STATE_AT_SIGNAL:
10748 -- Not implemented in linux kernel.
10749 case GSI_UACPROC:
10750 -- Retrieves current unaligned access state; not much used.
10751 case GSI_PROC_TYPE:
10752 -- Retrieves implver information; surely not used.
10753 case GSI_GET_HWRPB:
10754 -- Grabs a copy of the HWRPB; surely not used.
10755 */
10757 return ret;
10758 #endif
10759 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10760 /* Alpha specific */
10761 case TARGET_NR_osf_setsysinfo:
10762 ret = -TARGET_EOPNOTSUPP;
10763 switch (arg1) {
10764 case TARGET_SSI_IEEE_FP_CONTROL:
10766 uint64_t swcr, fpcr;
10768 if (get_user_u64 (swcr, arg2)) {
10769 return -TARGET_EFAULT;
10773 * The kernel calls swcr_update_status to update the
10774 * status bits from the fpcr at every point that it
10775 * could be queried. Therefore, we store the status
10776 * bits only in FPCR.
10778 ((CPUAlphaState *)cpu_env)->swcr
10779 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10781 fpcr = cpu_alpha_load_fpcr(cpu_env);
10782 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10783 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10784 cpu_alpha_store_fpcr(cpu_env, fpcr);
10785 ret = 0;
10787 break;
10789 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10791 uint64_t exc, fpcr, fex;
10793 if (get_user_u64(exc, arg2)) {
10794 return -TARGET_EFAULT;
10796 exc &= SWCR_STATUS_MASK;
10797 fpcr = cpu_alpha_load_fpcr(cpu_env);
10799 /* Old exceptions are not signaled. */
10800 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10801 fex = exc & ~fex;
10802 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10803 fex &= ((CPUArchState *)cpu_env)->swcr;
10805 /* Update the hardware fpcr. */
10806 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10807 cpu_alpha_store_fpcr(cpu_env, fpcr);
10809 if (fex) {
10810 int si_code = TARGET_FPE_FLTUNK;
10811 target_siginfo_t info;
10813 if (fex & SWCR_TRAP_ENABLE_DNO) {
10814 si_code = TARGET_FPE_FLTUND;
10816 if (fex & SWCR_TRAP_ENABLE_INE) {
10817 si_code = TARGET_FPE_FLTRES;
10819 if (fex & SWCR_TRAP_ENABLE_UNF) {
10820 si_code = TARGET_FPE_FLTUND;
10822 if (fex & SWCR_TRAP_ENABLE_OVF) {
10823 si_code = TARGET_FPE_FLTOVF;
10825 if (fex & SWCR_TRAP_ENABLE_DZE) {
10826 si_code = TARGET_FPE_FLTDIV;
10828 if (fex & SWCR_TRAP_ENABLE_INV) {
10829 si_code = TARGET_FPE_FLTINV;
10832 info.si_signo = SIGFPE;
10833 info.si_errno = 0;
10834 info.si_code = si_code;
10835 info._sifields._sigfault._addr
10836 = ((CPUArchState *)cpu_env)->pc;
10837 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10838 QEMU_SI_FAULT, &info);
10840 ret = 0;
10842 break;
10844 /* case SSI_NVPAIRS:
10845 -- Used with SSIN_UACPROC to enable unaligned accesses.
10846 case SSI_IEEE_STATE_AT_SIGNAL:
10847 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10848 -- Not implemented in linux kernel.
10849 */
10851 return ret;
10852 #endif
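/*
 * Alpha FP-control emulation note: the guest-visible software control
 * word is kept split across two places, as the comments above describe:
 * trap-enable and mapping bits live in env->swcr, while the sticky
 * status bits live only in the hardware FPCR and are folded back in
 * ((fpcr >> 35) & SWCR_STATUS_MASK) when GSI_IEEE_FP_CONTROL reads
 * them out.
 */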
10853 #ifdef TARGET_NR_osf_sigprocmask
10854 /* Alpha specific. */
10855 case TARGET_NR_osf_sigprocmask:
10857 abi_ulong mask;
10858 int how;
10859 sigset_t set, oldset;
10861 switch (arg1) {
10862 case TARGET_SIG_BLOCK:
10863 how = SIG_BLOCK;
10864 break;
10865 case TARGET_SIG_UNBLOCK:
10866 how = SIG_UNBLOCK;
10867 break;
10868 case TARGET_SIG_SETMASK:
10869 how = SIG_SETMASK;
10870 break;
10871 default:
10872 return -TARGET_EINVAL;
10874 mask = arg2;
10875 target_to_host_old_sigset(&set, &mask);
10876 ret = do_sigprocmask(how, &set, &oldset);
10877 if (!ret) {
10878 host_to_target_old_sigset(&mask, &oldset);
10879 ret = mask;
10882 return ret;
10883 #endif
10885 #ifdef TARGET_NR_getgid32
10886 case TARGET_NR_getgid32:
10887 return get_errno(getgid());
10888 #endif
10889 #ifdef TARGET_NR_geteuid32
10890 case TARGET_NR_geteuid32:
10891 return get_errno(geteuid());
10892 #endif
10893 #ifdef TARGET_NR_getegid32
10894 case TARGET_NR_getegid32:
10895 return get_errno(getegid());
10896 #endif
10897 #ifdef TARGET_NR_setreuid32
10898 case TARGET_NR_setreuid32:
10899 return get_errno(setreuid(arg1, arg2));
10900 #endif
10901 #ifdef TARGET_NR_setregid32
10902 case TARGET_NR_setregid32:
10903 return get_errno(setregid(arg1, arg2));
10904 #endif
10905 #ifdef TARGET_NR_getgroups32
10906 case TARGET_NR_getgroups32:
10908 int gidsetsize = arg1;
10909 uint32_t *target_grouplist;
10910 gid_t *grouplist;
10911 int i;
10913 grouplist = alloca(gidsetsize * sizeof(gid_t));
10914 ret = get_errno(getgroups(gidsetsize, grouplist));
10915 if (gidsetsize == 0)
10916 return ret;
10917 if (!is_error(ret)) {
10918 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10919 if (!target_grouplist) {
10920 return -TARGET_EFAULT;
10922 for (i = 0; i < ret; i++)
10923 target_grouplist[i] = tswap32(grouplist[i]);
10924 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10927 return ret;
10928 #endif
10929 #ifdef TARGET_NR_setgroups32
10930 case TARGET_NR_setgroups32:
10932 int gidsetsize = arg1;
10933 uint32_t *target_grouplist;
10934 gid_t *grouplist;
10935 int i;
10937 grouplist = alloca(gidsetsize * sizeof(gid_t));
10938 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10939 if (!target_grouplist) {
10940 return -TARGET_EFAULT;
10942 for (i = 0; i < gidsetsize; i++)
10943 grouplist[i] = tswap32(target_grouplist[i]);
10944 unlock_user(target_grouplist, arg2, 0);
10945 return get_errno(setgroups(gidsetsize, grouplist));
10947 #endif
10948 #ifdef TARGET_NR_fchown32
10949 case TARGET_NR_fchown32:
10950 return get_errno(fchown(arg1, arg2, arg3));
10951 #endif
10952 #ifdef TARGET_NR_setresuid32
10953 case TARGET_NR_setresuid32:
10954 return get_errno(sys_setresuid(arg1, arg2, arg3));
10955 #endif
10956 #ifdef TARGET_NR_getresuid32
10957 case TARGET_NR_getresuid32:
10959 uid_t ruid, euid, suid;
10960 ret = get_errno(getresuid(&ruid, &euid, &suid));
10961 if (!is_error(ret)) {
10962 if (put_user_u32(ruid, arg1)
10963 || put_user_u32(euid, arg2)
10964 || put_user_u32(suid, arg3))
10965 return -TARGET_EFAULT;
10968 return ret;
10969 #endif
10970 #ifdef TARGET_NR_setresgid32
10971 case TARGET_NR_setresgid32:
10972 return get_errno(sys_setresgid(arg1, arg2, arg3));
10973 #endif
10974 #ifdef TARGET_NR_getresgid32
10975 case TARGET_NR_getresgid32:
10977 gid_t rgid, egid, sgid;
10978 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10979 if (!is_error(ret)) {
10980 if (put_user_u32(rgid, arg1)
10981 || put_user_u32(egid, arg2)
10982 || put_user_u32(sgid, arg3))
10983 return -TARGET_EFAULT;
10986 return ret;
10987 #endif
10988 #ifdef TARGET_NR_chown32
10989 case TARGET_NR_chown32:
10990 if (!(p = lock_user_string(arg1)))
10991 return -TARGET_EFAULT;
10992 ret = get_errno(chown(p, arg2, arg3));
10993 unlock_user(p, arg1, 0);
10994 return ret;
10995 #endif
10996 #ifdef TARGET_NR_setuid32
10997 case TARGET_NR_setuid32:
10998 return get_errno(sys_setuid(arg1));
10999 #endif
11000 #ifdef TARGET_NR_setgid32
11001 case TARGET_NR_setgid32:
11002 return get_errno(sys_setgid(arg1));
11003 #endif
11004 #ifdef TARGET_NR_setfsuid32
11005 case TARGET_NR_setfsuid32:
11006 return get_errno(setfsuid(arg1));
11007 #endif
11008 #ifdef TARGET_NR_setfsgid32
11009 case TARGET_NR_setfsgid32:
11010 return get_errno(setfsgid(arg1));
11011 #endif
11012 #ifdef TARGET_NR_mincore
11013 case TARGET_NR_mincore:
11015 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11016 if (!a) {
11017 return -TARGET_ENOMEM;
11019 p = lock_user_string(arg3);
11020 if (!p) {
11021 ret = -TARGET_EFAULT;
11022 } else {
11023 ret = get_errno(mincore(a, arg2, p));
11024 unlock_user(p, arg3, ret);
11026 unlock_user(a, arg1, 0);
11028 return ret;
11029 #endif
11030 #ifdef TARGET_NR_arm_fadvise64_64
11031 case TARGET_NR_arm_fadvise64_64:
11032 /* arm_fadvise64_64 looks like fadvise64_64 but
11033 * with different argument order: fd, advice, offset, len
11034 * rather than the usual fd, offset, len, advice.
11035 * Note that offset and len are both 64-bit so appear as
11036 * pairs of 32-bit registers.
11037 */
11038 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11039 target_offset64(arg5, arg6), arg2);
11040 return -host_to_target_errno(ret);
11041 #endif
11043 #if TARGET_ABI_BITS == 32
11045 #ifdef TARGET_NR_fadvise64_64
11046 case TARGET_NR_fadvise64_64:
11047 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11048 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11049 ret = arg2;
11050 arg2 = arg3;
11051 arg3 = arg4;
11052 arg4 = arg5;
11053 arg5 = arg6;
11054 arg6 = ret;
11055 #else
11056 /* 6 args: fd, offset (high, low), len (high, low), advice */
11057 if (regpairs_aligned(cpu_env, num)) {
11058 /* offset is in (3,4), len in (5,6) and advice in 7 */
11059 arg2 = arg3;
11060 arg3 = arg4;
11061 arg4 = arg5;
11062 arg5 = arg6;
11063 arg6 = arg7;
11065 #endif
11066 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11067 target_offset64(arg4, arg5), arg6);
11068 return -host_to_target_errno(ret);
11069 #endif
11071 #ifdef TARGET_NR_fadvise64
11072 case TARGET_NR_fadvise64:
11073 /* 5 args: fd, offset (high, low), len, advice */
11074 if (regpairs_aligned(cpu_env, num)) {
11075 /* offset is in (3,4), len in 5 and advice in 6 */
11076 arg2 = arg3;
11077 arg3 = arg4;
11078 arg4 = arg5;
11079 arg5 = arg6;
11081 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11082 return -host_to_target_errno(ret);
11083 #endif
11085 #else /* not a 32-bit ABI */
11086 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11087 #ifdef TARGET_NR_fadvise64_64
11088 case TARGET_NR_fadvise64_64:
11089 #endif
11090 #ifdef TARGET_NR_fadvise64
11091 case TARGET_NR_fadvise64:
11092 #endif
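/*
 * s390x quirk, handled below: the s390 kernel headers renumber the
 * fadvise advice values on 64-bit, using 6 for POSIX_FADV_DONTNEED and
 * 7 for POSIX_FADV_NOREUSE, so guest values 6/7 are mapped to the host
 * constants and the now-ambiguous 4/5 are turned into values the host
 * will reject as invalid.
 */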
11093 #ifdef TARGET_S390X
11094 switch (arg4) {
11095 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11096 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11097 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11098 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11099 default: break;
11101 #endif
11102 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11103 #endif
11104 #endif /* end of fadvise handling, 32- and 64-bit ABIs */
11106 #ifdef TARGET_NR_madvise
11107 case TARGET_NR_madvise:
11108 /* A straight passthrough may not be safe because qemu sometimes
11109 turns private file-backed mappings into anonymous mappings.
11110 This will break MADV_DONTNEED.
11111 This is a hint, so ignoring and returning success is ok. */
11112 return 0;
11113 #endif
11114 #if TARGET_ABI_BITS == 32
11115 case TARGET_NR_fcntl64:
11117 int cmd;
11118 struct flock64 fl;
11119 from_flock64_fn *copyfrom = copy_from_user_flock64;
11120 to_flock64_fn *copyto = copy_to_user_flock64;
11122 #ifdef TARGET_ARM
11123 if (!((CPUARMState *)cpu_env)->eabi) {
11124 copyfrom = copy_from_user_oabi_flock64;
11125 copyto = copy_to_user_oabi_flock64;
11127 #endif
11129 cmd = target_to_host_fcntl_cmd(arg2);
11130 if (cmd == -TARGET_EINVAL) {
11131 return cmd;
11134 switch(arg2) {
11135 case TARGET_F_GETLK64:
11136 ret = copyfrom(&fl, arg3);
11137 if (ret) {
11138 break;
11140 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11141 if (ret == 0) {
11142 ret = copyto(arg3, &fl);
11144 break;
11146 case TARGET_F_SETLK64:
11147 case TARGET_F_SETLKW64:
11148 ret = copyfrom(&fl, arg3);
11149 if (ret) {
11150 break;
11152 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11153 break;
11154 default:
11155 ret = do_fcntl(arg1, arg2, arg3);
11156 break;
11158 return ret;
11160 #endif
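/*
 * fcntl64 note: on 32-bit ABIs the F_*LK64 commands use struct flock64,
 * whose layout differs between the ARM OABI and EABI, hence the
 * copy_from/copy_to function pointers selected above based on
 * ((CPUARMState *)cpu_env)->eabi before the lock is round-tripped
 * through the host's safe_fcntl().
 */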
11161 #ifdef TARGET_NR_cacheflush
11162 case TARGET_NR_cacheflush:
11163 /* self-modifying code is handled automatically, so nothing needed */
11164 return 0;
11165 #endif
11166 #ifdef TARGET_NR_getpagesize
11167 case TARGET_NR_getpagesize:
11168 return TARGET_PAGE_SIZE;
11169 #endif
11170 case TARGET_NR_gettid:
11171 return get_errno(sys_gettid());
11172 #ifdef TARGET_NR_readahead
11173 case TARGET_NR_readahead:
11174 #if TARGET_ABI_BITS == 32
11175 if (regpairs_aligned(cpu_env, num)) {
11176 arg2 = arg3;
11177 arg3 = arg4;
11178 arg4 = arg5;
11180 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11181 #else
11182 ret = get_errno(readahead(arg1, arg2, arg3));
11183 #endif
11184 return ret;
11185 #endif
11186 #ifdef CONFIG_ATTR
11187 #ifdef TARGET_NR_setxattr
11188 case TARGET_NR_listxattr:
11189 case TARGET_NR_llistxattr:
11191 void *p, *b = 0;
11192 if (arg2) {
11193 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11194 if (!b) {
11195 return -TARGET_EFAULT;
11198 p = lock_user_string(arg1);
11199 if (p) {
11200 if (num == TARGET_NR_listxattr) {
11201 ret = get_errno(listxattr(p, b, arg3));
11202 } else {
11203 ret = get_errno(llistxattr(p, b, arg3));
11205 } else {
11206 ret = -TARGET_EFAULT;
11208 unlock_user(p, arg1, 0);
11209 unlock_user(b, arg2, arg3);
11210 return ret;
11212 case TARGET_NR_flistxattr:
11214 void *b = 0;
11215 if (arg2) {
11216 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11217 if (!b) {
11218 return -TARGET_EFAULT;
11221 ret = get_errno(flistxattr(arg1, b, arg3));
11222 unlock_user(b, arg2, arg3);
11223 return ret;
11225 case TARGET_NR_setxattr:
11226 case TARGET_NR_lsetxattr:
11228 void *p, *n, *v = 0;
11229 if (arg3) {
11230 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11231 if (!v) {
11232 return -TARGET_EFAULT;
11235 p = lock_user_string(arg1);
11236 n = lock_user_string(arg2);
11237 if (p && n) {
11238 if (num == TARGET_NR_setxattr) {
11239 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11240 } else {
11241 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11243 } else {
11244 ret = -TARGET_EFAULT;
11246 unlock_user(p, arg1, 0);
11247 unlock_user(n, arg2, 0);
11248 unlock_user(v, arg3, 0);
11250 return ret;
11251 case TARGET_NR_fsetxattr:
11253 void *n, *v = 0;
11254 if (arg3) {
11255 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11256 if (!v) {
11257 return -TARGET_EFAULT;
11260 n = lock_user_string(arg2);
11261 if (n) {
11262 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11263 } else {
11264 ret = -TARGET_EFAULT;
11266 unlock_user(n, arg2, 0);
11267 unlock_user(v, arg3, 0);
11269 return ret;
11270 case TARGET_NR_getxattr:
11271 case TARGET_NR_lgetxattr:
11273 void *p, *n, *v = 0;
11274 if (arg3) {
11275 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11276 if (!v) {
11277 return -TARGET_EFAULT;
11280 p = lock_user_string(arg1);
11281 n = lock_user_string(arg2);
11282 if (p && n) {
11283 if (num == TARGET_NR_getxattr) {
11284 ret = get_errno(getxattr(p, n, v, arg4));
11285 } else {
11286 ret = get_errno(lgetxattr(p, n, v, arg4));
11288 } else {
11289 ret = -TARGET_EFAULT;
11291 unlock_user(p, arg1, 0);
11292 unlock_user(n, arg2, 0);
11293 unlock_user(v, arg3, arg4);
11295 return ret;
11296 case TARGET_NR_fgetxattr:
11298 void *n, *v = 0;
11299 if (arg3) {
11300 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11301 if (!v) {
11302 return -TARGET_EFAULT;
11305 n = lock_user_string(arg2);
11306 if (n) {
11307 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11308 } else {
11309 ret = -TARGET_EFAULT;
11311 unlock_user(n, arg2, 0);
11312 unlock_user(v, arg3, arg4);
11314 return ret;
11315 case TARGET_NR_removexattr:
11316 case TARGET_NR_lremovexattr:
11318 void *p, *n;
11319 p = lock_user_string(arg1);
11320 n = lock_user_string(arg2);
11321 if (p && n) {
11322 if (num == TARGET_NR_removexattr) {
11323 ret = get_errno(removexattr(p, n));
11324 } else {
11325 ret = get_errno(lremovexattr(p, n));
11327 } else {
11328 ret = -TARGET_EFAULT;
11330 unlock_user(p, arg1, 0);
11331 unlock_user(n, arg2, 0);
11333 return ret;
11334 case TARGET_NR_fremovexattr:
11336 void *n;
11337 n = lock_user_string(arg2);
11338 if (n) {
11339 ret = get_errno(fremovexattr(arg1, n));
11340 } else {
11341 ret = -TARGET_EFAULT;
11343 unlock_user(n, arg2, 0);
11345 return ret;
11346 #endif
11347 #endif /* CONFIG_ATTR */
11348 #ifdef TARGET_NR_set_thread_area
11349 case TARGET_NR_set_thread_area:
11350 #if defined(TARGET_MIPS)
11351 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11352 return 0;
11353 #elif defined(TARGET_CRIS)
11354 if (arg1 & 0xff)
11355 ret = -TARGET_EINVAL;
11356 else {
11357 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11358 ret = 0;
11360 return ret;
11361 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11362 return do_set_thread_area(cpu_env, arg1);
11363 #elif defined(TARGET_M68K)
11365 TaskState *ts = cpu->opaque;
11366 ts->tp_value = arg1;
11367 return 0;
11369 #else
11370 return -TARGET_ENOSYS;
11371 #endif
11372 #endif
11373 #ifdef TARGET_NR_get_thread_area
11374 case TARGET_NR_get_thread_area:
11375 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11376 return do_get_thread_area(cpu_env, arg1);
11377 #elif defined(TARGET_M68K)
11379 TaskState *ts = cpu->opaque;
11380 return ts->tp_value;
11382 #else
11383 return -TARGET_ENOSYS;
11384 #endif
11385 #endif
11386 #ifdef TARGET_NR_getdomainname
11387 case TARGET_NR_getdomainname:
11388 return -TARGET_ENOSYS;
11389 #endif
11391 #ifdef TARGET_NR_clock_settime
11392 case TARGET_NR_clock_settime:
11394 struct timespec ts;
11396 ret = target_to_host_timespec(&ts, arg2);
11397 if (!is_error(ret)) {
11398 ret = get_errno(clock_settime(arg1, &ts));
11400 return ret;
11402 #endif
11403 #ifdef TARGET_NR_clock_gettime
11404 case TARGET_NR_clock_gettime:
11406 struct timespec ts;
11407 ret = get_errno(clock_gettime(arg1, &ts));
11408 if (!is_error(ret)) {
11409 ret = host_to_target_timespec(arg2, &ts);
11411 return ret;
11413 #endif
11414 #ifdef TARGET_NR_clock_getres
11415 case TARGET_NR_clock_getres:
11417 struct timespec ts;
11418 ret = get_errno(clock_getres(arg1, &ts));
11419 if (!is_error(ret)) {
11420 host_to_target_timespec(arg2, &ts);
11422 return ret;
11424 #endif
11425 #ifdef TARGET_NR_clock_nanosleep
11426 case TARGET_NR_clock_nanosleep:
11428 struct timespec ts;
11429 target_to_host_timespec(&ts, arg3);
11430 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11431 &ts, arg4 ? &ts : NULL));
11432 if (arg4)
11433 host_to_target_timespec(arg4, &ts);
11435 #if defined(TARGET_PPC)
11436 /* clock_nanosleep is odd in that it returns positive errno values.
11437 * On PPC, CR0 bit 3 should be set in such a situation. */
11438 if (ret && ret != -TARGET_ERESTARTSYS) {
11439 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11441 #endif
11442 return ret;
11444 #endif
11446 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11447 case TARGET_NR_set_tid_address:
11448 return get_errno(set_tid_address((int *)g2h(arg1)));
11449 #endif
11451 case TARGET_NR_tkill:
11452 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11454 case TARGET_NR_tgkill:
11455 return get_errno(safe_tgkill((int)arg1, (int)arg2,
11456 target_to_host_signal(arg3)));
11458 #ifdef TARGET_NR_set_robust_list
11459 case TARGET_NR_set_robust_list:
11460 case TARGET_NR_get_robust_list:
11461 /* The ABI for supporting robust futexes has userspace pass
11462 * the kernel a pointer to a linked list which is updated by
11463 * userspace after the syscall; the list is walked by the kernel
11464 * when the thread exits. Since the linked list in QEMU guest
11465 * memory isn't a valid linked list for the host and we have
11466 * no way to reliably intercept the thread-death event, we can't
11467 * support these. Silently return ENOSYS so that guest userspace
11468 * falls back to a non-robust futex implementation (which should
11469 * be OK except in the corner case of the guest crashing while
11470 * holding a mutex that is shared with another process via
11471 * shared memory).
11472 */
11473 return -TARGET_ENOSYS;
11474 #endif
11476 #if defined(TARGET_NR_utimensat)
11477 case TARGET_NR_utimensat:
11479 struct timespec *tsp, ts[2];
11480 if (!arg3) {
11481 tsp = NULL;
11482 } else {
11483 target_to_host_timespec(ts, arg3);
11484 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11485 tsp = ts;
11487 if (!arg2)
11488 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11489 else {
11490 if (!(p = lock_user_string(arg2))) {
11491 return -TARGET_EFAULT;
11493 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11494 unlock_user(p, arg2, 0);
11497 return ret;
11498 #endif
11499 case TARGET_NR_futex:
11500 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11501 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11502 case TARGET_NR_inotify_init:
11503 ret = get_errno(sys_inotify_init());
11504 if (ret >= 0) {
11505 fd_trans_register(ret, &target_inotify_trans);
11507 return ret;
11508 #endif
11509 #ifdef CONFIG_INOTIFY1
11510 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11511 case TARGET_NR_inotify_init1:
11512 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11513 fcntl_flags_tbl)));
11514 if (ret >= 0) {
11515 fd_trans_register(ret, &target_inotify_trans);
11517 return ret;
11518 #endif
11519 #endif
11520 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11521 case TARGET_NR_inotify_add_watch:
11522 p = lock_user_string(arg2);
11523 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11524 unlock_user(p, arg2, 0);
11525 return ret;
11526 #endif
11527 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11528 case TARGET_NR_inotify_rm_watch:
11529 return get_errno(sys_inotify_rm_watch(arg1, arg2));
11530 #endif
11532 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11533 case TARGET_NR_mq_open:
11535 struct mq_attr posix_mq_attr;
11536 struct mq_attr *pposix_mq_attr;
11537 int host_flags;
11539 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11540 pposix_mq_attr = NULL;
11541 if (arg4) {
11542 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11543 return -TARGET_EFAULT;
11545 pposix_mq_attr = &posix_mq_attr;
11547 p = lock_user_string(arg1 - 1);
11548 if (!p) {
11549 return -TARGET_EFAULT;
11551 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11552 unlock_user (p, arg1, 0);
11554 return ret;
11556 case TARGET_NR_mq_unlink:
11557 p = lock_user_string(arg1 - 1);
11558 if (!p) {
11559 return -TARGET_EFAULT;
11561 ret = get_errno(mq_unlink(p));
11562 unlock_user (p, arg1, 0);
11563 return ret;
11565 case TARGET_NR_mq_timedsend:
11567 struct timespec ts;
11569 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11570 if (arg5 != 0) {
11571 target_to_host_timespec(&ts, arg5);
11572 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11573 host_to_target_timespec(arg5, &ts);
11574 } else {
11575 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11577 unlock_user (p, arg2, arg3);
11579 return ret;
11581 case TARGET_NR_mq_timedreceive:
11583 struct timespec ts;
11584 unsigned int prio;
11586 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11587 if (arg5 != 0) {
11588 target_to_host_timespec(&ts, arg5);
11589 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11590 &prio, &ts));
11591 host_to_target_timespec(arg5, &ts);
11592 } else {
11593 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11594 &prio, NULL));
11596 unlock_user (p, arg2, arg3);
11597 if (arg4 != 0)
11598 put_user_u32(prio, arg4);
11600 return ret;
11602 /* Not implemented for now... */
11603 /* case TARGET_NR_mq_notify: */
11604 /* break; */
11606 case TARGET_NR_mq_getsetattr:
11608 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11609 ret = 0;
11610 if (arg2 != 0) {
11611 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11612 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11613 &posix_mq_attr_out));
11614 } else if (arg3 != 0) {
11615 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11617 if (ret == 0 && arg3 != 0) {
11618 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11621 return ret;
11622 #endif
11624 #ifdef CONFIG_SPLICE
11625 #ifdef TARGET_NR_tee
11626 case TARGET_NR_tee:
11628 ret = get_errno(tee(arg1, arg2, arg3, arg4));
11630 return ret;
11631 #endif
11632 #ifdef TARGET_NR_splice
11633 case TARGET_NR_splice:
11635 loff_t loff_in, loff_out;
11636 loff_t *ploff_in = NULL, *ploff_out = NULL;
11637 if (arg2) {
11638 if (get_user_u64(loff_in, arg2)) {
11639 return -TARGET_EFAULT;
11641 ploff_in = &loff_in;
11643 if (arg4) {
11644 if (get_user_u64(loff_out, arg4)) {
11645 return -TARGET_EFAULT;
11647 ploff_out = &loff_out;
11649 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11650 if (arg2) {
11651 if (put_user_u64(loff_in, arg2)) {
11652 return -TARGET_EFAULT;
11655 if (arg4) {
11656 if (put_user_u64(loff_out, arg4)) {
11657 return -TARGET_EFAULT;
11661 return ret;
11662 #endif
11663 #ifdef TARGET_NR_vmsplice
11664 case TARGET_NR_vmsplice:
11666 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11667 if (vec != NULL) {
11668 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11669 unlock_iovec(vec, arg2, arg3, 0);
11670 } else {
11671 ret = -host_to_target_errno(errno);
11674 return ret;
11675 #endif
11676 #endif /* CONFIG_SPLICE */
11677 #ifdef CONFIG_EVENTFD
11678 #if defined(TARGET_NR_eventfd)
11679 case TARGET_NR_eventfd:
11680 ret = get_errno(eventfd(arg1, 0));
11681 if (ret >= 0) {
11682 fd_trans_register(ret, &target_eventfd_trans);
11684 return ret;
11685 #endif
11686 #if defined(TARGET_NR_eventfd2)
11687 case TARGET_NR_eventfd2:
11689 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11690 if (arg2 & TARGET_O_NONBLOCK) {
11691 host_flags |= O_NONBLOCK;
11693 if (arg2 & TARGET_O_CLOEXEC) {
11694 host_flags |= O_CLOEXEC;
11696 ret = get_errno(eventfd(arg1, host_flags));
11697 if (ret >= 0) {
11698 fd_trans_register(ret, &target_eventfd_trans);
11700 return ret;
11702 #endif
11703 #endif /* CONFIG_EVENTFD */
11704 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11705 case TARGET_NR_fallocate:
11706 #if TARGET_ABI_BITS == 32
11707 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11708 target_offset64(arg5, arg6)));
11709 #else
11710 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11711 #endif
11712 return ret;
11713 #endif
11714 #if defined(CONFIG_SYNC_FILE_RANGE)
11715 #if defined(TARGET_NR_sync_file_range)
11716 case TARGET_NR_sync_file_range:
11717 #if TARGET_ABI_BITS == 32
11718 #if defined(TARGET_MIPS)
11719 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11720 target_offset64(arg5, arg6), arg7));
11721 #else
11722 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11723 target_offset64(arg4, arg5), arg6));
11724 #endif /* !TARGET_MIPS */
11725 #else
11726 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11727 #endif
11728 return ret;
11729 #endif
11730 #if defined(TARGET_NR_sync_file_range2)
11731 case TARGET_NR_sync_file_range2:
11732 /* This is like sync_file_range but the arguments are reordered */
11733 #if TARGET_ABI_BITS == 32
11734 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11735 target_offset64(arg5, arg6), arg2));
11736 #else
11737 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11738 #endif
11739 return ret;
11740 #endif
11741 #endif
11742 #if defined(TARGET_NR_signalfd4)
11743 case TARGET_NR_signalfd4:
11744 return do_signalfd4(arg1, arg2, arg4);
11745 #endif
11746 #if defined(TARGET_NR_signalfd)
11747 case TARGET_NR_signalfd:
11748 return do_signalfd4(arg1, arg2, 0);
11749 #endif
11750 #if defined(CONFIG_EPOLL)
11751 #if defined(TARGET_NR_epoll_create)
11752 case TARGET_NR_epoll_create:
11753 return get_errno(epoll_create(arg1));
11754 #endif
11755 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11756 case TARGET_NR_epoll_create1:
11757 return get_errno(epoll_create1(arg1));
11758 #endif
11759 #if defined(TARGET_NR_epoll_ctl)
11760 case TARGET_NR_epoll_ctl:
11762 struct epoll_event ep;
11763 struct epoll_event *epp = 0;
11764 if (arg4) {
11765 struct target_epoll_event *target_ep;
11766 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11767 return -TARGET_EFAULT;
11769 ep.events = tswap32(target_ep->events);
11770 /* The epoll_data_t union is just opaque data to the kernel,
11771 * so we transfer all 64 bits across and need not worry what
11772 * actual data type it is.
11773 */
11774 ep.data.u64 = tswap64(target_ep->data.u64);
11775 unlock_user_struct(target_ep, arg4, 0);
11776 epp = &ep;
11778 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11780 #endif
11782 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11783 #if defined(TARGET_NR_epoll_wait)
11784 case TARGET_NR_epoll_wait:
11785 #endif
11786 #if defined(TARGET_NR_epoll_pwait)
11787 case TARGET_NR_epoll_pwait:
11788 #endif
11790 struct target_epoll_event *target_ep;
11791 struct epoll_event *ep;
11792 int epfd = arg1;
11793 int maxevents = arg3;
11794 int timeout = arg4;
11796 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11797 return -TARGET_EINVAL;
11800 target_ep = lock_user(VERIFY_WRITE, arg2,
11801 maxevents * sizeof(struct target_epoll_event), 1);
11802 if (!target_ep) {
11803 return -TARGET_EFAULT;
11806 ep = g_try_new(struct epoll_event, maxevents);
11807 if (!ep) {
11808 unlock_user(target_ep, arg2, 0);
11809 return -TARGET_ENOMEM;
11812 switch (num) {
11813 #if defined(TARGET_NR_epoll_pwait)
11814 case TARGET_NR_epoll_pwait:
11816 target_sigset_t *target_set;
11817 sigset_t _set, *set = &_set;
11819 if (arg5) {
11820 if (arg6 != sizeof(target_sigset_t)) {
11821 ret = -TARGET_EINVAL;
11822 break;
11825 target_set = lock_user(VERIFY_READ, arg5,
11826 sizeof(target_sigset_t), 1);
11827 if (!target_set) {
11828 ret = -TARGET_EFAULT;
11829 break;
11831 target_to_host_sigset(set, target_set);
11832 unlock_user(target_set, arg5, 0);
11833 } else {
11834 set = NULL;
11837 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11838 set, SIGSET_T_SIZE));
11839 break;
11841 #endif
11842 #if defined(TARGET_NR_epoll_wait)
11843 case TARGET_NR_epoll_wait:
11844 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11845 NULL, 0));
11846 break;
11847 #endif
11848 default:
11849 ret = -TARGET_ENOSYS;
11851 if (!is_error(ret)) {
11852 int i;
11853 for (i = 0; i < ret; i++) {
11854 target_ep[i].events = tswap32(ep[i].events);
11855 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11857 unlock_user(target_ep, arg2,
11858 ret * sizeof(struct target_epoll_event));
11859 } else {
11860 unlock_user(target_ep, arg2, 0);
11862 g_free(ep);
11863 return ret;
11865 #endif
11866 #endif
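/*
 * epoll_wait/epoll_pwait bounce buffer, as above: events are gathered
 * into a host-side array allocated with g_try_new() (bounded by
 * TARGET_EP_MAX_EVENTS), then each event's 32-bit mask and opaque
 * 64-bit data are byte-swapped into the guest's locked buffer; only
 * the "ret" events actually produced are copied back.
 */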
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = NULL;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp,
                                      arg4 ? &rold : NULL));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
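/*
 * The read side of prlimit64 above follows the usual recipe for pulling a
 * struct out of guest memory; as a generic sketch (hypothetical names, for
 * illustration only):
 *
 *     struct target_foo *tfoo;
 *     if (!lock_user_struct(VERIFY_READ, tfoo, guest_addr, 1)) {
 *         return -TARGET_EFAULT;               // bad guest pointer
 *     }
 *     host.field = tswap64(tfoo->field);       // swap each fixed-width field
 *     unlock_user_struct(tfoo, guest_addr, 0); // 0: nothing to copy back
 */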
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
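/*
 * Note the buffer-locking convention above: the final 0 passed to
 * lock_user(VERIFY_WRITE, ...) skips copying the guest buffer in (it is
 * about to be overwritten), while the length passed to unlock_user() is
 * how many bytes get copied back to the guest -- arg2 on success, 0 on
 * failure.  The same pattern, sketched for any fill-a-buffer syscall
 * ('some_host_call' is hypothetical):
 *
 *     char *buf = lock_user(VERIFY_WRITE, guest_addr, len, 0);
 *     if (!buf) {
 *         return -TARGET_EFAULT;
 *     }
 *     ret = get_errno(some_host_call(buf, len));
 *     unlock_user(buf, guest_addr, len);   // copy len bytes back
 */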
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }

        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
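/*
 * What the case above implements, in sketch form: a compare-and-swap on a
 * 32-bit guest word -- the old value is returned and the store happens
 * only on a match.  With arg6 as the guest address, arg2 the expected
 * value and arg1 the new value (and, per the comment above, without any
 * real atomicity):
 *
 *     uint32_t old;
 *     get_user_u32(old, arg6);        // read the current guest word
 *     if (old == arg2) {
 *         put_user_u32(arg1, arg6);   // store only if it matched
 *     }
 *     return old;                     // guest tests old == expected
 */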
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /*
         * Like the kernel implementation and the
         * qemu arm barrier, no-op this?
         */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
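/*
 * Guest timer handles are encoded as TIMER_MAGIC | index, where the index
 * selects a slot in the host-side g_posix_timers[] table.  The matching
 * decode used by the timer_* cases below is get_timer_id(), defined
 * earlier in this file; in sketch form:
 *
 *     target_timer_t timerid = arg;
 *     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
 *         return -TARGET_EINVAL;       // not a handle we issued
 *     }
 *     timerid &= 0xffff;               // recover the table index
 *     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
 *         return -TARGET_EINVAL;
 *     }
 *     return timerid;
 */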
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec *old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
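/*
 * Guest-side lifecycle covered by the five timer_* cases above (plain
 * POSIX timer API, sketched for reference; the sigevent and itimerspec
 * values are illustrative):
 *
 *     timer_t t;
 *     struct sigevent sev = { 0 };
 *     sev.sigev_notify = SIGEV_SIGNAL;
 *     sev.sigev_signo = SIGRTMIN;
 *     struct itimerspec its = { .it_value = { .tv_sec = 1 } };
 *
 *     timer_create(CLOCK_MONOTONIC, &sev, &t);  // TARGET_NR_timer_create
 *     timer_settime(t, 0, &its, NULL);          // TARGET_NR_timer_settime
 *     // expirations arrive as SIGRTMIN; missed ones via timer_getoverrun()
 *     timer_delete(t);                          // TARGET_NR_timer_delete
 */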
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                return -TARGET_EFAULT;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
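/*
 * Reference guest usage for the three timerfd cases above (standard Linux
 * API, illustrative values):
 *
 *     int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
 *     struct itimerspec its = { .it_interval = { .tv_sec = 1 },
 *                               .it_value    = { .tv_sec = 1 } };
 *     timerfd_settime(tfd, 0, &its, NULL);
 *
 *     uint64_t expirations;
 *     read(tfd, &expirations, sizeof(expirations));  // blocks until expiry
 */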
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
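/*
 * Guest-side sketch (standard Linux API): memfd_create() yields an
 * anonymous, path-less file descriptor, which is why only the name string
 * needs translating above, and why fd_trans_unregister() clears any stale
 * translator for the returned fd number:
 *
 *     int fd = memfd_create("guest-shm", MFD_CLOEXEC);   // name is a label
 *     ftruncate(fd, 4096);
 *     void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, 0);
 */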
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}

abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /*
     * Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(num, ret);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}
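/*
 * The LOG_STRACE checks above implement the user-mode '-strace' option:
 * print_syscall() logs the syscall number and arguments before the call,
 * do_syscall1() does the actual work, and print_syscall_ret() logs the
 * result afterwards.  Typical use (the output line is illustrative of the
 * format, not captured output):
 *
 *     $ qemu-arm -strace ./a.out
 *     3614 write(1,0x408004e8,6) = 6
 */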