/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "fd-trans.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
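/* Worked example (illustration only, not part of the upstream comment):
 * glibc's pthread_create() issues clone() with CLONE_VM | CLONE_FS |
 * CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM |
 * CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID.
 * All of CLONE_THREAD_FLAGS are set, the extras fall inside
 * CLONE_OPTIONAL_THREAD_FLAGS, and no bit survives masking with
 * CLONE_INVALID_THREAD_FLAGS, so do_fork() classifies this as thread
 * creation and implements it with a host pthread_create().
 */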
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
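/* Expansion example (illustration only): after the #undefs above, a later
 * declaration such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
 * expands to the static wrapper
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     { return syscall(__NR_sys_getdents, fd, dirp, count); }
 * and __NR_sys_getdents is #defined back to __NR_getdents below, so the
 * wrapper invokes the raw host syscall, bypassing any glibc wrapper.
 */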
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
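/* Illustration (not part of the upstream source): on ARM EABI the guest
 * passes 64-bit syscall arguments in an even/odd register pair, so a call
 * like pread64(fd, buf, count, offset) leaves a padding slot before the
 * 64-bit offset. The main syscall dispatcher consults regpairs_aligned()
 * to decide whether to skip that slot before reassembling the 64-bit
 * value from its two 32-bit halves.
 */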
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
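/* Example flow (illustration only): if the host open() fails, it returns
 * -1 with errno == ENOENT. Wrapping the call as get_errno(open(...))
 * converts that into -TARGET_ENOENT, which is what the guest's libc
 * expects to find in the syscall return register. For most errno values
 * host and target agree, so host_to_target_errno() simply falls through
 * and returns the value unchanged.
 */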
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
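/* Note (added for clarity; the authoritative description lives with
 * safe_syscall() in linux-user/qemu.h): these wrappers close the race
 * between checking for pending guest signals and entering a blocking host
 * syscall. If a signal arrives after the check but before the syscall
 * instruction, safe_syscall() arranges for the call to fail with errno
 * set to TARGET_ERESTARTSYS instead of blocking, so the caller can
 * deliver the signal to the guest and then restart the syscall.
 */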
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
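/* Example (illustration only): a host value of SOCK_STREAM | SOCK_CLOEXEC
 * comes back as TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC. The low nibble
 * (the socket type) and the flag bits are translated independently
 * because some targets number these constants differently from the host;
 * MIPS, for instance, swaps the values of SOCK_STREAM and SOCK_DGRAM.
 */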
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}
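/* Worked example (illustration only, assuming 4 KiB host pages): with
 * target_brk = 0x40001200 and brk_page = 0x40002000, a guest brk(0x40001800)
 * stays below brk_page, so we just zero the new bytes and update target_brk.
 * A guest brk(0x40003000) instead needs HOST_PAGE_ALIGN(0x1000), i.e. one
 * new page mapped at brk_page; if target_mmap() happens to place it
 * elsewhere, the mapping is undone and the old break is returned (or
 * -TARGET_ENOMEM on Alpha, per the OSF/1 convention above).
 */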
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
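/* Layout example (illustration only): for a 32-bit target, guest fd 37
 * lives in word 37 / 32 = 1 of the guest fd_set, bit 37 % 32 = 5. The
 * loops above walk the guest words in guest byte order via
 * __get_user/__put_user and mirror each set bit into or out of the host
 * fd_set with FD_SET/FD_ISSET, so the two representations never need to
 * share word size or endianness.
 */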
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
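/* Example (illustration only): on an Alpha host (HOST_HZ == 1024) with a
 * target whose TARGET_HZ is 100, a times() result of 2048 host ticks is
 * reported to the guest as 2048 * 100 / 1024 = 200 target ticks. The
 * int64_t cast keeps the intermediate multiply from overflowing.
 */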
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
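/* Example (illustration only): a 64-bit host can hold an RLIMIT_DATA of
 * 8 GiB, but that value does not fit in a 32-bit guest's abi_ulong, so
 * host_to_target_rlim() reports it as TARGET_RLIM_INFINITY rather than
 * silently truncating it. The round-trip check in the other direction
 * likewise keeps an over-wide guest value from becoming a bogus small
 * host limit.
 */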
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
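/* Example of the sun_path fixup above (illustration only): a guest that
 * binds to "/tmp/sock" may pass len = offsetof(sockaddr_un, sun_path) +
 * strlen("/tmp/sock"), leaving the trailing '\0' outside the reported
 * length. If the last byte inside the given length is non-NUL and the
 * byte just past it is the terminator, len is bumped by one so the host
 * kernel sees a properly terminated path, mirroring the kernel's own
 * leniency here.
 */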
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
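/* Example (illustration only): a guest sendmsg() passing one file
 * descriptor via SCM_RIGHTS arrives here as a target_cmsghdr whose
 * payload is a single int in guest byte order. The loop above rebuilds a
 * native cmsghdr (host level/type/len) and __get_user() swaps the fd
 * value into host order, so the host kernel receives exactly the
 * ancillary message the guest intended.
 */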
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1787 /* do_setsockopt() Must return target values and target errnos. */
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789 abi_ulong optval_addr, socklen_t optlen)
1791 abi_long ret;
1792 int val;
1793 struct ip_mreqn *ip_mreq;
1794 struct ip_mreq_source *ip_mreq_source;
1796 switch(level) {
1797 case SOL_TCP:
1798 /* TCP options all take an 'int' value. */
1799 if (optlen < sizeof(uint32_t))
1800 return -TARGET_EINVAL;
1802 if (get_user_u32(val, optval_addr))
1803 return -TARGET_EFAULT;
1804 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805 break;
1806 case SOL_IP:
1807 switch(optname) {
1808 case IP_TOS:
1809 case IP_TTL:
1810 case IP_HDRINCL:
1811 case IP_ROUTER_ALERT:
1812 case IP_RECVOPTS:
1813 case IP_RETOPTS:
1814 case IP_PKTINFO:
1815 case IP_MTU_DISCOVER:
1816 case IP_RECVERR:
1817 case IP_RECVTTL:
1818 case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820 case IP_FREEBIND:
1821 #endif
1822 case IP_MULTICAST_TTL:
1823 case IP_MULTICAST_LOOP:
1824 val = 0;
1825 if (optlen >= sizeof(uint32_t)) {
1826 if (get_user_u32(val, optval_addr))
1827 return -TARGET_EFAULT;
1828 } else if (optlen >= 1) {
1829 if (get_user_u8(val, optval_addr))
1830 return -TARGET_EFAULT;
1832 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833 break;
1834 case IP_ADD_MEMBERSHIP:
1835 case IP_DROP_MEMBERSHIP:
1836 if (optlen < sizeof (struct target_ip_mreq) ||
1837 optlen > sizeof (struct target_ip_mreqn))
1838 return -TARGET_EINVAL;
1840 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843 break;
1845 case IP_BLOCK_SOURCE:
1846 case IP_UNBLOCK_SOURCE:
1847 case IP_ADD_SOURCE_MEMBERSHIP:
1848 case IP_DROP_SOURCE_MEMBERSHIP:
1849 if (optlen != sizeof (struct target_ip_mreq_source))
1850 return -TARGET_EINVAL;
1852 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1853 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854 unlock_user (ip_mreq_source, optval_addr, 0);
1855 break;
1857 default:
1858 goto unimplemented;
1860 break;
1861 case SOL_IPV6:
1862 switch (optname) {
1863 case IPV6_MTU_DISCOVER:
1864 case IPV6_MTU:
1865 case IPV6_V6ONLY:
1866 case IPV6_RECVPKTINFO:
1867 case IPV6_UNICAST_HOPS:
1868 case IPV6_MULTICAST_HOPS:
1869 case IPV6_MULTICAST_LOOP:
1870 case IPV6_RECVERR:
1871 case IPV6_RECVHOPLIMIT:
1872 case IPV6_2292HOPLIMIT:
1873 case IPV6_CHECKSUM:
1874 val = 0;
1875 if (optlen < sizeof(uint32_t)) {
1876 return -TARGET_EINVAL;
1878 if (get_user_u32(val, optval_addr)) {
1879 return -TARGET_EFAULT;
1881 ret = get_errno(setsockopt(sockfd, level, optname,
1882 &val, sizeof(val)));
1883 break;
1884 case IPV6_PKTINFO:
1886 struct in6_pktinfo pki;
1888 if (optlen < sizeof(pki)) {
1889 return -TARGET_EINVAL;
1892 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1893 return -TARGET_EFAULT;
1896 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1898 ret = get_errno(setsockopt(sockfd, level, optname,
1899 &pki, sizeof(pki)));
1900 break;
1902 default:
1903 goto unimplemented;
1905 break;
1906 case SOL_ICMPV6:
1907 switch (optname) {
1908 case ICMPV6_FILTER:
1910 struct icmp6_filter icmp6f;
1912 if (optlen > sizeof(icmp6f)) {
1913 optlen = sizeof(icmp6f);
1916 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1917 return -TARGET_EFAULT;
1920 for (val = 0; val < 8; val++) {
1921 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1924 ret = get_errno(setsockopt(sockfd, level, optname,
1925 &icmp6f, optlen));
1926 break;
1928 default:
1929 goto unimplemented;
1931 break;
1932 case SOL_RAW:
1933 switch (optname) {
1934 case ICMP_FILTER:
1935 case IPV6_CHECKSUM:
1936 /* These options take a u32 value. */
1937 if (optlen < sizeof(uint32_t)) {
1938 return -TARGET_EINVAL;
1941 if (get_user_u32(val, optval_addr)) {
1942 return -TARGET_EFAULT;
1944 ret = get_errno(setsockopt(sockfd, level, optname,
1945 &val, sizeof(val)));
1946 break;
1948 default:
1949 goto unimplemented;
1951 break;
1952 case TARGET_SOL_SOCKET:
1953 switch (optname) {
1954 case TARGET_SO_RCVTIMEO:
1956 struct timeval tv;
1958 optname = SO_RCVTIMEO;
1960 set_timeout:
1961 if (optlen != sizeof(struct target_timeval)) {
1962 return -TARGET_EINVAL;
1965 if (copy_from_user_timeval(&tv, optval_addr)) {
1966 return -TARGET_EFAULT;
1969 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1970 &tv, sizeof(tv)));
1971 return ret;
1973 case TARGET_SO_SNDTIMEO:
1974 optname = SO_SNDTIMEO;
1975 goto set_timeout;
1976 case TARGET_SO_ATTACH_FILTER:
1978 struct target_sock_fprog *tfprog;
1979 struct target_sock_filter *tfilter;
1980 struct sock_fprog fprog;
1981 struct sock_filter *filter;
1982 int i;
1984 if (optlen != sizeof(*tfprog)) {
1985 return -TARGET_EINVAL;
1987 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1988 return -TARGET_EFAULT;
1990 if (!lock_user_struct(VERIFY_READ, tfilter,
1991 tswapal(tfprog->filter), 0)) {
1992 unlock_user_struct(tfprog, optval_addr, 1);
1993 return -TARGET_EFAULT;
1996 fprog.len = tswap16(tfprog->len);
1997 filter = g_try_new(struct sock_filter, fprog.len);
1998 if (filter == NULL) {
1999 unlock_user_struct(tfilter, tfprog->filter, 1);
2000 unlock_user_struct(tfprog, optval_addr, 1);
2001 return -TARGET_ENOMEM;
2003 for (i = 0; i < fprog.len; i++) {
2004 filter[i].code = tswap16(tfilter[i].code);
2005 filter[i].jt = tfilter[i].jt;
2006 filter[i].jf = tfilter[i].jf;
2007 filter[i].k = tswap32(tfilter[i].k);
2009 fprog.filter = filter;
2011 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2012 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2013 g_free(filter);
2015 unlock_user_struct(tfilter, tfprog->filter, 1);
2016 unlock_user_struct(tfprog, optval_addr, 1);
2017 return ret;
2019 case TARGET_SO_BINDTODEVICE:
2021 char *dev_ifname, *addr_ifname;
2023 if (optlen > IFNAMSIZ - 1) {
2024 optlen = IFNAMSIZ - 1;
2026 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2027 if (!dev_ifname) {
2028 return -TARGET_EFAULT;
2030 optname = SO_BINDTODEVICE;
2031 addr_ifname = alloca(IFNAMSIZ);
2032 memcpy(addr_ifname, dev_ifname, optlen);
2033 addr_ifname[optlen] = 0;
2034 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2035 addr_ifname, optlen));
2036 unlock_user (dev_ifname, optval_addr, 0);
2037 return ret;
2039 case TARGET_SO_LINGER:
2041 struct linger lg;
2042 struct target_linger *tlg;
2044 if (optlen != sizeof(struct target_linger)) {
2045 return -TARGET_EINVAL;
2047 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2048 return -TARGET_EFAULT;
2050 __get_user(lg.l_onoff, &tlg->l_onoff);
2051 __get_user(lg.l_linger, &tlg->l_linger);
2052 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2053 &lg, sizeof(lg)));
2054 unlock_user_struct(tlg, optval_addr, 0);
2055 return ret;
2057 /* Options with 'int' argument. */
2058 case TARGET_SO_DEBUG:
2059 optname = SO_DEBUG;
2060 break;
2061 case TARGET_SO_REUSEADDR:
2062 optname = SO_REUSEADDR;
2063 break;
2064 case TARGET_SO_TYPE:
2065 optname = SO_TYPE;
2066 break;
2067 case TARGET_SO_ERROR:
2068 optname = SO_ERROR;
2069 break;
2070 case TARGET_SO_DONTROUTE:
2071 optname = SO_DONTROUTE;
2072 break;
2073 case TARGET_SO_BROADCAST:
2074 optname = SO_BROADCAST;
2075 break;
2076 case TARGET_SO_SNDBUF:
2077 optname = SO_SNDBUF;
2078 break;
2079 case TARGET_SO_SNDBUFFORCE:
2080 optname = SO_SNDBUFFORCE;
2081 break;
2082 case TARGET_SO_RCVBUF:
2083 optname = SO_RCVBUF;
2084 break;
2085 case TARGET_SO_RCVBUFFORCE:
2086 optname = SO_RCVBUFFORCE;
2087 break;
2088 case TARGET_SO_KEEPALIVE:
2089 optname = SO_KEEPALIVE;
2090 break;
2091 case TARGET_SO_OOBINLINE:
2092 optname = SO_OOBINLINE;
2093 break;
2094 case TARGET_SO_NO_CHECK:
2095 optname = SO_NO_CHECK;
2096 break;
2097 case TARGET_SO_PRIORITY:
2098 optname = SO_PRIORITY;
2099 break;
2100 #ifdef SO_BSDCOMPAT
2101 case TARGET_SO_BSDCOMPAT:
2102 optname = SO_BSDCOMPAT;
2103 break;
2104 #endif
2105 case TARGET_SO_PASSCRED:
2106 optname = SO_PASSCRED;
2107 break;
2108 case TARGET_SO_PASSSEC:
2109 optname = SO_PASSSEC;
2110 break;
2111 case TARGET_SO_TIMESTAMP:
2112 optname = SO_TIMESTAMP;
2113 break;
2114 case TARGET_SO_RCVLOWAT:
2115 optname = SO_RCVLOWAT;
2116 break;
2117 default:
2118 goto unimplemented;
2120 if (optlen < sizeof(uint32_t))
2121 return -TARGET_EINVAL;
2123 if (get_user_u32(val, optval_addr))
2124 return -TARGET_EFAULT;
2125 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2126 break;
2127 default:
2128 unimplemented:
2129 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2130 ret = -TARGET_ENOPROTOOPT;
2132 return ret;
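/*
 * As a concrete illustration (guest-side, not part of this file), the
 * common int-valued path above is what services a call such as
 *
 *     int one = 1;
 *     setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *
 * The 32-bit value is fetched with get_user_u32(), TARGET_SO_REUSEADDR
 * is mapped to the host's SO_REUSEADDR, and the host setsockopt() is
 * invoked with host-sized arguments.
 */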
2135 /* do_getsockopt() must return target values and target errnos. */
2136 static abi_long do_getsockopt(int sockfd, int level, int optname,
2137 abi_ulong optval_addr, abi_ulong optlen)
2139 abi_long ret;
2140 int len, val;
2141 socklen_t lv;
2143 switch(level) {
2144 case TARGET_SOL_SOCKET:
2145 level = SOL_SOCKET;
2146 switch (optname) {
2147 /* These don't just return a single integer */
2148 case TARGET_SO_RCVTIMEO:
2149 case TARGET_SO_SNDTIMEO:
2150 case TARGET_SO_PEERNAME:
2151 goto unimplemented;
2152 case TARGET_SO_PEERCRED: {
2153 struct ucred cr;
2154 socklen_t crlen;
2155 struct target_ucred *tcr;
2157 if (get_user_u32(len, optlen)) {
2158 return -TARGET_EFAULT;
2160 if (len < 0) {
2161 return -TARGET_EINVAL;
2164 crlen = sizeof(cr);
2165 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2166 &cr, &crlen));
2167 if (ret < 0) {
2168 return ret;
2170 if (len > crlen) {
2171 len = crlen;
2173 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2174 return -TARGET_EFAULT;
2176 __put_user(cr.pid, &tcr->pid);
2177 __put_user(cr.uid, &tcr->uid);
2178 __put_user(cr.gid, &tcr->gid);
2179 unlock_user_struct(tcr, optval_addr, 1);
2180 if (put_user_u32(len, optlen)) {
2181 return -TARGET_EFAULT;
2183 break;
2185 case TARGET_SO_LINGER:
2187 struct linger lg;
2188 socklen_t lglen;
2189 struct target_linger *tlg;
2191 if (get_user_u32(len, optlen)) {
2192 return -TARGET_EFAULT;
2194 if (len < 0) {
2195 return -TARGET_EINVAL;
2198 lglen = sizeof(lg);
2199 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2200 &lg, &lglen));
2201 if (ret < 0) {
2202 return ret;
2204 if (len > lglen) {
2205 len = lglen;
2207 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2208 return -TARGET_EFAULT;
2210 __put_user(lg.l_onoff, &tlg->l_onoff);
2211 __put_user(lg.l_linger, &tlg->l_linger);
2212 unlock_user_struct(tlg, optval_addr, 1);
2213 if (put_user_u32(len, optlen)) {
2214 return -TARGET_EFAULT;
2216 break;
2218 /* Options with 'int' argument. */
2219 case TARGET_SO_DEBUG:
2220 optname = SO_DEBUG;
2221 goto int_case;
2222 case TARGET_SO_REUSEADDR:
2223 optname = SO_REUSEADDR;
2224 goto int_case;
2225 case TARGET_SO_TYPE:
2226 optname = SO_TYPE;
2227 goto int_case;
2228 case TARGET_SO_ERROR:
2229 optname = SO_ERROR;
2230 goto int_case;
2231 case TARGET_SO_DONTROUTE:
2232 optname = SO_DONTROUTE;
2233 goto int_case;
2234 case TARGET_SO_BROADCAST:
2235 optname = SO_BROADCAST;
2236 goto int_case;
2237 case TARGET_SO_SNDBUF:
2238 optname = SO_SNDBUF;
2239 goto int_case;
2240 case TARGET_SO_RCVBUF:
2241 optname = SO_RCVBUF;
2242 goto int_case;
2243 case TARGET_SO_KEEPALIVE:
2244 optname = SO_KEEPALIVE;
2245 goto int_case;
2246 case TARGET_SO_OOBINLINE:
2247 optname = SO_OOBINLINE;
2248 goto int_case;
2249 case TARGET_SO_NO_CHECK:
2250 optname = SO_NO_CHECK;
2251 goto int_case;
2252 case TARGET_SO_PRIORITY:
2253 optname = SO_PRIORITY;
2254 goto int_case;
2255 #ifdef SO_BSDCOMPAT
2256 case TARGET_SO_BSDCOMPAT:
2257 optname = SO_BSDCOMPAT;
2258 goto int_case;
2259 #endif
2260 case TARGET_SO_PASSCRED:
2261 optname = SO_PASSCRED;
2262 goto int_case;
2263 case TARGET_SO_TIMESTAMP:
2264 optname = SO_TIMESTAMP;
2265 goto int_case;
2266 case TARGET_SO_RCVLOWAT:
2267 optname = SO_RCVLOWAT;
2268 goto int_case;
2269 case TARGET_SO_ACCEPTCONN:
2270 optname = SO_ACCEPTCONN;
2271 goto int_case;
2272 default:
2273 goto int_case;
2275 break;
2276 case SOL_TCP:
2277 /* TCP options all take an 'int' value. */
2278 int_case:
2279 if (get_user_u32(len, optlen))
2280 return -TARGET_EFAULT;
2281 if (len < 0)
2282 return -TARGET_EINVAL;
2283 lv = sizeof(lv);
2284 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2285 if (ret < 0)
2286 return ret;
2287 if (optname == SO_TYPE) {
2288 val = host_to_target_sock_type(val);
2290 if (len > lv)
2291 len = lv;
2292 if (len == 4) {
2293 if (put_user_u32(val, optval_addr))
2294 return -TARGET_EFAULT;
2295 } else {
2296 if (put_user_u8(val, optval_addr))
2297 return -TARGET_EFAULT;
2299 if (put_user_u32(len, optlen))
2300 return -TARGET_EFAULT;
2301 break;
2302 case SOL_IP:
2303 switch(optname) {
2304 case IP_TOS:
2305 case IP_TTL:
2306 case IP_HDRINCL:
2307 case IP_ROUTER_ALERT:
2308 case IP_RECVOPTS:
2309 case IP_RETOPTS:
2310 case IP_PKTINFO:
2311 case IP_MTU_DISCOVER:
2312 case IP_RECVERR:
2313 case IP_RECVTOS:
2314 #ifdef IP_FREEBIND
2315 case IP_FREEBIND:
2316 #endif
2317 case IP_MULTICAST_TTL:
2318 case IP_MULTICAST_LOOP:
2319 if (get_user_u32(len, optlen))
2320 return -TARGET_EFAULT;
2321 if (len < 0)
2322 return -TARGET_EINVAL;
2323 lv = sizeof(lv);
2324 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2325 if (ret < 0)
2326 return ret;
2327 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2328 len = 1;
2329 if (put_user_u32(len, optlen)
2330 || put_user_u8(val, optval_addr))
2331 return -TARGET_EFAULT;
2332 } else {
2333 if (len > sizeof(int))
2334 len = sizeof(int);
2335 if (put_user_u32(len, optlen)
2336 || put_user_u32(val, optval_addr))
2337 return -TARGET_EFAULT;
2339 break;
2340 default:
2341 ret = -TARGET_ENOPROTOOPT;
2342 break;
2344 break;
2345 default:
2346 unimplemented:
2347 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2348 level, optname);
2349 ret = -TARGET_EOPNOTSUPP;
2350 break;
2352 return ret;
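/*
 * The narrowing logic above (in both the generic int_case path and the
 * SOL_IP path) mirrors kernel behaviour: a guest may legitimately read
 * an int-valued option into a single byte, e.g.
 *
 *     unsigned char tos;
 *     socklen_t len = 1;
 *     getsockopt(fd, IPPROTO_IP, IP_TOS, &tos, &len);
 *
 * in which case only one byte is stored and the reported length stays
 * at 1; values that don't fit a narrow read get the full-width store.
 */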
2355 /* Convert a target low/high pair representing a file offset into the
2356 * host low/high pair. This function doesn't handle offsets bigger than
2357 * 64 bits, as the kernel doesn't handle them either.
2359 static void target_to_host_low_high(abi_ulong tlow,
2360 abi_ulong thigh,
2361 unsigned long *hlow,
2362 unsigned long *hhigh)
2364 uint64_t off = tlow |
2365 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2366 TARGET_LONG_BITS / 2;
2368 *hlow = off;
2369 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
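/*
 * Worked example (32-bit target on a 64-bit host): for tlow=0x89abcdef
 * and thigh=0x01234567, the guest offset reassembles to
 * off = 0x0123456789abcdef, giving *hlow = 0x0123456789abcdef and
 * *hhigh = 0. Shifting by TARGET_LONG_BITS / 2 twice (rather than by
 * TARGET_LONG_BITS once) avoids undefined behaviour when the shift
 * count would otherwise equal the width of the type, as it would for
 * 64-bit targets, where tlow already holds the whole offset.
 */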
2372 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2373 abi_ulong count, int copy)
2375 struct target_iovec *target_vec;
2376 struct iovec *vec;
2377 abi_ulong total_len, max_len;
2378 int i;
2379 int err = 0;
2380 bool bad_address = false;
2382 if (count == 0) {
2383 errno = 0;
2384 return NULL;
2386 if (count > IOV_MAX) {
2387 errno = EINVAL;
2388 return NULL;
2391 vec = g_try_new0(struct iovec, count);
2392 if (vec == NULL) {
2393 errno = ENOMEM;
2394 return NULL;
2397 target_vec = lock_user(VERIFY_READ, target_addr,
2398 count * sizeof(struct target_iovec), 1);
2399 if (target_vec == NULL) {
2400 err = EFAULT;
2401 goto fail2;
2404 /* ??? If host page size > target page size, this will result in a
2405 value larger than what we can actually support. */
2406 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2407 total_len = 0;
2409 for (i = 0; i < count; i++) {
2410 abi_ulong base = tswapal(target_vec[i].iov_base);
2411 abi_long len = tswapal(target_vec[i].iov_len);
2413 if (len < 0) {
2414 err = EINVAL;
2415 goto fail;
2416 } else if (len == 0) {
2417 /* A zero-length entry is ignored. */
2418 vec[i].iov_base = 0;
2419 } else {
2420 vec[i].iov_base = lock_user(type, base, len, copy);
2421 /* If the first buffer pointer is bad, this is a fault. But
2422 * subsequent bad buffers will result in a partial write; this
2423 * is realized by filling the vector with null pointers and
2424 * zero lengths. */
2425 if (!vec[i].iov_base) {
2426 if (i == 0) {
2427 err = EFAULT;
2428 goto fail;
2429 } else {
2430 bad_address = true;
2433 if (bad_address) {
2434 len = 0;
2436 if (len > max_len - total_len) {
2437 len = max_len - total_len;
2440 vec[i].iov_len = len;
2441 total_len += len;
2444 unlock_user(target_vec, target_addr, 0);
2445 return vec;
2447 fail:
2448 while (--i >= 0) {
2449 if (tswapal(target_vec[i].iov_len) > 0) {
2450 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2453 unlock_user(target_vec, target_addr, 0);
2454 fail2:
2455 g_free(vec);
2456 errno = err;
2457 return NULL;
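/*
 * To recap the error contract of lock_iovec(): count == 0 yields NULL
 * with errno cleared; count > IOV_MAX yields EINVAL; a bad pointer in
 * the *first* entry is a hard EFAULT, while later bad entries merely
 * become zero-length slots so the guest sees a partial read or write,
 * matching the kernel's readv/writev behaviour described above.
 */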
2460 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2461 abi_ulong count, int copy)
2463 struct target_iovec *target_vec;
2464 int i;
2466 target_vec = lock_user(VERIFY_READ, target_addr,
2467 count * sizeof(struct target_iovec), 1);
2468 if (target_vec) {
2469 for (i = 0; i < count; i++) {
2470 abi_ulong base = tswapal(target_vec[i].iov_base);
2471 abi_long len = tswapal(target_vec[i].iov_len);
2472 if (len < 0) {
2473 break;
2475 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2477 unlock_user(target_vec, target_addr, 0);
2480 g_free(vec);
2483 static inline int target_to_host_sock_type(int *type)
2485 int host_type = 0;
2486 int target_type = *type;
2488 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2489 case TARGET_SOCK_DGRAM:
2490 host_type = SOCK_DGRAM;
2491 break;
2492 case TARGET_SOCK_STREAM:
2493 host_type = SOCK_STREAM;
2494 break;
2495 default:
2496 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2497 break;
2499 if (target_type & TARGET_SOCK_CLOEXEC) {
2500 #if defined(SOCK_CLOEXEC)
2501 host_type |= SOCK_CLOEXEC;
2502 #else
2503 return -TARGET_EINVAL;
2504 #endif
2506 if (target_type & TARGET_SOCK_NONBLOCK) {
2507 #if defined(SOCK_NONBLOCK)
2508 host_type |= SOCK_NONBLOCK;
2509 #elif !defined(O_NONBLOCK)
2510 return -TARGET_EINVAL;
2511 #endif
2513 *type = host_type;
2514 return 0;
2517 /* Try to emulate socket type flags after socket creation. */
2518 static int sock_flags_fixup(int fd, int target_type)
2520 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2521 if (target_type & TARGET_SOCK_NONBLOCK) {
2522 int flags = fcntl(fd, F_GETFL);
2523 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2524 close(fd);
2525 return -TARGET_EINVAL;
2528 #endif
2529 return fd;
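/*
 * Taken together, target_to_host_sock_type() and sock_flags_fixup()
 * handle a guest request such as
 *
 *     socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
 *
 * the base type is translated directly, SOCK_CLOEXEC/SOCK_NONBLOCK are
 * passed through when the host supports them, and on hosts without
 * SOCK_NONBLOCK the flag is emulated after creation via
 * fcntl(F_SETFL, O_NONBLOCK).
 */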
2532 /* do_socket() must return target values and target errnos. */
2533 static abi_long do_socket(int domain, int type, int protocol)
2535 int target_type = type;
2536 int ret;
2538 ret = target_to_host_sock_type(&type);
2539 if (ret) {
2540 return ret;
2543 if (domain == PF_NETLINK && !(
2544 #ifdef CONFIG_RTNETLINK
2545 protocol == NETLINK_ROUTE ||
2546 #endif
2547 protocol == NETLINK_KOBJECT_UEVENT ||
2548 protocol == NETLINK_AUDIT)) {
2549 return -EPFNOSUPPORT;
2552 if (domain == AF_PACKET ||
2553 (domain == AF_INET && type == SOCK_PACKET)) {
2554 protocol = tswap16(protocol);
2557 ret = get_errno(socket(domain, type, protocol));
2558 if (ret >= 0) {
2559 ret = sock_flags_fixup(ret, target_type);
2560 if (type == SOCK_PACKET) {
2561 /* Handle an obsolete case: if the socket
2562 * type is SOCK_PACKET, bind by name.
2564 fd_trans_register(ret, &target_packet_trans);
2565 } else if (domain == PF_NETLINK) {
2566 switch (protocol) {
2567 #ifdef CONFIG_RTNETLINK
2568 case NETLINK_ROUTE:
2569 fd_trans_register(ret, &target_netlink_route_trans);
2570 break;
2571 #endif
2572 case NETLINK_KOBJECT_UEVENT:
2573 /* nothing to do: messages are strings */
2574 break;
2575 case NETLINK_AUDIT:
2576 fd_trans_register(ret, &target_netlink_audit_trans);
2577 break;
2578 default:
2579 g_assert_not_reached();
2583 return ret;
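/*
 * Only a small whitelist of PF_NETLINK protocols is supported here
 * (NETLINK_ROUTE when CONFIG_RTNETLINK is set, NETLINK_KOBJECT_UEVENT
 * and NETLINK_AUDIT); anything else fails with EPFNOSUPPORT rather
 * than letting untranslated netlink traffic through. Supported
 * protocols get an fd translator registered so their messages can be
 * converted on the way in and out.
 */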
2586 /* do_bind() must return target values and target errnos. */
2587 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2588 socklen_t addrlen)
2590 void *addr;
2591 abi_long ret;
2593 if ((int)addrlen < 0) {
2594 return -TARGET_EINVAL;
2597 addr = alloca(addrlen+1);
2599 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2600 if (ret)
2601 return ret;
2603 return get_errno(bind(sockfd, addr, addrlen));
2606 /* do_connect() must return target values and target errnos. */
2607 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2608 socklen_t addrlen)
2610 void *addr;
2611 abi_long ret;
2613 if ((int)addrlen < 0) {
2614 return -TARGET_EINVAL;
2617 addr = alloca(addrlen+1);
2619 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2620 if (ret)
2621 return ret;
2623 return get_errno(safe_connect(sockfd, addr, addrlen));
2626 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2627 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2628 int flags, int send)
2630 abi_long ret, len;
2631 struct msghdr msg;
2632 abi_ulong count;
2633 struct iovec *vec;
2634 abi_ulong target_vec;
2636 if (msgp->msg_name) {
2637 msg.msg_namelen = tswap32(msgp->msg_namelen);
2638 msg.msg_name = alloca(msg.msg_namelen+1);
2639 ret = target_to_host_sockaddr(fd, msg.msg_name,
2640 tswapal(msgp->msg_name),
2641 msg.msg_namelen);
2642 if (ret == -TARGET_EFAULT) {
2643 /* For connected sockets msg_name and msg_namelen must
2644 * be ignored, so returning EFAULT immediately is wrong.
2645 * Instead, pass a bad msg_name to the host kernel, and
2646 * let it decide whether to return EFAULT or not.
2648 msg.msg_name = (void *)-1;
2649 } else if (ret) {
2650 goto out2;
2652 } else {
2653 msg.msg_name = NULL;
2654 msg.msg_namelen = 0;
2656 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2657 msg.msg_control = alloca(msg.msg_controllen);
2658 memset(msg.msg_control, 0, msg.msg_controllen);
2660 msg.msg_flags = tswap32(msgp->msg_flags);
2662 count = tswapal(msgp->msg_iovlen);
2663 target_vec = tswapal(msgp->msg_iov);
2665 if (count > IOV_MAX) {
2666 /* sendmsg/recvmsg return a different errno for this condition than
2667 * readv/writev, so we must catch it here before lock_iovec() does.
2669 ret = -TARGET_EMSGSIZE;
2670 goto out2;
2673 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2674 target_vec, count, send);
2675 if (vec == NULL) {
2676 ret = -host_to_target_errno(errno);
2677 goto out2;
2679 msg.msg_iovlen = count;
2680 msg.msg_iov = vec;
2682 if (send) {
2683 if (fd_trans_target_to_host_data(fd)) {
2684 void *host_msg;
2686 host_msg = g_malloc(msg.msg_iov->iov_len);
2687 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2688 ret = fd_trans_target_to_host_data(fd)(host_msg,
2689 msg.msg_iov->iov_len);
2690 if (ret >= 0) {
2691 msg.msg_iov->iov_base = host_msg;
2692 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2694 g_free(host_msg);
2695 } else {
2696 ret = target_to_host_cmsg(&msg, msgp);
2697 if (ret == 0) {
2698 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2701 } else {
2702 ret = get_errno(safe_recvmsg(fd, &msg, flags));
2703 if (!is_error(ret)) {
2704 len = ret;
2705 if (fd_trans_host_to_target_data(fd)) {
2706 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2707 MIN(msg.msg_iov->iov_len, len));
2708 } else {
2709 ret = host_to_target_cmsg(msgp, &msg);
2711 if (!is_error(ret)) {
2712 msgp->msg_namelen = tswap32(msg.msg_namelen);
2713 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2714 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2715 msg.msg_name, msg.msg_namelen);
2716 if (ret) {
2717 goto out;
2721 ret = len;
2726 out:
2727 unlock_iovec(vec, target_vec, count, !send);
2728 out2:
2729 return ret;
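/*
 * A note on the control buffer sizing above: the host buffer is
 * allocated at twice the target's msg_controllen, presumably to leave
 * headroom for host cmsg headers and alignment that may be larger than
 * the target's; the actual conversion in each direction is done by
 * target_to_host_cmsg() and host_to_target_cmsg().
 */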
2732 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2733 int flags, int send)
2735 abi_long ret;
2736 struct target_msghdr *msgp;
2738 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2739 msgp,
2740 target_msg,
2741 send ? 1 : 0)) {
2742 return -TARGET_EFAULT;
2744 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2745 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2746 return ret;
2749 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2750 * so it might not have this *mmsg-specific flag either.
2752 #ifndef MSG_WAITFORONE
2753 #define MSG_WAITFORONE 0x10000
2754 #endif
2756 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2757 unsigned int vlen, unsigned int flags,
2758 int send)
2760 struct target_mmsghdr *mmsgp;
2761 abi_long ret = 0;
2762 int i;
2764 if (vlen > UIO_MAXIOV) {
2765 vlen = UIO_MAXIOV;
2768 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2769 if (!mmsgp) {
2770 return -TARGET_EFAULT;
2773 for (i = 0; i < vlen; i++) {
2774 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2775 if (is_error(ret)) {
2776 break;
2778 mmsgp[i].msg_len = tswap32(ret);
2779 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2780 if (flags & MSG_WAITFORONE) {
2781 flags |= MSG_DONTWAIT;
2785 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2787 /* Return the number of datagrams sent or received if any succeeded;
2788 * otherwise return the error.
2790 if (i) {
2791 return i;
2793 return ret;
2796 /* do_accept4() must return target values and target errnos. */
2797 static abi_long do_accept4(int fd, abi_ulong target_addr,
2798 abi_ulong target_addrlen_addr, int flags)
2800 socklen_t addrlen;
2801 void *addr;
2802 abi_long ret;
2803 int host_flags;
2805 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2807 if (target_addr == 0) {
2808 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2811 /* Linux returns EINVAL if the addrlen pointer is invalid. */
2812 if (get_user_u32(addrlen, target_addrlen_addr))
2813 return -TARGET_EINVAL;
2815 if ((int)addrlen < 0) {
2816 return -TARGET_EINVAL;
2819 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2820 return -TARGET_EINVAL;
2822 addr = alloca(addrlen);
2824 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2825 if (!is_error(ret)) {
2826 host_to_target_sockaddr(target_addr, addr, addrlen);
2827 if (put_user_u32(addrlen, target_addrlen_addr))
2828 ret = -TARGET_EFAULT;
2830 return ret;
2833 /* do_getpeername() must return target values and target errnos. */
2834 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2835 abi_ulong target_addrlen_addr)
2837 socklen_t addrlen;
2838 void *addr;
2839 abi_long ret;
2841 if (get_user_u32(addrlen, target_addrlen_addr))
2842 return -TARGET_EFAULT;
2844 if ((int)addrlen < 0) {
2845 return -TARGET_EINVAL;
2848 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2849 return -TARGET_EFAULT;
2851 addr = alloca(addrlen);
2853 ret = get_errno(getpeername(fd, addr, &addrlen));
2854 if (!is_error(ret)) {
2855 host_to_target_sockaddr(target_addr, addr, addrlen);
2856 if (put_user_u32(addrlen, target_addrlen_addr))
2857 ret = -TARGET_EFAULT;
2859 return ret;
2862 /* do_getsockname() must return target values and target errnos. */
2863 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2864 abi_ulong target_addrlen_addr)
2866 socklen_t addrlen;
2867 void *addr;
2868 abi_long ret;
2870 if (get_user_u32(addrlen, target_addrlen_addr))
2871 return -TARGET_EFAULT;
2873 if ((int)addrlen < 0) {
2874 return -TARGET_EINVAL;
2877 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2878 return -TARGET_EFAULT;
2880 addr = alloca(addrlen);
2882 ret = get_errno(getsockname(fd, addr, &addrlen));
2883 if (!is_error(ret)) {
2884 host_to_target_sockaddr(target_addr, addr, addrlen);
2885 if (put_user_u32(addrlen, target_addrlen_addr))
2886 ret = -TARGET_EFAULT;
2888 return ret;
2891 /* do_socketpair() must return target values and target errnos. */
2892 static abi_long do_socketpair(int domain, int type, int protocol,
2893 abi_ulong target_tab_addr)
2895 int tab[2];
2896 abi_long ret;
2898 target_to_host_sock_type(&type);
2900 ret = get_errno(socketpair(domain, type, protocol, tab));
2901 if (!is_error(ret)) {
2902 if (put_user_s32(tab[0], target_tab_addr)
2903 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2904 ret = -TARGET_EFAULT;
2906 return ret;
2909 /* do_sendto() must return target values and target errnos. */
2910 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2911 abi_ulong target_addr, socklen_t addrlen)
2913 void *addr;
2914 void *host_msg;
2915 void *copy_msg = NULL;
2916 abi_long ret;
2918 if ((int)addrlen < 0) {
2919 return -TARGET_EINVAL;
2922 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2923 if (!host_msg)
2924 return -TARGET_EFAULT;
2925 if (fd_trans_target_to_host_data(fd)) {
2926 copy_msg = host_msg;
2927 host_msg = g_malloc(len);
2928 memcpy(host_msg, copy_msg, len);
2929 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2930 if (ret < 0) {
2931 goto fail;
2934 if (target_addr) {
2935 addr = alloca(addrlen+1);
2936 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2937 if (ret) {
2938 goto fail;
2940 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2941 } else {
2942 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2944 fail:
2945 if (copy_msg) {
2946 g_free(host_msg);
2947 host_msg = copy_msg;
2949 unlock_user(host_msg, msg, 0);
2950 return ret;
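/*
 * When an fd translator is registered (netlink sockets, for example),
 * the payload is first duplicated into a heap buffer and the
 * translation is applied to the copy, so the guest's own buffer is
 * never modified; the original mapping is restored before unlock_user().
 */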
2953 /* do_recvfrom() must return target values and target errnos. */
2954 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2955 abi_ulong target_addr,
2956 abi_ulong target_addrlen)
2958 socklen_t addrlen;
2959 void *addr;
2960 void *host_msg;
2961 abi_long ret;
2963 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2964 if (!host_msg)
2965 return -TARGET_EFAULT;
2966 if (target_addr) {
2967 if (get_user_u32(addrlen, target_addrlen)) {
2968 ret = -TARGET_EFAULT;
2969 goto fail;
2971 if ((int)addrlen < 0) {
2972 ret = -TARGET_EINVAL;
2973 goto fail;
2975 addr = alloca(addrlen);
2976 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
2977 addr, &addrlen));
2978 } else {
2979 addr = NULL; /* To keep compiler quiet. */
2980 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
2982 if (!is_error(ret)) {
2983 if (fd_trans_host_to_target_data(fd)) {
2984 abi_long trans;
2985 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
2986 if (is_error(trans)) {
2987 ret = trans;
2988 goto fail;
2991 if (target_addr) {
2992 host_to_target_sockaddr(target_addr, addr, addrlen);
2993 if (put_user_u32(addrlen, target_addrlen)) {
2994 ret = -TARGET_EFAULT;
2995 goto fail;
2998 unlock_user(host_msg, msg, len);
2999 } else {
3000 fail:
3001 unlock_user(host_msg, msg, 0);
3003 return ret;
3006 #ifdef TARGET_NR_socketcall
3007 /* do_socketcall() must return target values and target errnos. */
3008 static abi_long do_socketcall(int num, abi_ulong vptr)
3010 static const unsigned nargs[] = { /* number of arguments per operation */
3011 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3012 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3013 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3014 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3015 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3016 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3017 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3018 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3019 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3020 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3021 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3022 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3023 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3024 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3025 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3026 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3027 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3028 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3029 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3030 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3032 abi_long a[6]; /* max 6 args */
3033 unsigned i;
3035 /* check the range of the first argument num */
3036 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3037 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3038 return -TARGET_EINVAL;
3040 /* ensure we have space for args */
3041 if (nargs[num] > ARRAY_SIZE(a)) {
3042 return -TARGET_EINVAL;
3044 /* collect the arguments in a[] according to nargs[] */
3045 for (i = 0; i < nargs[num]; ++i) {
3046 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3047 return -TARGET_EFAULT;
3050 /* now when we have the args, invoke the appropriate underlying function */
3051 switch (num) {
3052 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3053 return do_socket(a[0], a[1], a[2]);
3054 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3055 return do_bind(a[0], a[1], a[2]);
3056 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3057 return do_connect(a[0], a[1], a[2]);
3058 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3059 return get_errno(listen(a[0], a[1]));
3060 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3061 return do_accept4(a[0], a[1], a[2], 0);
3062 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3063 return do_getsockname(a[0], a[1], a[2]);
3064 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3065 return do_getpeername(a[0], a[1], a[2]);
3066 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3067 return do_socketpair(a[0], a[1], a[2], a[3]);
3068 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3069 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3070 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3071 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3072 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3073 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3074 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3075 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3076 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3077 return get_errno(shutdown(a[0], a[1]));
3078 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3079 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3080 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3081 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3082 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3083 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3084 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3085 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3086 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3087 return do_accept4(a[0], a[1], a[2], a[3]);
3088 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3089 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3090 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3091 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3092 default:
3093 gemu_log("Unsupported socketcall: %d\n", num);
3094 return -TARGET_EINVAL;
3097 #endif
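/*
 * For reference, on targets that use the multiplexed socketcall ABI
 * (i386, for instance) a plain connect() from the guest arrives here
 * roughly as (illustrative guest-side sketch):
 *
 *     unsigned long args[3] = { fd, (unsigned long)sa, salen };
 *     syscall(__NR_socketcall, SYS_CONNECT, args);
 *
 * do_socketcall() fetches nargs[num] words from guest memory with
 * get_user_ual() and then dispatches, here to do_connect().
 */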
3099 #define N_SHM_REGIONS 32
3101 static struct shm_region {
3102 abi_ulong start;
3103 abi_ulong size;
3104 bool in_use;
3105 } shm_regions[N_SHM_REGIONS];
3107 #ifndef TARGET_SEMID64_DS
3108 /* asm-generic version of this struct */
3109 struct target_semid64_ds
3111 struct target_ipc_perm sem_perm;
3112 abi_ulong sem_otime;
3113 #if TARGET_ABI_BITS == 32
3114 abi_ulong __unused1;
3115 #endif
3116 abi_ulong sem_ctime;
3117 #if TARGET_ABI_BITS == 32
3118 abi_ulong __unused2;
3119 #endif
3120 abi_ulong sem_nsems;
3121 abi_ulong __unused3;
3122 abi_ulong __unused4;
3124 #endif
3126 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3127 abi_ulong target_addr)
3129 struct target_ipc_perm *target_ip;
3130 struct target_semid64_ds *target_sd;
3132 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3133 return -TARGET_EFAULT;
3134 target_ip = &(target_sd->sem_perm);
3135 host_ip->__key = tswap32(target_ip->__key);
3136 host_ip->uid = tswap32(target_ip->uid);
3137 host_ip->gid = tswap32(target_ip->gid);
3138 host_ip->cuid = tswap32(target_ip->cuid);
3139 host_ip->cgid = tswap32(target_ip->cgid);
3140 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3141 host_ip->mode = tswap32(target_ip->mode);
3142 #else
3143 host_ip->mode = tswap16(target_ip->mode);
3144 #endif
3145 #if defined(TARGET_PPC)
3146 host_ip->__seq = tswap32(target_ip->__seq);
3147 #else
3148 host_ip->__seq = tswap16(target_ip->__seq);
3149 #endif
3150 unlock_user_struct(target_sd, target_addr, 0);
3151 return 0;
3154 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3155 struct ipc_perm *host_ip)
3157 struct target_ipc_perm *target_ip;
3158 struct target_semid64_ds *target_sd;
3160 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3161 return -TARGET_EFAULT;
3162 target_ip = &(target_sd->sem_perm);
3163 target_ip->__key = tswap32(host_ip->__key);
3164 target_ip->uid = tswap32(host_ip->uid);
3165 target_ip->gid = tswap32(host_ip->gid);
3166 target_ip->cuid = tswap32(host_ip->cuid);
3167 target_ip->cgid = tswap32(host_ip->cgid);
3168 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3169 target_ip->mode = tswap32(host_ip->mode);
3170 #else
3171 target_ip->mode = tswap16(host_ip->mode);
3172 #endif
3173 #if defined(TARGET_PPC)
3174 target_ip->__seq = tswap32(host_ip->__seq);
3175 #else
3176 target_ip->__seq = tswap16(host_ip->__seq);
3177 #endif
3178 unlock_user_struct(target_sd, target_addr, 1);
3179 return 0;
3182 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3183 abi_ulong target_addr)
3185 struct target_semid64_ds *target_sd;
3187 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3188 return -TARGET_EFAULT;
3189 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3190 return -TARGET_EFAULT;
3191 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3192 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3193 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3194 unlock_user_struct(target_sd, target_addr, 0);
3195 return 0;
3198 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3199 struct semid_ds *host_sd)
3201 struct target_semid64_ds *target_sd;
3203 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3204 return -TARGET_EFAULT;
3205 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3206 return -TARGET_EFAULT;
3207 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3208 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3209 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3210 unlock_user_struct(target_sd, target_addr, 1);
3211 return 0;
3214 struct target_seminfo {
3215 int semmap;
3216 int semmni;
3217 int semmns;
3218 int semmnu;
3219 int semmsl;
3220 int semopm;
3221 int semume;
3222 int semusz;
3223 int semvmx;
3224 int semaem;
3227 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3228 struct seminfo *host_seminfo)
3230 struct target_seminfo *target_seminfo;
3231 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3232 return -TARGET_EFAULT;
3233 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3234 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3235 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3236 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3237 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3238 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3239 __put_user(host_seminfo->semume, &target_seminfo->semume);
3240 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3241 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3242 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3243 unlock_user_struct(target_seminfo, target_addr, 1);
3244 return 0;
3247 union semun {
3248 int val;
3249 struct semid_ds *buf;
3250 unsigned short *array;
3251 struct seminfo *__buf;
3254 union target_semun {
3255 int val;
3256 abi_ulong buf;
3257 abi_ulong array;
3258 abi_ulong __buf;
3261 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3262 abi_ulong target_addr)
3264 int nsems;
3265 unsigned short *array;
3266 union semun semun;
3267 struct semid_ds semid_ds;
3268 int i, ret;
3270 semun.buf = &semid_ds;
3272 ret = semctl(semid, 0, IPC_STAT, semun);
3273 if (ret == -1)
3274 return get_errno(ret);
3276 nsems = semid_ds.sem_nsems;
3278 *host_array = g_try_new(unsigned short, nsems);
3279 if (!*host_array) {
3280 return -TARGET_ENOMEM;
3282 array = lock_user(VERIFY_READ, target_addr,
3283 nsems*sizeof(unsigned short), 1);
3284 if (!array) {
3285 g_free(*host_array);
3286 return -TARGET_EFAULT;
3289 for(i=0; i<nsems; i++) {
3290 __get_user((*host_array)[i], &array[i]);
3292 unlock_user(array, target_addr, 0);
3294 return 0;
3297 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3298 unsigned short **host_array)
3300 int nsems;
3301 unsigned short *array;
3302 union semun semun;
3303 struct semid_ds semid_ds;
3304 int i, ret;
3306 semun.buf = &semid_ds;
3308 ret = semctl(semid, 0, IPC_STAT, semun);
3309 if (ret == -1)
3310 return get_errno(ret);
3312 nsems = semid_ds.sem_nsems;
3314 array = lock_user(VERIFY_WRITE, target_addr,
3315 nsems*sizeof(unsigned short), 0);
3316 if (!array)
3317 return -TARGET_EFAULT;
3319 for(i=0; i<nsems; i++) {
3320 __put_user((*host_array)[i], &array[i]);
3322 g_free(*host_array);
3323 unlock_user(array, target_addr, 1);
3325 return 0;
3328 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3329 abi_ulong target_arg)
3331 union target_semun target_su = { .buf = target_arg };
3332 union semun arg;
3333 struct semid_ds dsarg;
3334 unsigned short *array = NULL;
3335 struct seminfo seminfo;
3336 abi_long ret = -TARGET_EINVAL;
3337 abi_long err;
3338 cmd &= 0xff;
3340 switch( cmd ) {
3341 case GETVAL:
3342 case SETVAL:
3343 /* In 64 bit cross-endian situations, we will erroneously pick up
3344 * the wrong half of the union for the "val" element. To rectify
3345 * this, the entire 8-byte structure is byteswapped, followed by
3346 * a swap of the 4 byte val field. In other cases, the data is
3347 * already in proper host byte order. */
3348 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3349 target_su.buf = tswapal(target_su.buf);
3350 arg.val = tswap32(target_su.val);
3351 } else {
3352 arg.val = target_su.val;
3354 ret = get_errno(semctl(semid, semnum, cmd, arg));
3355 break;
3356 case GETALL:
3357 case SETALL:
3358 err = target_to_host_semarray(semid, &array, target_su.array);
3359 if (err)
3360 return err;
3361 arg.array = array;
3362 ret = get_errno(semctl(semid, semnum, cmd, arg));
3363 err = host_to_target_semarray(semid, target_su.array, &array);
3364 if (err)
3365 return err;
3366 break;
3367 case IPC_STAT:
3368 case IPC_SET:
3369 case SEM_STAT:
3370 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3371 if (err)
3372 return err;
3373 arg.buf = &dsarg;
3374 ret = get_errno(semctl(semid, semnum, cmd, arg));
3375 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3376 if (err)
3377 return err;
3378 break;
3379 case IPC_INFO:
3380 case SEM_INFO:
3381 arg.__buf = &seminfo;
3382 ret = get_errno(semctl(semid, semnum, cmd, arg));
3383 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3384 if (err)
3385 return err;
3386 break;
3387 case IPC_RMID:
3388 case GETPID:
3389 case GETNCNT:
3390 case GETZCNT:
3391 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3392 break;
3395 return ret;
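/*
 * Concrete case for the GETVAL/SETVAL swap above: on a 64-bit
 * big-endian guest running on a little-endian host, the guest's 4-byte
 * 'val' occupies the opposite half of the 8-byte semun from where the
 * host would read it; swapping the whole union with tswapal() and then
 * the val field with tswap32() lands it in the right place. On
 * same-endian hosts the swaps are identity operations, and 32-bit
 * targets take the direct path.
 */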
3398 struct target_sembuf {
3399 unsigned short sem_num;
3400 short sem_op;
3401 short sem_flg;
3404 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3405 abi_ulong target_addr,
3406 unsigned nsops)
3408 struct target_sembuf *target_sembuf;
3409 int i;
3411 target_sembuf = lock_user(VERIFY_READ, target_addr,
3412 nsops*sizeof(struct target_sembuf), 1);
3413 if (!target_sembuf)
3414 return -TARGET_EFAULT;
3416 for(i=0; i<nsops; i++) {
3417 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3418 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3419 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3422 unlock_user(target_sembuf, target_addr, 0);
3424 return 0;
3427 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3429 struct sembuf sops[nsops];
3431 if (target_to_host_sembuf(sops, ptr, nsops))
3432 return -TARGET_EFAULT;
3434 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
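/*
 * A guest semop(semid, sops, nsops) is forwarded as semtimedop() with
 * a NULL timeout, which the kernel defines as equivalent to plain
 * semop(); routing it through safe_semtimedop() keeps the blocking
 * call well behaved with respect to guest signals, like the other
 * safe_*() wrappers.
 */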
3437 struct target_msqid_ds
3439 struct target_ipc_perm msg_perm;
3440 abi_ulong msg_stime;
3441 #if TARGET_ABI_BITS == 32
3442 abi_ulong __unused1;
3443 #endif
3444 abi_ulong msg_rtime;
3445 #if TARGET_ABI_BITS == 32
3446 abi_ulong __unused2;
3447 #endif
3448 abi_ulong msg_ctime;
3449 #if TARGET_ABI_BITS == 32
3450 abi_ulong __unused3;
3451 #endif
3452 abi_ulong __msg_cbytes;
3453 abi_ulong msg_qnum;
3454 abi_ulong msg_qbytes;
3455 abi_ulong msg_lspid;
3456 abi_ulong msg_lrpid;
3457 abi_ulong __unused4;
3458 abi_ulong __unused5;
3461 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3462 abi_ulong target_addr)
3464 struct target_msqid_ds *target_md;
3466 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3467 return -TARGET_EFAULT;
3468 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3469 return -TARGET_EFAULT;
3470 host_md->msg_stime = tswapal(target_md->msg_stime);
3471 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3472 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3473 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3474 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3475 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3476 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3477 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3478 unlock_user_struct(target_md, target_addr, 0);
3479 return 0;
3482 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3483 struct msqid_ds *host_md)
3485 struct target_msqid_ds *target_md;
3487 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3488 return -TARGET_EFAULT;
3489 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3490 return -TARGET_EFAULT;
3491 target_md->msg_stime = tswapal(host_md->msg_stime);
3492 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3493 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3494 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3495 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3496 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3497 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3498 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3499 unlock_user_struct(target_md, target_addr, 1);
3500 return 0;
3503 struct target_msginfo {
3504 int msgpool;
3505 int msgmap;
3506 int msgmax;
3507 int msgmnb;
3508 int msgmni;
3509 int msgssz;
3510 int msgtql;
3511 unsigned short int msgseg;
3514 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3515 struct msginfo *host_msginfo)
3517 struct target_msginfo *target_msginfo;
3518 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3519 return -TARGET_EFAULT;
3520 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3521 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3522 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3523 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3524 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3525 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3526 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3527 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3528 unlock_user_struct(target_msginfo, target_addr, 1);
3529 return 0;
3532 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3534 struct msqid_ds dsarg;
3535 struct msginfo msginfo;
3536 abi_long ret = -TARGET_EINVAL;
3538 cmd &= 0xff;
3540 switch (cmd) {
3541 case IPC_STAT:
3542 case IPC_SET:
3543 case MSG_STAT:
3544 if (target_to_host_msqid_ds(&dsarg,ptr))
3545 return -TARGET_EFAULT;
3546 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3547 if (host_to_target_msqid_ds(ptr,&dsarg))
3548 return -TARGET_EFAULT;
3549 break;
3550 case IPC_RMID:
3551 ret = get_errno(msgctl(msgid, cmd, NULL));
3552 break;
3553 case IPC_INFO:
3554 case MSG_INFO:
3555 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3556 if (host_to_target_msginfo(ptr, &msginfo))
3557 return -TARGET_EFAULT;
3558 break;
3561 return ret;
3564 struct target_msgbuf {
3565 abi_long mtype;
3566 char mtext[1];
3569 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3570 ssize_t msgsz, int msgflg)
3572 struct target_msgbuf *target_mb;
3573 struct msgbuf *host_mb;
3574 abi_long ret = 0;
3576 if (msgsz < 0) {
3577 return -TARGET_EINVAL;
3580 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3581 return -TARGET_EFAULT;
3582 host_mb = g_try_malloc(msgsz + sizeof(long));
3583 if (!host_mb) {
3584 unlock_user_struct(target_mb, msgp, 0);
3585 return -TARGET_ENOMEM;
3587 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3588 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3589 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3590 g_free(host_mb);
3591 unlock_user_struct(target_mb, msgp, 0);
3593 return ret;
3596 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3597 ssize_t msgsz, abi_long msgtyp,
3598 int msgflg)
3600 struct target_msgbuf *target_mb;
3601 char *target_mtext;
3602 struct msgbuf *host_mb;
3603 abi_long ret = 0;
3605 if (msgsz < 0) {
3606 return -TARGET_EINVAL;
3609 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3610 return -TARGET_EFAULT;
3612 host_mb = g_try_malloc(msgsz + sizeof(long));
3613 if (!host_mb) {
3614 ret = -TARGET_ENOMEM;
3615 goto end;
3617 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3619 if (ret > 0) {
3620 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3621 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3622 if (!target_mtext) {
3623 ret = -TARGET_EFAULT;
3624 goto end;
3626 memcpy(target_mb->mtext, host_mb->mtext, ret);
3627 unlock_user(target_mtext, target_mtext_addr, ret);
3630 target_mb->mtype = tswapal(host_mb->mtype);
3632 end:
3633 if (target_mb)
3634 unlock_user_struct(target_mb, msgp, 1);
3635 g_free(host_mb);
3636 return ret;
3639 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3640 abi_ulong target_addr)
3642 struct target_shmid_ds *target_sd;
3644 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3645 return -TARGET_EFAULT;
3646 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3647 return -TARGET_EFAULT;
3648 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3649 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3650 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3651 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3652 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3653 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3654 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3655 unlock_user_struct(target_sd, target_addr, 0);
3656 return 0;
3659 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3660 struct shmid_ds *host_sd)
3662 struct target_shmid_ds *target_sd;
3664 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3665 return -TARGET_EFAULT;
3666 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3667 return -TARGET_EFAULT;
3668 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3669 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3670 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3671 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3672 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3673 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3674 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3675 unlock_user_struct(target_sd, target_addr, 1);
3676 return 0;
3679 struct target_shminfo {
3680 abi_ulong shmmax;
3681 abi_ulong shmmin;
3682 abi_ulong shmmni;
3683 abi_ulong shmseg;
3684 abi_ulong shmall;
3687 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3688 struct shminfo *host_shminfo)
3690 struct target_shminfo *target_shminfo;
3691 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3692 return -TARGET_EFAULT;
3693 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3694 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3695 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3696 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3697 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3698 unlock_user_struct(target_shminfo, target_addr, 1);
3699 return 0;
3702 struct target_shm_info {
3703 int used_ids;
3704 abi_ulong shm_tot;
3705 abi_ulong shm_rss;
3706 abi_ulong shm_swp;
3707 abi_ulong swap_attempts;
3708 abi_ulong swap_successes;
3711 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3712 struct shm_info *host_shm_info)
3714 struct target_shm_info *target_shm_info;
3715 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3716 return -TARGET_EFAULT;
3717 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3718 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3719 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3720 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3721 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3722 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3723 unlock_user_struct(target_shm_info, target_addr, 1);
3724 return 0;
3727 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3729 struct shmid_ds dsarg;
3730 struct shminfo shminfo;
3731 struct shm_info shm_info;
3732 abi_long ret = -TARGET_EINVAL;
3734 cmd &= 0xff;
3736 switch(cmd) {
3737 case IPC_STAT:
3738 case IPC_SET:
3739 case SHM_STAT:
3740 if (target_to_host_shmid_ds(&dsarg, buf))
3741 return -TARGET_EFAULT;
3742 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3743 if (host_to_target_shmid_ds(buf, &dsarg))
3744 return -TARGET_EFAULT;
3745 break;
3746 case IPC_INFO:
3747 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3748 if (host_to_target_shminfo(buf, &shminfo))
3749 return -TARGET_EFAULT;
3750 break;
3751 case SHM_INFO:
3752 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3753 if (host_to_target_shm_info(buf, &shm_info))
3754 return -TARGET_EFAULT;
3755 break;
3756 case IPC_RMID:
3757 case SHM_LOCK:
3758 case SHM_UNLOCK:
3759 ret = get_errno(shmctl(shmid, cmd, NULL));
3760 break;
3763 return ret;
3766 #ifndef TARGET_FORCE_SHMLBA
3767 /* For most architectures, SHMLBA is the same as the page size;
3768 * some architectures have larger values, in which case they should
3769 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3770 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3771 * and defining its own value for SHMLBA.
3773 * The kernel also permits SHMLBA to be set by the architecture to a
3774 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3775 * this means that addresses are rounded to the large size if
3776 * SHM_RND is set but addresses not aligned to that size are not rejected
3777 * as long as they are at least page-aligned. Since the only architecture
3778 * which uses this is ia64, this code doesn't provide for that oddity.
3780 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3782 return TARGET_PAGE_SIZE;
3784 #endif
3786 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3787 int shmid, abi_ulong shmaddr, int shmflg)
3789 abi_long raddr;
3790 void *host_raddr;
3791 struct shmid_ds shm_info;
3792 int i,ret;
3793 abi_ulong shmlba;
3795 /* find out the length of the shared memory segment */
3796 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3797 if (is_error(ret)) {
3798 /* can't get length, bail out */
3799 return ret;
3802 shmlba = target_shmlba(cpu_env);
3804 if (shmaddr & (shmlba - 1)) {
3805 if (shmflg & SHM_RND) {
3806 shmaddr &= ~(shmlba - 1);
3807 } else {
3808 return -TARGET_EINVAL;
3811 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3812 return -TARGET_EINVAL;
3815 mmap_lock();
3817 if (shmaddr)
3818 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3819 else {
3820 abi_ulong mmap_start;
3822 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3824 if (mmap_start == -1) {
3825 errno = ENOMEM;
3826 host_raddr = (void *)-1;
3827 } else
3828 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3831 if (host_raddr == (void *)-1) {
3832 mmap_unlock();
3833 return get_errno((long)host_raddr);
3835 raddr=h2g((unsigned long)host_raddr);
3837 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3838 PAGE_VALID | PAGE_READ |
3839 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3841 for (i = 0; i < N_SHM_REGIONS; i++) {
3842 if (!shm_regions[i].in_use) {
3843 shm_regions[i].in_use = true;
3844 shm_regions[i].start = raddr;
3845 shm_regions[i].size = shm_info.shm_segsz;
3846 break;
3850 mmap_unlock();
3851 return raddr;
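/*
 * do_shmat() keeps the guest's view consistent in three steps: the
 * host attach is done at g2h(shmaddr) (or at a gap found by
 * mmap_find_vma() when the guest lets the kernel choose), the result
 * is translated back with h2g(), and page_set_flags() records the
 * mapping's permissions so QEMU's user-mode page tracking stays
 * accurate. The shm_regions[] table remembers the attach size, since
 * shmdt() only receives an address and do_shmdt() must know how many
 * pages to invalidate.
 */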
3855 static inline abi_long do_shmdt(abi_ulong shmaddr)
3857 int i;
3858 abi_long rv;
3860 mmap_lock();
3862 for (i = 0; i < N_SHM_REGIONS; ++i) {
3863 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3864 shm_regions[i].in_use = false;
3865 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3866 break;
3869 rv = get_errno(shmdt(g2h(shmaddr)));
3871 mmap_unlock();
3873 return rv;
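/*
 * Illustrative guest-side sequence (a sketch, not part of the emulator):
 * the helpers above back the guest's shmat()/shmdt(), so a guest program
 * running the following exercises do_shmat(), do_shmdt() and do_shmctl():
 */
#if 0
int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
void *p = shmat(id, NULL, 0);   /* emulated by do_shmat() */
/* ... use the segment ... */
shmdt(p);                       /* emulated by do_shmdt() */
shmctl(id, IPC_RMID, NULL);     /* emulated by do_shmctl() */
#endif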
3876 #ifdef TARGET_NR_ipc
3877 /* ??? This only works with linear mappings. */
3878 /* do_ipc() must return target values and target errnos. */
3879 static abi_long do_ipc(CPUArchState *cpu_env,
3880 unsigned int call, abi_long first,
3881 abi_long second, abi_long third,
3882 abi_long ptr, abi_long fifth)
3884 int version;
3885 abi_long ret = 0;
3887 version = call >> 16;
3888 call &= 0xffff;
3890 switch (call) {
3891 case IPCOP_semop:
3892 ret = do_semop(first, ptr, second);
3893 break;
3895 case IPCOP_semget:
3896 ret = get_errno(semget(first, second, third));
3897 break;
3899 case IPCOP_semctl: {
3900 /* The semun argument to semctl is passed by value, so dereference the
3901 * ptr argument. */
3902 abi_ulong atptr;
3903 get_user_ual(atptr, ptr);
3904 ret = do_semctl(first, second, third, atptr);
3905 break;
3908 case IPCOP_msgget:
3909 ret = get_errno(msgget(first, second));
3910 break;
3912 case IPCOP_msgsnd:
3913 ret = do_msgsnd(first, ptr, second, third);
3914 break;
3916 case IPCOP_msgctl:
3917 ret = do_msgctl(first, second, ptr);
3918 break;
3920 case IPCOP_msgrcv:
3921 switch (version) {
3922 case 0:
3924 struct target_ipc_kludge {
3925 abi_long msgp;
3926 abi_long msgtyp;
3927 } *tmp;
3929 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3930 ret = -TARGET_EFAULT;
3931 break;
3934 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3936 unlock_user_struct(tmp, ptr, 0);
3937 break;
3939 default:
3940 ret = do_msgrcv(first, ptr, second, fifth, third);
3942 break;
3944 case IPCOP_shmat:
3945 switch (version) {
3946 default:
3948 abi_ulong raddr;
3949 raddr = do_shmat(cpu_env, first, ptr, second);
3950 if (is_error(raddr))
3951 return get_errno(raddr);
3952 if (put_user_ual(raddr, third))
3953 return -TARGET_EFAULT;
3954 break;
3956 case 1:
3957 ret = -TARGET_EINVAL;
3958 break;
3960 break;
3961 case IPCOP_shmdt:
3962 ret = do_shmdt(ptr);
3963 break;
3965 case IPCOP_shmget:
3966 /* IPC_* flag values are the same on all Linux platforms */
3967 ret = get_errno(shmget(first, second, third));
3968 break;
3970 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3971 case IPCOP_shmctl:
3972 ret = do_shmctl(first, second, ptr);
3973 break;
3974 default:
3975 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3976 ret = -TARGET_ENOSYS;
3977 break;
3979 return ret;
3981 #endif
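/*
 * Illustrative guest-side call (a sketch): on architectures that use the
 * ipc(2) multiplexer, do_ipc() receives the operation in the low 16 bits
 * of 'call' and an ABI version in the high 16 bits. For example, a libc
 * attaching a segment could issue (argument names are illustrative):
 */
#if 0
syscall(__NR_ipc, IPCOP_shmat, shmid, shmflg, &raddr, shmaddr);
/* decoded above as first = shmid, second = shmflg, third = &raddr,
 * ptr = shmaddr; the attach address is written back through 'third' */
#endif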
3983 /* kernel structure types definitions */
3985 #define STRUCT(name, ...) STRUCT_ ## name,
3986 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3987 enum {
3988 #include "syscall_types.h"
3989 STRUCT_MAX
3991 #undef STRUCT
3992 #undef STRUCT_SPECIAL
3994 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3995 #define STRUCT_SPECIAL(name)
3996 #include "syscall_types.h"
3997 #undef STRUCT
3998 #undef STRUCT_SPECIAL
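/*
 * Illustrative expansion (an example entry, assuming the usual contents
 * of syscall_types.h): an entry such as
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 * contributes STRUCT_winsize to the enum above and, via the second
 * inclusion, produces a descriptor usable by the thunk converter:
 */
#if 0
static const argtype struct_winsize_def[] = {
    TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
};
#endif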
4000 typedef struct IOCTLEntry IOCTLEntry;
4002 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4003 int fd, int cmd, abi_long arg);
4005 struct IOCTLEntry {
4006 int target_cmd;
4007 unsigned int host_cmd;
4008 const char *name;
4009 int access;
4010 do_ioctl_fn *do_ioctl;
4011 const argtype arg_type[5];
4014 #define IOC_R 0x0001
4015 #define IOC_W 0x0002
4016 #define IOC_RW (IOC_R | IOC_W)
4018 #define MAX_STRUCT_SIZE 4096
4020 #ifdef CONFIG_FIEMAP
4021 /* So fiemap access checks don't overflow on 32 bit systems.
4022 * This is very slightly smaller than the limit imposed by
4023 * the underlying kernel.
4025 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4026 / sizeof(struct fiemap_extent))
4028 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4029 int fd, int cmd, abi_long arg)
4031 /* The parameter for this ioctl is a struct fiemap followed
4032 * by an array of struct fiemap_extent whose size is set
4033 * in fiemap->fm_extent_count. The array is filled in by the
4034 * ioctl.
4036 int target_size_in, target_size_out;
4037 struct fiemap *fm;
4038 const argtype *arg_type = ie->arg_type;
4039 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4040 void *argptr, *p;
4041 abi_long ret;
4042 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4043 uint32_t outbufsz;
4044 int free_fm = 0;
4046 assert(arg_type[0] == TYPE_PTR);
4047 assert(ie->access == IOC_RW);
4048 arg_type++;
4049 target_size_in = thunk_type_size(arg_type, 0);
4050 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4051 if (!argptr) {
4052 return -TARGET_EFAULT;
4054 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4055 unlock_user(argptr, arg, 0);
4056 fm = (struct fiemap *)buf_temp;
4057 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4058 return -TARGET_EINVAL;
4061 outbufsz = sizeof (*fm) +
4062 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4064 if (outbufsz > MAX_STRUCT_SIZE) {
4065 /* We can't fit all the extents into the fixed size buffer.
4066 * Allocate one that is large enough and use it instead.
4068 fm = g_try_malloc(outbufsz);
4069 if (!fm) {
4070 return -TARGET_ENOMEM;
4072 memcpy(fm, buf_temp, sizeof(struct fiemap));
4073 free_fm = 1;
4075 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4076 if (!is_error(ret)) {
4077 target_size_out = target_size_in;
4078 /* An extent_count of 0 means we were only counting the extents
4079 * so there are no structs to copy
4081 if (fm->fm_extent_count != 0) {
4082 target_size_out += fm->fm_mapped_extents * extent_size;
4084 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4085 if (!argptr) {
4086 ret = -TARGET_EFAULT;
4087 } else {
4088 /* Convert the struct fiemap */
4089 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4090 if (fm->fm_extent_count != 0) {
4091 p = argptr + target_size_in;
4092 /* ...and then all the struct fiemap_extents */
4093 for (i = 0; i < fm->fm_mapped_extents; i++) {
4094 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4095 THUNK_TARGET);
4096 p += extent_size;
4099 unlock_user(argptr, arg, target_size_out);
4102 if (free_fm) {
4103 g_free(fm);
4105 return ret;
4107 #endif
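/*
 * Illustrative guest-side use (a sketch): FS_IOC_FIEMAP carries a
 * variable-length payload, which is why it needs the special handler
 * above instead of the generic thunk path:
 */
#if 0
struct fiemap *fm = calloc(1, sizeof(*fm) + 8 * sizeof(struct fiemap_extent));
fm->fm_start = 0;
fm->fm_length = ~0ULL;         /* map the whole file */
fm->fm_extent_count = 8;       /* room for 8 extents after the header */
ioctl(fd, FS_IOC_FIEMAP, fm);  /* kernel fills fm_mapped_extents entries */
#endif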
4109 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4110 int fd, int cmd, abi_long arg)
4112 const argtype *arg_type = ie->arg_type;
4113 int target_size;
4114 void *argptr;
4115 int ret;
4116 struct ifconf *host_ifconf;
4117 uint32_t outbufsz;
4118 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4119 int target_ifreq_size;
4120 int nb_ifreq;
4121 int free_buf = 0;
4122 int i;
4123 int target_ifc_len;
4124 abi_long target_ifc_buf;
4125 int host_ifc_len;
4126 char *host_ifc_buf;
4128 assert(arg_type[0] == TYPE_PTR);
4129 assert(ie->access == IOC_RW);
4131 arg_type++;
4132 target_size = thunk_type_size(arg_type, 0);
4134 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4135 if (!argptr)
4136 return -TARGET_EFAULT;
4137 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4138 unlock_user(argptr, arg, 0);
4140 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4141 target_ifc_len = host_ifconf->ifc_len;
4142 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4144 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4145 nb_ifreq = target_ifc_len / target_ifreq_size;
4146 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4148 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4149 if (outbufsz > MAX_STRUCT_SIZE) {
4150 /* We can't fit all the ifreq entries into the fixed-size buffer.
4151 * Allocate one that is large enough and use it instead.
4153 host_ifconf = malloc(outbufsz);
4154 if (!host_ifconf) {
4155 return -TARGET_ENOMEM;
4157 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4158 free_buf = 1;
4160 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4162 host_ifconf->ifc_len = host_ifc_len;
4163 host_ifconf->ifc_buf = host_ifc_buf;
4165 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4166 if (!is_error(ret)) {
4167 /* convert host ifc_len to target ifc_len */
4169 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4170 target_ifc_len = nb_ifreq * target_ifreq_size;
4171 host_ifconf->ifc_len = target_ifc_len;
4173 /* restore target ifc_buf */
4175 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4177 /* copy struct ifconf to target user */
4179 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4180 if (!argptr)
4181 return -TARGET_EFAULT;
4182 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4183 unlock_user(argptr, arg, target_size);
4185 /* copy ifreq[] to target user */
4187 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4188 for (i = 0; i < nb_ifreq ; i++) {
4189 thunk_convert(argptr + i * target_ifreq_size,
4190 host_ifc_buf + i * sizeof(struct ifreq),
4191 ifreq_arg_type, THUNK_TARGET);
4193 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4196 if (free_buf) {
4197 free(host_ifconf);
4200 return ret;
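/*
 * Illustrative guest-side use (a sketch): SIOCGIFCONF likewise returns a
 * variable-length array of struct ifreq, hence the custom handler above:
 */
#if 0
char buf[8 * sizeof(struct ifreq)];
struct ifconf ifc = { .ifc_len = sizeof(buf), .ifc_buf = buf };
ioctl(sock_fd, SIOCGIFCONF, &ifc);
/* on return, ifc.ifc_len bytes of buf hold one ifreq per interface */
#endif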
4203 #if defined(CONFIG_USBFS)
4204 #if HOST_LONG_BITS > 64
4205 #error USBDEVFS thunks do not support >64 bit hosts yet.
4206 #endif
4207 struct live_urb {
4208 uint64_t target_urb_adr;
4209 uint64_t target_buf_adr;
4210 char *target_buf_ptr;
4211 struct usbdevfs_urb host_urb;
4214 static GHashTable *usbdevfs_urb_hashtable(void)
4216 static GHashTable *urb_hashtable;
4218 if (!urb_hashtable) {
4219 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4221 return urb_hashtable;
4224 static void urb_hashtable_insert(struct live_urb *urb)
4226 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4227 g_hash_table_insert(urb_hashtable, urb, urb);
4230 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4232 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4233 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4236 static void urb_hashtable_remove(struct live_urb *urb)
4238 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4239 g_hash_table_remove(urb_hashtable, urb);
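/*
 * Illustrative note on the key scheme above (a restatement, not new
 * behaviour): the table is created with g_int64_hash/g_int64_equal, so
 * keys are dereferenced as 64-bit values. Passing the struct live_urb
 * pointer itself as key works only because target_urb_adr is its first
 * member, which makes these two operations hash and compare alike:
 */
#if 0
g_hash_table_insert(table, urb, urb);        /* hashes urb->target_urb_adr */
g_hash_table_lookup(table, &target_urb_adr); /* hashes the guest address   */
#endif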
4242 static abi_long
4243 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4244 int fd, int cmd, abi_long arg)
4246 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4247 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4248 struct live_urb *lurb;
4249 void *argptr;
4250 uint64_t hurb;
4251 int target_size;
4252 uintptr_t target_urb_adr;
4253 abi_long ret;
4255 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4257 memset(buf_temp, 0, sizeof(uint64_t));
4258 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4259 if (is_error(ret)) {
4260 return ret;
4263 memcpy(&hurb, buf_temp, sizeof(uint64_t));
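/* Recover the wrapping live_urb from its embedded host_urb member
 * (container_of-style pointer arithmetic). */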
4264 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4265 if (!lurb->target_urb_adr) {
4266 return -TARGET_EFAULT;
4268 urb_hashtable_remove(lurb);
4269 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4270 lurb->host_urb.buffer_length);
4271 lurb->target_buf_ptr = NULL;
4273 /* restore the guest buffer pointer */
4274 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4276 /* update the guest urb struct */
4277 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4278 if (!argptr) {
4279 g_free(lurb);
4280 return -TARGET_EFAULT;
4282 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4283 unlock_user(argptr, lurb->target_urb_adr, target_size);
4285 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4286 /* write back the urb handle */
4287 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4288 if (!argptr) {
4289 g_free(lurb);
4290 return -TARGET_EFAULT;
4293 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4294 target_urb_adr = lurb->target_urb_adr;
4295 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4296 unlock_user(argptr, arg, target_size);
4298 g_free(lurb);
4299 return ret;
4302 static abi_long
4303 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4304 uint8_t *buf_temp __attribute__((unused)),
4305 int fd, int cmd, abi_long arg)
4307 struct live_urb *lurb;
4309 /* map target address back to host URB with metadata. */
4310 lurb = urb_hashtable_lookup(arg);
4311 if (!lurb) {
4312 return -TARGET_EFAULT;
4314 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4317 static abi_long
4318 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4319 int fd, int cmd, abi_long arg)
4321 const argtype *arg_type = ie->arg_type;
4322 int target_size;
4323 abi_long ret;
4324 void *argptr;
4325 int rw_dir;
4326 struct live_urb *lurb;
4329 * Each submitted URB needs to map to a unique ID for the
4330 * kernel, and that unique ID needs to be a pointer to
4331 * host memory. Hence we need to malloc for each URB.
4332 * Isochronous transfers have a variable-length struct.
4334 arg_type++;
4335 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4337 /* construct host copy of urb and metadata */
4338 lurb = g_try_malloc0(sizeof(struct live_urb));
4339 if (!lurb) {
4340 return -TARGET_ENOMEM;
4343 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4344 if (!argptr) {
4345 g_free(lurb);
4346 return -TARGET_EFAULT;
4348 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4349 unlock_user(argptr, arg, 0);
4351 lurb->target_urb_adr = arg;
4352 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4354 /* buffer space used depends on endpoint type so lock the entire buffer */
4355 /* control type urbs should check the buffer contents for true direction */
4356 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4357 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4358 lurb->host_urb.buffer_length, 1);
4359 if (lurb->target_buf_ptr == NULL) {
4360 g_free(lurb);
4361 return -TARGET_EFAULT;
4364 /* update buffer pointer in host copy */
4365 lurb->host_urb.buffer = lurb->target_buf_ptr;
4367 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4368 if (is_error(ret)) {
4369 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4370 g_free(lurb);
4371 } else {
4372 urb_hashtable_insert(lurb);
4375 return ret;
4377 #endif /* CONFIG_USBFS */
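/*
 * Illustrative lifecycle summary of the three USBFS handlers above:
 *   SUBMITURB  - allocate a live_urb, pin the guest buffer, hand the
 *                host urb to the kernel, remember it in the hashtable
 *   REAPURB    - kernel returns the host urb pointer; recover the
 *                live_urb, unpin the buffer, copy the urb back to the
 *                guest, and free the wrapper
 *   DISCARDURB - look the live_urb up by guest address and cancel it
 */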
4379 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4380 int cmd, abi_long arg)
4382 void *argptr;
4383 struct dm_ioctl *host_dm;
4384 abi_long guest_data;
4385 uint32_t guest_data_size;
4386 int target_size;
4387 const argtype *arg_type = ie->arg_type;
4388 abi_long ret;
4389 void *big_buf = NULL;
4390 char *host_data;
4392 arg_type++;
4393 target_size = thunk_type_size(arg_type, 0);
4394 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4395 if (!argptr) {
4396 ret = -TARGET_EFAULT;
4397 goto out;
4399 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4400 unlock_user(argptr, arg, 0);
4402 /* buf_temp is too small, so fetch things into a bigger buffer */
4403 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4404 memcpy(big_buf, buf_temp, target_size);
4405 buf_temp = big_buf;
4406 host_dm = big_buf;
4408 guest_data = arg + host_dm->data_start;
4409 if ((guest_data - arg) < 0) {
4410 ret = -TARGET_EINVAL;
4411 goto out;
4413 guest_data_size = host_dm->data_size - host_dm->data_start;
4414 host_data = (char*)host_dm + host_dm->data_start;
4416 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4417 if (!argptr) {
4418 ret = -TARGET_EFAULT;
4419 goto out;
4422 switch (ie->host_cmd) {
4423 case DM_REMOVE_ALL:
4424 case DM_LIST_DEVICES:
4425 case DM_DEV_CREATE:
4426 case DM_DEV_REMOVE:
4427 case DM_DEV_SUSPEND:
4428 case DM_DEV_STATUS:
4429 case DM_DEV_WAIT:
4430 case DM_TABLE_STATUS:
4431 case DM_TABLE_CLEAR:
4432 case DM_TABLE_DEPS:
4433 case DM_LIST_VERSIONS:
4434 /* no input data */
4435 break;
4436 case DM_DEV_RENAME:
4437 case DM_DEV_SET_GEOMETRY:
4438 /* data contains only strings */
4439 memcpy(host_data, argptr, guest_data_size);
4440 break;
4441 case DM_TARGET_MSG:
4442 memcpy(host_data, argptr, guest_data_size);
4443 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4444 break;
4445 case DM_TABLE_LOAD:
4447 void *gspec = argptr;
4448 void *cur_data = host_data;
4449 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4450 int spec_size = thunk_type_size(arg_type, 0);
4451 int i;
4453 for (i = 0; i < host_dm->target_count; i++) {
4454 struct dm_target_spec *spec = cur_data;
4455 uint32_t next;
4456 int slen;
4458 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4459 slen = strlen((char*)gspec + spec_size) + 1;
4460 next = spec->next;
4461 spec->next = sizeof(*spec) + slen;
4462 strcpy((char*)&spec[1], gspec + spec_size);
4463 gspec += next;
4464 cur_data += spec->next;
4466 break;
4468 default:
4469 ret = -TARGET_EINVAL;
4470 unlock_user(argptr, guest_data, 0);
4471 goto out;
4473 unlock_user(argptr, guest_data, 0);
4475 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4476 if (!is_error(ret)) {
4477 guest_data = arg + host_dm->data_start;
4478 guest_data_size = host_dm->data_size - host_dm->data_start;
4479 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4480 switch (ie->host_cmd) {
4481 case DM_REMOVE_ALL:
4482 case DM_DEV_CREATE:
4483 case DM_DEV_REMOVE:
4484 case DM_DEV_RENAME:
4485 case DM_DEV_SUSPEND:
4486 case DM_DEV_STATUS:
4487 case DM_TABLE_LOAD:
4488 case DM_TABLE_CLEAR:
4489 case DM_TARGET_MSG:
4490 case DM_DEV_SET_GEOMETRY:
4491 /* no return data */
4492 break;
4493 case DM_LIST_DEVICES:
4495 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4496 uint32_t remaining_data = guest_data_size;
4497 void *cur_data = argptr;
4498 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4499 int nl_size = 12; /* can't use thunk_size due to alignment */
4501 while (1) {
4502 uint32_t next = nl->next;
4503 if (next) {
4504 nl->next = nl_size + (strlen(nl->name) + 1);
4506 if (remaining_data < nl->next) {
4507 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4508 break;
4510 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4511 strcpy(cur_data + nl_size, nl->name);
4512 cur_data += nl->next;
4513 remaining_data -= nl->next;
4514 if (!next) {
4515 break;
4517 nl = (void*)nl + next;
4519 break;
4521 case DM_DEV_WAIT:
4522 case DM_TABLE_STATUS:
4524 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4525 void *cur_data = argptr;
4526 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4527 int spec_size = thunk_type_size(arg_type, 0);
4528 int i;
4530 for (i = 0; i < host_dm->target_count; i++) {
4531 uint32_t next = spec->next;
4532 int slen = strlen((char*)&spec[1]) + 1;
4533 spec->next = (cur_data - argptr) + spec_size + slen;
4534 if (guest_data_size < spec->next) {
4535 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4536 break;
4538 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4539 strcpy(cur_data + spec_size, (char*)&spec[1]);
4540 cur_data = argptr + spec->next;
4541 spec = (void*)host_dm + host_dm->data_start + next;
4543 break;
4545 case DM_TABLE_DEPS:
4547 void *hdata = (void*)host_dm + host_dm->data_start;
4548 int count = *(uint32_t*)hdata;
4549 uint64_t *hdev = hdata + 8;
4550 uint64_t *gdev = argptr + 8;
4551 int i;
4553 *(uint32_t*)argptr = tswap32(count);
4554 for (i = 0; i < count; i++) {
4555 *gdev = tswap64(*hdev);
4556 gdev++;
4557 hdev++;
4559 break;
4561 case DM_LIST_VERSIONS:
4563 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4564 uint32_t remaining_data = guest_data_size;
4565 void *cur_data = argptr;
4566 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4567 int vers_size = thunk_type_size(arg_type, 0);
4569 while (1) {
4570 uint32_t next = vers->next;
4571 if (next) {
4572 vers->next = vers_size + (strlen(vers->name) + 1);
4574 if (remaining_data < vers->next) {
4575 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4576 break;
4578 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4579 strcpy(cur_data + vers_size, vers->name);
4580 cur_data += vers->next;
4581 remaining_data -= vers->next;
4582 if (!next) {
4583 break;
4585 vers = (void*)vers + next;
4587 break;
4589 default:
4590 unlock_user(argptr, guest_data, 0);
4591 ret = -TARGET_EINVAL;
4592 goto out;
4594 unlock_user(argptr, guest_data, guest_data_size);
4596 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4597 if (!argptr) {
4598 ret = -TARGET_EFAULT;
4599 goto out;
4601 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4602 unlock_user(argptr, arg, target_size);
4604 out:
4605 g_free(big_buf);
4606 return ret;
4609 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4610 int cmd, abi_long arg)
4612 void *argptr;
4613 int target_size;
4614 const argtype *arg_type = ie->arg_type;
4615 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4616 abi_long ret;
4618 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4619 struct blkpg_partition host_part;
4621 /* Read and convert blkpg */
4622 arg_type++;
4623 target_size = thunk_type_size(arg_type, 0);
4624 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4625 if (!argptr) {
4626 ret = -TARGET_EFAULT;
4627 goto out;
4629 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4630 unlock_user(argptr, arg, 0);
4632 switch (host_blkpg->op) {
4633 case BLKPG_ADD_PARTITION:
4634 case BLKPG_DEL_PARTITION:
4635 /* payload is struct blkpg_partition */
4636 break;
4637 default:
4638 /* Unknown opcode */
4639 ret = -TARGET_EINVAL;
4640 goto out;
4643 /* Read and convert blkpg->data */
4644 arg = (abi_long)(uintptr_t)host_blkpg->data;
4645 target_size = thunk_type_size(part_arg_type, 0);
4646 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4647 if (!argptr) {
4648 ret = -TARGET_EFAULT;
4649 goto out;
4651 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4652 unlock_user(argptr, arg, 0);
4654 /* Swizzle the data pointer to our local copy and call! */
4655 host_blkpg->data = &host_part;
4656 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4658 out:
4659 return ret;
4662 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4663 int fd, int cmd, abi_long arg)
4665 const argtype *arg_type = ie->arg_type;
4666 const StructEntry *se;
4667 const argtype *field_types;
4668 const int *dst_offsets, *src_offsets;
4669 int target_size;
4670 void *argptr;
4671 abi_ulong *target_rt_dev_ptr;
4672 unsigned long *host_rt_dev_ptr;
4673 abi_long ret;
4674 int i;
4676 assert(ie->access == IOC_W);
4677 assert(*arg_type == TYPE_PTR);
4678 arg_type++;
4679 assert(*arg_type == TYPE_STRUCT);
4680 target_size = thunk_type_size(arg_type, 0);
4681 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4682 if (!argptr) {
4683 return -TARGET_EFAULT;
4685 arg_type++;
4686 assert(*arg_type == (int)STRUCT_rtentry);
4687 se = struct_entries + *arg_type++;
4688 assert(se->convert[0] == NULL);
4689 /* convert struct here to be able to catch rt_dev string */
4690 field_types = se->field_types;
4691 dst_offsets = se->field_offsets[THUNK_HOST];
4692 src_offsets = se->field_offsets[THUNK_TARGET];
4693 for (i = 0; i < se->nb_fields; i++) {
4694 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4695 assert(*field_types == TYPE_PTRVOID);
4696 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4697 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4698 if (*target_rt_dev_ptr != 0) {
4699 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4700 tswapal(*target_rt_dev_ptr));
4701 if (!*host_rt_dev_ptr) {
4702 unlock_user(argptr, arg, 0);
4703 return -TARGET_EFAULT;
4705 } else {
4706 *host_rt_dev_ptr = 0;
4708 field_types++;
4709 continue;
4711 field_types = thunk_convert(buf_temp + dst_offsets[i],
4712 argptr + src_offsets[i],
4713 field_types, THUNK_HOST);
4715 unlock_user(argptr, arg, 0);
4717 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4718 if (*host_rt_dev_ptr != 0) {
4719 unlock_user((void *)*host_rt_dev_ptr,
4720 *target_rt_dev_ptr, 0);
4722 return ret;
4725 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4726 int fd, int cmd, abi_long arg)
4728 int sig = target_to_host_signal(arg);
4729 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4732 #ifdef TIOCGPTPEER
4733 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4734 int fd, int cmd, abi_long arg)
4736 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4737 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4739 #endif
4741 static IOCTLEntry ioctl_entries[] = {
4742 #define IOCTL(cmd, access, ...) \
4743 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4744 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4745 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4746 #define IOCTL_IGNORE(cmd) \
4747 { TARGET_ ## cmd, 0, #cmd },
4748 #include "ioctls.h"
4749 { 0, 0, },
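/*
 * Illustrative expansion (an example entry, assuming the usual contents
 * of ioctls.h): a line such as
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * expands to the following table entry:
 */
#if 0
{ TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
  { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
#endif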
4752 /* ??? Implement proper locking for ioctls. */
4753 /* do_ioctl() must return target values and target errnos. */
4754 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4756 const IOCTLEntry *ie;
4757 const argtype *arg_type;
4758 abi_long ret;
4759 uint8_t buf_temp[MAX_STRUCT_SIZE];
4760 int target_size;
4761 void *argptr;
4763 ie = ioctl_entries;
4764 for(;;) {
4765 if (ie->target_cmd == 0) {
4766 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4767 return -TARGET_ENOSYS;
4769 if (ie->target_cmd == cmd)
4770 break;
4771 ie++;
4773 arg_type = ie->arg_type;
4774 if (ie->do_ioctl) {
4775 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4776 } else if (!ie->host_cmd) {
4777 /* Some architectures define BSD ioctls in their headers
4778 that are not implemented in Linux. */
4779 return -TARGET_ENOSYS;
4782 switch(arg_type[0]) {
4783 case TYPE_NULL:
4784 /* no argument */
4785 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4786 break;
4787 case TYPE_PTRVOID:
4788 case TYPE_INT:
4789 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4790 break;
4791 case TYPE_PTR:
4792 arg_type++;
4793 target_size = thunk_type_size(arg_type, 0);
4794 switch(ie->access) {
4795 case IOC_R:
4796 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4797 if (!is_error(ret)) {
4798 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4799 if (!argptr)
4800 return -TARGET_EFAULT;
4801 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4802 unlock_user(argptr, arg, target_size);
4804 break;
4805 case IOC_W:
4806 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4807 if (!argptr)
4808 return -TARGET_EFAULT;
4809 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4810 unlock_user(argptr, arg, 0);
4811 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4812 break;
4813 default:
4814 case IOC_RW:
4815 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4816 if (!argptr)
4817 return -TARGET_EFAULT;
4818 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4819 unlock_user(argptr, arg, 0);
4820 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4821 if (!is_error(ret)) {
4822 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4823 if (!argptr)
4824 return -TARGET_EFAULT;
4825 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4826 unlock_user(argptr, arg, target_size);
4828 break;
4830 break;
4831 default:
4832 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4833 (long)cmd, arg_type[0]);
4834 ret = -TARGET_ENOSYS;
4835 break;
4837 return ret;
4840 static const bitmask_transtbl iflag_tbl[] = {
4841 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4842 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4843 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4844 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4845 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4846 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4847 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4848 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4849 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4850 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4851 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4852 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4853 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4854 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4855 { 0, 0, 0, 0 }
4858 static const bitmask_transtbl oflag_tbl[] = {
4859 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4860 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4861 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4862 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4863 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4864 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4865 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4866 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4867 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4868 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4869 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4870 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4871 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4872 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4873 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4874 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4875 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4876 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4877 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4878 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4879 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4880 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4881 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4882 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4883 { 0, 0, 0, 0 }
4886 static const bitmask_transtbl cflag_tbl[] = {
4887 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4888 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4889 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4890 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4891 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4892 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4893 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4894 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4895 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4896 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4897 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4898 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4899 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4900 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4901 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4902 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4903 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4904 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4905 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4906 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4907 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4908 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4909 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4910 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4911 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4912 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4913 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4914 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4915 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4916 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4917 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4918 { 0, 0, 0, 0 }
4921 static const bitmask_transtbl lflag_tbl[] = {
4922 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4923 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4924 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4925 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4926 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4927 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4928 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4929 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4930 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4931 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4932 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4933 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4934 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4935 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4936 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4937 { 0, 0, 0, 0 }
4940 static void target_to_host_termios (void *dst, const void *src)
4942 struct host_termios *host = dst;
4943 const struct target_termios *target = src;
4945 host->c_iflag =
4946 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4947 host->c_oflag =
4948 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4949 host->c_cflag =
4950 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4951 host->c_lflag =
4952 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4953 host->c_line = target->c_line;
4955 memset(host->c_cc, 0, sizeof(host->c_cc));
4956 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4957 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4958 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4959 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4960 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4961 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4962 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4963 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4964 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4965 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4966 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4967 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4968 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4969 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4970 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4971 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4972 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4975 static void host_to_target_termios (void *dst, const void *src)
4977 struct target_termios *target = dst;
4978 const struct host_termios *host = src;
4980 target->c_iflag =
4981 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4982 target->c_oflag =
4983 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4984 target->c_cflag =
4985 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4986 target->c_lflag =
4987 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4988 target->c_line = host->c_line;
4990 memset(target->c_cc, 0, sizeof(target->c_cc));
4991 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4992 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4993 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4994 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4995 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4996 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4997 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4998 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4999 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5000 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5001 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5002 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5003 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5004 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5005 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5006 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5007 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5010 static const StructEntry struct_termios_def = {
5011 .convert = { host_to_target_termios, target_to_host_termios },
5012 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5013 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5016 static bitmask_transtbl mmap_flags_tbl[] = {
5017 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5018 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5019 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5020 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5021 MAP_ANONYMOUS, MAP_ANONYMOUS },
5022 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5023 MAP_GROWSDOWN, MAP_GROWSDOWN },
5024 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5025 MAP_DENYWRITE, MAP_DENYWRITE },
5026 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5027 MAP_EXECUTABLE, MAP_EXECUTABLE },
5028 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5029 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5030 MAP_NORESERVE, MAP_NORESERVE },
5031 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5032 /* MAP_STACK had been ignored by the kernel for quite some time.
5033 Recognize it for the target insofar as we do not want to pass
5034 it through to the host. */
5035 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5036 { 0, 0, 0, 0 }
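/*
 * Illustrative note (a sketch of the table semantics): each row is
 * { target_mask, target_bits, host_mask, host_bits }; the generic
 * target_to_host_bitmask() sets host_bits in the result whenever
 * (flags & target_mask) == target_bits, so for example:
 */
#if 0
int host_flags = target_to_host_bitmask(TARGET_MAP_PRIVATE | TARGET_MAP_FIXED,
                                        mmap_flags_tbl);
/* host_flags == MAP_PRIVATE | MAP_FIXED regardless of the host's values */
#endif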
5039 #if defined(TARGET_I386)
5041 /* NOTE: there is really only one LDT shared by all the threads */
5042 static uint8_t *ldt_table;
5044 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5046 int size;
5047 void *p;
5049 if (!ldt_table)
5050 return 0;
5051 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5052 if (size > bytecount)
5053 size = bytecount;
5054 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5055 if (!p)
5056 return -TARGET_EFAULT;
5057 /* ??? Should this be byteswapped? */
5058 memcpy(p, ldt_table, size);
5059 unlock_user(p, ptr, size);
5060 return size;
5063 /* XXX: add locking support */
5064 static abi_long write_ldt(CPUX86State *env,
5065 abi_ulong ptr, unsigned long bytecount, int oldmode)
5067 struct target_modify_ldt_ldt_s ldt_info;
5068 struct target_modify_ldt_ldt_s *target_ldt_info;
5069 int seg_32bit, contents, read_exec_only, limit_in_pages;
5070 int seg_not_present, useable, lm;
5071 uint32_t *lp, entry_1, entry_2;
5073 if (bytecount != sizeof(ldt_info))
5074 return -TARGET_EINVAL;
5075 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5076 return -TARGET_EFAULT;
5077 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5078 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5079 ldt_info.limit = tswap32(target_ldt_info->limit);
5080 ldt_info.flags = tswap32(target_ldt_info->flags);
5081 unlock_user_struct(target_ldt_info, ptr, 0);
5083 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5084 return -TARGET_EINVAL;
5085 seg_32bit = ldt_info.flags & 1;
5086 contents = (ldt_info.flags >> 1) & 3;
5087 read_exec_only = (ldt_info.flags >> 3) & 1;
5088 limit_in_pages = (ldt_info.flags >> 4) & 1;
5089 seg_not_present = (ldt_info.flags >> 5) & 1;
5090 useable = (ldt_info.flags >> 6) & 1;
5091 #ifdef TARGET_ABI32
5092 lm = 0;
5093 #else
5094 lm = (ldt_info.flags >> 7) & 1;
5095 #endif
5096 if (contents == 3) {
5097 if (oldmode)
5098 return -TARGET_EINVAL;
5099 if (seg_not_present == 0)
5100 return -TARGET_EINVAL;
5102 /* allocate the LDT */
5103 if (!ldt_table) {
5104 env->ldt.base = target_mmap(0,
5105 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5106 PROT_READ|PROT_WRITE,
5107 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5108 if (env->ldt.base == -1)
5109 return -TARGET_ENOMEM;
5110 memset(g2h(env->ldt.base), 0,
5111 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5112 env->ldt.limit = 0xffff;
5113 ldt_table = g2h(env->ldt.base);
5116 /* NOTE: same code as Linux kernel */
5117 /* Allow LDTs to be cleared by the user. */
5118 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5119 if (oldmode ||
5120 (contents == 0 &&
5121 read_exec_only == 1 &&
5122 seg_32bit == 0 &&
5123 limit_in_pages == 0 &&
5124 seg_not_present == 1 &&
5125 useable == 0 )) {
5126 entry_1 = 0;
5127 entry_2 = 0;
5128 goto install;
5132 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5133 (ldt_info.limit & 0x0ffff);
5134 entry_2 = (ldt_info.base_addr & 0xff000000) |
5135 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5136 (ldt_info.limit & 0xf0000) |
5137 ((read_exec_only ^ 1) << 9) |
5138 (contents << 10) |
5139 ((seg_not_present ^ 1) << 15) |
5140 (seg_32bit << 22) |
5141 (limit_in_pages << 23) |
5142 (lm << 21) |
5143 0x7000;
5144 if (!oldmode)
5145 entry_2 |= (useable << 20);
5147 /* Install the new entry ... */
5148 install:
5149 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5150 lp[0] = tswap32(entry_1);
5151 lp[1] = tswap32(entry_2);
5152 return 0;
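/*
 * Layout note (restating the standard x86 descriptor packing used above):
 *   entry_1 = base[15:0] << 16 | limit[15:0]
 *   entry_2 = base[31:24] | G << 23 | D/B << 22 | L << 21 | AVL << 20 |
 *             limit[19:16] | P << 15 | type/access bits | base[23:16]
 * The constant 0x7000 sets S=1 (code/data descriptor) and DPL=3, matching
 * what the kernel's LDT setup does for user segments.
 */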
5155 /* specific and weird i386 syscalls */
5156 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5157 unsigned long bytecount)
5159 abi_long ret;
5161 switch (func) {
5162 case 0:
5163 ret = read_ldt(ptr, bytecount);
5164 break;
5165 case 1:
5166 ret = write_ldt(env, ptr, bytecount, 1);
5167 break;
5168 case 0x11:
5169 ret = write_ldt(env, ptr, bytecount, 0);
5170 break;
5171 default:
5172 ret = -TARGET_ENOSYS;
5173 break;
5175 return ret;
5178 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5179 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5181 uint64_t *gdt_table = g2h(env->gdt.base);
5182 struct target_modify_ldt_ldt_s ldt_info;
5183 struct target_modify_ldt_ldt_s *target_ldt_info;
5184 int seg_32bit, contents, read_exec_only, limit_in_pages;
5185 int seg_not_present, useable, lm;
5186 uint32_t *lp, entry_1, entry_2;
5187 int i;
5189 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5190 if (!target_ldt_info)
5191 return -TARGET_EFAULT;
5192 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5193 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5194 ldt_info.limit = tswap32(target_ldt_info->limit);
5195 ldt_info.flags = tswap32(target_ldt_info->flags);
5196 if (ldt_info.entry_number == -1) {
5197 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5198 if (gdt_table[i] == 0) {
5199 ldt_info.entry_number = i;
5200 target_ldt_info->entry_number = tswap32(i);
5201 break;
5205 unlock_user_struct(target_ldt_info, ptr, 1);
5207 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5208 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5209 return -TARGET_EINVAL;
5210 seg_32bit = ldt_info.flags & 1;
5211 contents = (ldt_info.flags >> 1) & 3;
5212 read_exec_only = (ldt_info.flags >> 3) & 1;
5213 limit_in_pages = (ldt_info.flags >> 4) & 1;
5214 seg_not_present = (ldt_info.flags >> 5) & 1;
5215 useable = (ldt_info.flags >> 6) & 1;
5216 #ifdef TARGET_ABI32
5217 lm = 0;
5218 #else
5219 lm = (ldt_info.flags >> 7) & 1;
5220 #endif
5222 if (contents == 3) {
5223 if (seg_not_present == 0)
5224 return -TARGET_EINVAL;
5227 /* NOTE: same code as Linux kernel */
5228 /* Allow LDTs to be cleared by the user. */
5229 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5230 if ((contents == 0 &&
5231 read_exec_only == 1 &&
5232 seg_32bit == 0 &&
5233 limit_in_pages == 0 &&
5234 seg_not_present == 1 &&
5235 useable == 0 )) {
5236 entry_1 = 0;
5237 entry_2 = 0;
5238 goto install;
5242 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5243 (ldt_info.limit & 0x0ffff);
5244 entry_2 = (ldt_info.base_addr & 0xff000000) |
5245 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5246 (ldt_info.limit & 0xf0000) |
5247 ((read_exec_only ^ 1) << 9) |
5248 (contents << 10) |
5249 ((seg_not_present ^ 1) << 15) |
5250 (seg_32bit << 22) |
5251 (limit_in_pages << 23) |
5252 (useable << 20) |
5253 (lm << 21) |
5254 0x7000;
5256 /* Install the new entry ... */
5257 install:
5258 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5259 lp[0] = tswap32(entry_1);
5260 lp[1] = tswap32(entry_2);
5261 return 0;
5264 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5266 struct target_modify_ldt_ldt_s *target_ldt_info;
5267 uint64_t *gdt_table = g2h(env->gdt.base);
5268 uint32_t base_addr, limit, flags;
5269 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5270 int seg_not_present, useable, lm;
5271 uint32_t *lp, entry_1, entry_2;
5273 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5274 if (!target_ldt_info)
5275 return -TARGET_EFAULT;
5276 idx = tswap32(target_ldt_info->entry_number);
5277 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5278 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5279 unlock_user_struct(target_ldt_info, ptr, 1);
5280 return -TARGET_EINVAL;
5282 lp = (uint32_t *)(gdt_table + idx);
5283 entry_1 = tswap32(lp[0]);
5284 entry_2 = tswap32(lp[1]);
5286 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5287 contents = (entry_2 >> 10) & 3;
5288 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5289 seg_32bit = (entry_2 >> 22) & 1;
5290 limit_in_pages = (entry_2 >> 23) & 1;
5291 useable = (entry_2 >> 20) & 1;
5292 #ifdef TARGET_ABI32
5293 lm = 0;
5294 #else
5295 lm = (entry_2 >> 21) & 1;
5296 #endif
5297 flags = (seg_32bit << 0) | (contents << 1) |
5298 (read_exec_only << 3) | (limit_in_pages << 4) |
5299 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5300 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5301 base_addr = (entry_1 >> 16) |
5302 (entry_2 & 0xff000000) |
5303 ((entry_2 & 0xff) << 16);
5304 target_ldt_info->base_addr = tswapal(base_addr);
5305 target_ldt_info->limit = tswap32(limit);
5306 target_ldt_info->flags = tswap32(flags);
5307 unlock_user_struct(target_ldt_info, ptr, 1);
5308 return 0;
5310 #endif /* TARGET_I386 && TARGET_ABI32 */
5312 #ifndef TARGET_ABI32
5313 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5315 abi_long ret = 0;
5316 abi_ulong val;
5317 int idx;
5319 switch(code) {
5320 case TARGET_ARCH_SET_GS:
5321 case TARGET_ARCH_SET_FS:
5322 if (code == TARGET_ARCH_SET_GS)
5323 idx = R_GS;
5324 else
5325 idx = R_FS;
5326 cpu_x86_load_seg(env, idx, 0);
5327 env->segs[idx].base = addr;
5328 break;
5329 case TARGET_ARCH_GET_GS:
5330 case TARGET_ARCH_GET_FS:
5331 if (code == TARGET_ARCH_GET_GS)
5332 idx = R_GS;
5333 else
5334 idx = R_FS;
5335 val = env->segs[idx].base;
5336 if (put_user(val, addr, abi_ulong))
5337 ret = -TARGET_EFAULT;
5338 break;
5339 default:
5340 ret = -TARGET_EINVAL;
5341 break;
5343 return ret;
5345 #endif
5347 #endif /* defined(TARGET_I386) */
5349 #define NEW_STACK_SIZE 0x40000
5352 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5353 typedef struct {
5354 CPUArchState *env;
5355 pthread_mutex_t mutex;
5356 pthread_cond_t cond;
5357 pthread_t thread;
5358 uint32_t tid;
5359 abi_ulong child_tidptr;
5360 abi_ulong parent_tidptr;
5361 sigset_t sigmask;
5362 } new_thread_info;
5364 static void *clone_func(void *arg)
5366 new_thread_info *info = arg;
5367 CPUArchState *env;
5368 CPUState *cpu;
5369 TaskState *ts;
5371 rcu_register_thread();
5372 tcg_register_thread();
5373 env = info->env;
5374 cpu = ENV_GET_CPU(env);
5375 thread_cpu = cpu;
5376 ts = (TaskState *)cpu->opaque;
5377 info->tid = gettid();
5378 task_settid(ts);
5379 if (info->child_tidptr)
5380 put_user_u32(info->tid, info->child_tidptr);
5381 if (info->parent_tidptr)
5382 put_user_u32(info->tid, info->parent_tidptr);
5383 /* Enable signals. */
5384 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5385 /* Signal to the parent that we're ready. */
5386 pthread_mutex_lock(&info->mutex);
5387 pthread_cond_broadcast(&info->cond);
5388 pthread_mutex_unlock(&info->mutex);
5389 /* Wait until the parent has finished initializing the tls state. */
5390 pthread_mutex_lock(&clone_lock);
5391 pthread_mutex_unlock(&clone_lock);
5392 cpu_loop(env);
5393 /* never exits */
5394 return NULL;
5397 /* do_fork() must return host values and target errnos (unlike most
5398 do_*() functions). */
5399 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5400 abi_ulong parent_tidptr, target_ulong newtls,
5401 abi_ulong child_tidptr)
5403 CPUState *cpu = ENV_GET_CPU(env);
5404 int ret;
5405 TaskState *ts;
5406 CPUState *new_cpu;
5407 CPUArchState *new_env;
5408 sigset_t sigmask;
5410 flags &= ~CLONE_IGNORED_FLAGS;
5412 /* Emulate vfork() with fork() */
5413 if (flags & CLONE_VFORK)
5414 flags &= ~(CLONE_VFORK | CLONE_VM);
5416 if (flags & CLONE_VM) {
5417 TaskState *parent_ts = (TaskState *)cpu->opaque;
5418 new_thread_info info;
5419 pthread_attr_t attr;
5421 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5422 (flags & CLONE_INVALID_THREAD_FLAGS)) {
5423 return -TARGET_EINVAL;
5426 ts = g_new0(TaskState, 1);
5427 init_task_state(ts);
5429 /* Grab a mutex so that thread setup appears atomic. */
5430 pthread_mutex_lock(&clone_lock);
5432 /* we create a new CPU instance. */
5433 new_env = cpu_copy(env);
5434 /* Init regs that differ from the parent. */
5435 cpu_clone_regs(new_env, newsp);
5436 new_cpu = ENV_GET_CPU(new_env);
5437 new_cpu->opaque = ts;
5438 ts->bprm = parent_ts->bprm;
5439 ts->info = parent_ts->info;
5440 ts->signal_mask = parent_ts->signal_mask;
5442 if (flags & CLONE_CHILD_CLEARTID) {
5443 ts->child_tidptr = child_tidptr;
5446 if (flags & CLONE_SETTLS) {
5447 cpu_set_tls (new_env, newtls);
5450 memset(&info, 0, sizeof(info));
5451 pthread_mutex_init(&info.mutex, NULL);
5452 pthread_mutex_lock(&info.mutex);
5453 pthread_cond_init(&info.cond, NULL);
5454 info.env = new_env;
5455 if (flags & CLONE_CHILD_SETTID) {
5456 info.child_tidptr = child_tidptr;
5458 if (flags & CLONE_PARENT_SETTID) {
5459 info.parent_tidptr = parent_tidptr;
5462 ret = pthread_attr_init(&attr);
5463 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5464 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5465 /* It is not safe to deliver signals until the child has finished
5466 initializing, so temporarily block all signals. */
5467 sigfillset(&sigmask);
5468 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5470 /* If this is our first additional thread, we need to ensure we
5471 * generate code for parallel execution and flush old translations.
5473 if (!parallel_cpus) {
5474 parallel_cpus = true;
5475 tb_flush(cpu);
5478 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5479 /* TODO: Free new CPU state if thread creation failed. */
5481 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5482 pthread_attr_destroy(&attr);
5483 if (ret == 0) {
5484 /* Wait for the child to initialize. */
5485 pthread_cond_wait(&info.cond, &info.mutex);
5486 ret = info.tid;
5487 } else {
5488 ret = -1;
5490 pthread_mutex_unlock(&info.mutex);
5491 pthread_cond_destroy(&info.cond);
5492 pthread_mutex_destroy(&info.mutex);
5493 pthread_mutex_unlock(&clone_lock);
5494 } else {
5495 /* If there is no CLONE_VM, we consider it a fork */
5496 if (flags & CLONE_INVALID_FORK_FLAGS) {
5497 return -TARGET_EINVAL;
5500 /* We can't support custom termination signals */
5501 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5502 return -TARGET_EINVAL;
5505 if (block_signals()) {
5506 return -TARGET_ERESTARTSYS;
5509 fork_start();
5510 ret = fork();
5511 if (ret == 0) {
5512 /* Child Process. */
5513 cpu_clone_regs(env, newsp);
5514 fork_end(1);
5515 /* There is a race condition here. The parent process could
5516 theoretically read the TID in the child process before the child
5517 tid is set. This would require using either ptrace
5518 (not implemented) or having *_tidptr point at a shared memory
5519 mapping. We can't repeat the spinlock hack used above because
5520 the child process gets its own copy of the lock. */
5521 if (flags & CLONE_CHILD_SETTID)
5522 put_user_u32(gettid(), child_tidptr);
5523 if (flags & CLONE_PARENT_SETTID)
5524 put_user_u32(gettid(), parent_tidptr);
5525 ts = (TaskState *)cpu->opaque;
5526 if (flags & CLONE_SETTLS)
5527 cpu_set_tls (env, newtls);
5528 if (flags & CLONE_CHILD_CLEARTID)
5529 ts->child_tidptr = child_tidptr;
5530 } else {
5531 fork_end(0);
5534 return ret;
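/*
 * Illustrative guest-side call (a sketch): a guest pthread_create() ends
 * up in do_fork() with roughly the flag set below, taking the CLONE_VM
 * branch above and becoming a host pthread; plain fork()/vfork() carry
 * no CLONE_VM and take the host fork() branch instead.
 */
#if 0
clone(fn, child_stack,
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
      CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
      CLONE_CHILD_CLEARTID,
      arg, &ptid, tls, &ctid);
#endif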
5537 /* Warning: doesn't handle Linux-specific flags... */
5538 static int target_to_host_fcntl_cmd(int cmd)
5540 int ret;
5542 switch(cmd) {
5543 case TARGET_F_DUPFD:
5544 case TARGET_F_GETFD:
5545 case TARGET_F_SETFD:
5546 case TARGET_F_GETFL:
5547 case TARGET_F_SETFL:
5548 ret = cmd;
5549 break;
5550 case TARGET_F_GETLK:
5551 ret = F_GETLK64;
5552 break;
5553 case TARGET_F_SETLK:
5554 ret = F_SETLK64;
5555 break;
5556 case TARGET_F_SETLKW:
5557 ret = F_SETLKW64;
5558 break;
5559 case TARGET_F_GETOWN:
5560 ret = F_GETOWN;
5561 break;
5562 case TARGET_F_SETOWN:
5563 ret = F_SETOWN;
5564 break;
5565 case TARGET_F_GETSIG:
5566 ret = F_GETSIG;
5567 break;
5568 case TARGET_F_SETSIG:
5569 ret = F_SETSIG;
5570 break;
5571 #if TARGET_ABI_BITS == 32
5572 case TARGET_F_GETLK64:
5573 ret = F_GETLK64;
5574 break;
5575 case TARGET_F_SETLK64:
5576 ret = F_SETLK64;
5577 break;
5578 case TARGET_F_SETLKW64:
5579 ret = F_SETLKW64;
5580 break;
5581 #endif
5582 case TARGET_F_SETLEASE:
5583 ret = F_SETLEASE;
5584 break;
5585 case TARGET_F_GETLEASE:
5586 ret = F_GETLEASE;
5587 break;
5588 #ifdef F_DUPFD_CLOEXEC
5589 case TARGET_F_DUPFD_CLOEXEC:
5590 ret = F_DUPFD_CLOEXEC;
5591 break;
5592 #endif
5593 case TARGET_F_NOTIFY:
5594 ret = F_NOTIFY;
5595 break;
5596 #ifdef F_GETOWN_EX
5597 case TARGET_F_GETOWN_EX:
5598 ret = F_GETOWN_EX;
5599 break;
5600 #endif
5601 #ifdef F_SETOWN_EX
5602 case TARGET_F_SETOWN_EX:
5603 ret = F_SETOWN_EX;
5604 break;
5605 #endif
5606 #ifdef F_SETPIPE_SZ
5607 case TARGET_F_SETPIPE_SZ:
5608 ret = F_SETPIPE_SZ;
5609 break;
5610 case TARGET_F_GETPIPE_SZ:
5611 ret = F_GETPIPE_SZ;
5612 break;
5613 #endif
5614 default:
5615 ret = -TARGET_EINVAL;
5616 break;
5619 #if defined(__powerpc64__)
5620 /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
5621 * which the kernel does not support. The glibc fcntl call actually adjusts
5622 * them to 5, 6 and 7 before making the syscall(). Since we make the
5623 * syscall directly, adjust to what is supported by the kernel.
5625 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5626 ret -= F_GETLK64 - 5;
5628 #endif
5630 return ret;
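/*
 * Worked example for the PPC64 fixup above: with glibc's F_GETLK64 = 12
 * and the kernel expecting 5, the adjustment ret -= F_GETLK64 - 5 maps
 *   12 -> 5 (F_GETLK), 13 -> 6 (F_SETLK), 14 -> 7 (F_SETLKW).
 */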
5633 #define FLOCK_TRANSTBL \
5634 switch (type) { \
5635 TRANSTBL_CONVERT(F_RDLCK); \
5636 TRANSTBL_CONVERT(F_WRLCK); \
5637 TRANSTBL_CONVERT(F_UNLCK); \
5638 TRANSTBL_CONVERT(F_EXLCK); \
5639 TRANSTBL_CONVERT(F_SHLCK); \
5642 static int target_to_host_flock(int type)
5644 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5645 FLOCK_TRANSTBL
5646 #undef TRANSTBL_CONVERT
5647 return -TARGET_EINVAL;
5650 static int host_to_target_flock(int type)
5652 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5653 FLOCK_TRANSTBL
5654 #undef TRANSTBL_CONVERT
5655 /* If we don't know how to convert the value coming
5656 * from the host, we copy it to the target field as-is.
5658 return type;
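/*
 * Illustrative expansion (restating the macro trick above): with the
 * first TRANSTBL_CONVERT definition, FLOCK_TRANSTBL expands inside
 * target_to_host_flock() to:
 */
#if 0
switch (type) {
case TARGET_F_RDLCK: return F_RDLCK;
case TARGET_F_WRLCK: return F_WRLCK;
case TARGET_F_UNLCK: return F_UNLCK;
case TARGET_F_EXLCK: return F_EXLCK;
case TARGET_F_SHLCK: return F_SHLCK;
}
#endif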
5661 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5662 abi_ulong target_flock_addr)
5664 struct target_flock *target_fl;
5665 int l_type;
5667 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5668 return -TARGET_EFAULT;
5671 __get_user(l_type, &target_fl->l_type);
5672 l_type = target_to_host_flock(l_type);
5673 if (l_type < 0) {
5674 return l_type;
5676 fl->l_type = l_type;
5677 __get_user(fl->l_whence, &target_fl->l_whence);
5678 __get_user(fl->l_start, &target_fl->l_start);
5679 __get_user(fl->l_len, &target_fl->l_len);
5680 __get_user(fl->l_pid, &target_fl->l_pid);
5681 unlock_user_struct(target_fl, target_flock_addr, 0);
5682 return 0;
5685 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5686 const struct flock64 *fl)
5688 struct target_flock *target_fl;
5689 short l_type;
5691 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5692 return -TARGET_EFAULT;
5695 l_type = host_to_target_flock(fl->l_type);
5696 __put_user(l_type, &target_fl->l_type);
5697 __put_user(fl->l_whence, &target_fl->l_whence);
5698 __put_user(fl->l_start, &target_fl->l_start);
5699 __put_user(fl->l_len, &target_fl->l_len);
5700 __put_user(fl->l_pid, &target_fl->l_pid);
5701 unlock_user_struct(target_fl, target_flock_addr, 1);
5702 return 0;
5705 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5706 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5708 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5709 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5710 abi_ulong target_flock_addr)
5712 struct target_oabi_flock64 *target_fl;
5713 int l_type;
5715 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5716 return -TARGET_EFAULT;
5719 __get_user(l_type, &target_fl->l_type);
5720 l_type = target_to_host_flock(l_type);
5721 if (l_type < 0) {
5722 return l_type;
5724 fl->l_type = l_type;
5725 __get_user(fl->l_whence, &target_fl->l_whence);
5726 __get_user(fl->l_start, &target_fl->l_start);
5727 __get_user(fl->l_len, &target_fl->l_len);
5728 __get_user(fl->l_pid, &target_fl->l_pid);
5729 unlock_user_struct(target_fl, target_flock_addr, 0);
5730 return 0;
5733 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5734 const struct flock64 *fl)
5736 struct target_oabi_flock64 *target_fl;
5737 short l_type;
5739 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5740 return -TARGET_EFAULT;
5743 l_type = host_to_target_flock(fl->l_type);
5744 __put_user(l_type, &target_fl->l_type);
5745 __put_user(fl->l_whence, &target_fl->l_whence);
5746 __put_user(fl->l_start, &target_fl->l_start);
5747 __put_user(fl->l_len, &target_fl->l_len);
5748 __put_user(fl->l_pid, &target_fl->l_pid);
5749 unlock_user_struct(target_fl, target_flock_addr, 1);
5750 return 0;
5752 #endif
5754 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5755 abi_ulong target_flock_addr)
5757 struct target_flock64 *target_fl;
5758 int l_type;
5760 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5761 return -TARGET_EFAULT;
5764 __get_user(l_type, &target_fl->l_type);
5765 l_type = target_to_host_flock(l_type);
5766 if (l_type < 0) {
5767 return l_type;
5769 fl->l_type = l_type;
5770 __get_user(fl->l_whence, &target_fl->l_whence);
5771 __get_user(fl->l_start, &target_fl->l_start);
5772 __get_user(fl->l_len, &target_fl->l_len);
5773 __get_user(fl->l_pid, &target_fl->l_pid);
5774 unlock_user_struct(target_fl, target_flock_addr, 0);
5775 return 0;
5778 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5779 const struct flock64 *fl)
5781 struct target_flock64 *target_fl;
5782 short l_type;
5784 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5785 return -TARGET_EFAULT;
5788 l_type = host_to_target_flock(fl->l_type);
5789 __put_user(l_type, &target_fl->l_type);
5790 __put_user(fl->l_whence, &target_fl->l_whence);
5791 __put_user(fl->l_start, &target_fl->l_start);
5792 __put_user(fl->l_len, &target_fl->l_len);
5793 __put_user(fl->l_pid, &target_fl->l_pid);
5794 unlock_user_struct(target_fl, target_flock_addr, 1);
5795 return 0;
5798 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5800 struct flock64 fl64;
5801 #ifdef F_GETOWN_EX
5802 struct f_owner_ex fox;
5803 struct target_f_owner_ex *target_fox;
5804 #endif
5805 abi_long ret;
5806 int host_cmd = target_to_host_fcntl_cmd(cmd);
5808 if (host_cmd == -TARGET_EINVAL)
5809 return host_cmd;
5811 switch(cmd) {
5812 case TARGET_F_GETLK:
5813 ret = copy_from_user_flock(&fl64, arg);
5814 if (ret) {
5815 return ret;
5817 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5818 if (ret == 0) {
5819 ret = copy_to_user_flock(arg, &fl64);
5821 break;
5823 case TARGET_F_SETLK:
5824 case TARGET_F_SETLKW:
5825 ret = copy_from_user_flock(&fl64, arg);
5826 if (ret) {
5827 return ret;
5829 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5830 break;
5832 case TARGET_F_GETLK64:
5833 ret = copy_from_user_flock64(&fl64, arg);
5834 if (ret) {
5835 return ret;
5837 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5838 if (ret == 0) {
5839 ret = copy_to_user_flock64(arg, &fl64);
5841 break;
5842 case TARGET_F_SETLK64:
5843 case TARGET_F_SETLKW64:
5844 ret = copy_from_user_flock64(&fl64, arg);
5845 if (ret) {
5846 return ret;
5848 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5849 break;
5851 case TARGET_F_GETFL:
5852 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5853 if (ret >= 0) {
5854 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5856 break;
5858 case TARGET_F_SETFL:
5859 ret = get_errno(safe_fcntl(fd, host_cmd,
5860 target_to_host_bitmask(arg,
5861 fcntl_flags_tbl)));
5862 break;
5864 #ifdef F_GETOWN_EX
5865 case TARGET_F_GETOWN_EX:
5866 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5867 if (ret >= 0) {
5868 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5869 return -TARGET_EFAULT;
5870 target_fox->type = tswap32(fox.type);
5871 target_fox->pid = tswap32(fox.pid);
5872 unlock_user_struct(target_fox, arg, 1);
5874 break;
5875 #endif
5877 #ifdef F_SETOWN_EX
5878 case TARGET_F_SETOWN_EX:
5879 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5880 return -TARGET_EFAULT;
5881 fox.type = tswap32(target_fox->type);
5882 fox.pid = tswap32(target_fox->pid);
5883 unlock_user_struct(target_fox, arg, 0);
5884 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5885 break;
5886 #endif
5888 case TARGET_F_SETOWN:
5889 case TARGET_F_GETOWN:
5890 case TARGET_F_SETSIG:
5891 case TARGET_F_GETSIG:
5892 case TARGET_F_SETLEASE:
5893 case TARGET_F_GETLEASE:
5894 case TARGET_F_SETPIPE_SZ:
5895 case TARGET_F_GETPIPE_SZ:
5896 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5897 break;
5899 default:
5900 ret = get_errno(safe_fcntl(fd, cmd, arg));
5901 break;
5903 return ret;
5906 #ifdef USE_UID16
5908 static inline int high2lowuid(int uid)
5910 if (uid > 65535)
5911 return 65534;
5912 else
5913 return uid;
5916 static inline int high2lowgid(int gid)
5918 if (gid > 65535)
5919 return 65534;
5920 else
5921 return gid;
5924 static inline int low2highuid(int uid)
5926 if ((int16_t)uid == -1)
5927 return -1;
5928 else
5929 return uid;
5932 static inline int low2highgid(int gid)
5934 if ((int16_t)gid == -1)
5935 return -1;
5936 else
5937 return gid;
5939 static inline int tswapid(int id)
5941 return tswap16(id);
5944 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5946 #else /* !USE_UID16 */
5947 static inline int high2lowuid(int uid)
5949 return uid;
5951 static inline int high2lowgid(int gid)
5953 return gid;
5955 static inline int low2highuid(int uid)
5957 return uid;
5959 static inline int low2highgid(int gid)
5961 return gid;
5963 static inline int tswapid(int id)
5965 return tswap32(id);
5968 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5970 #endif /* USE_UID16 */
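/* For illustration (not compiled): expected behaviour of the UID16
 * helpers above (USE_UID16 variants; the ID values are hypothetical).
 * Host IDs that do not fit in 16 bits are clamped to the kernel's
 * overflow ID 65534, while the 16-bit -1 "unchanged" sentinel must
 * survive the widening back to 32 bits. */
#if 0
    assert(high2lowuid(100000) == 65534);  /* clamped to overflowuid */
    assert(high2lowuid(1000) == 1000);     /* small IDs pass through */
    assert(low2highuid(0xffff) == -1);     /* 16-bit -1 stays -1 */
#endif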
5972 /* We must do direct syscalls for setting UID/GID, because we want to
5973 * implement the Linux system call semantics of "change only for this thread",
5974 * not the libc/POSIX semantics of "change for all threads in process".
5975 * (See http://ewontfix.com/17/ for more details.)
5976 * We use the 32-bit version of the syscalls if present; if it is not
5977 * then either the host architecture supports 32-bit UIDs natively with
5978 * the standard syscall, or the 16-bit UID is the best we can do.
5979 */
5980 #ifdef __NR_setuid32
5981 #define __NR_sys_setuid __NR_setuid32
5982 #else
5983 #define __NR_sys_setuid __NR_setuid
5984 #endif
5985 #ifdef __NR_setgid32
5986 #define __NR_sys_setgid __NR_setgid32
5987 #else
5988 #define __NR_sys_setgid __NR_setgid
5989 #endif
5990 #ifdef __NR_setresuid32
5991 #define __NR_sys_setresuid __NR_setresuid32
5992 #else
5993 #define __NR_sys_setresuid __NR_setresuid
5994 #endif
5995 #ifdef __NR_setresgid32
5996 #define __NR_sys_setresgid __NR_setresgid32
5997 #else
5998 #define __NR_sys_setresgid __NR_setresgid
5999 #endif
6001 _syscall1(int, sys_setuid, uid_t, uid)
6002 _syscall1(int, sys_setgid, gid_t, gid)
6003 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6004 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
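/* For illustration (not compiled): the practical difference the raw
 * syscall wrappers above make. The UID value is hypothetical. */
#if 0
    sys_setuid(1000);  /* raw syscall: changes only the calling thread,
                        * which is the kernel ABI the guest expects */
    setuid(1000);      /* glibc wrapper: signals every thread so the
                        * whole process changes, per POSIX */
#endif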
6006 void syscall_init(void)
6008 IOCTLEntry *ie;
6009 const argtype *arg_type;
6010 int size;
6011 int i;
6013 thunk_init(STRUCT_MAX);
6015 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6016 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6017 #include "syscall_types.h"
6018 #undef STRUCT
6019 #undef STRUCT_SPECIAL
6021 /* Build target_to_host_errno_table[] table from
6022 * host_to_target_errno_table[]. */
6023 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6024 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6027 /* We patch the ioctl size if necessary. We rely on the fact that
6028 no ioctl has all the bits at '1' in the size field. */
6029 ie = ioctl_entries;
6030 while (ie->target_cmd != 0) {
6031 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6032 TARGET_IOC_SIZEMASK) {
6033 arg_type = ie->arg_type;
6034 if (arg_type[0] != TYPE_PTR) {
6035 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6036 ie->target_cmd);
6037 exit(1);
6039 arg_type++;
6040 size = thunk_type_size(arg_type, 0);
6041 ie->target_cmd = (ie->target_cmd &
6042 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6043 (size << TARGET_IOC_SIZESHIFT);
6046 /* automatic consistency check if same arch */
6047 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6048 (defined(__x86_64__) && defined(TARGET_X86_64))
6049 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6050 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6051 ie->name, ie->target_cmd, ie->host_cmd);
6053 #endif
6054 ie++;
6058 #if TARGET_ABI_BITS == 32
6059 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6061 #ifdef TARGET_WORDS_BIGENDIAN
6062 return ((uint64_t)word0 << 32) | word1;
6063 #else
6064 return ((uint64_t)word1 << 32) | word0;
6065 #endif
6067 #else /* TARGET_ABI_BITS == 32 */
6068 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6070 return word0;
6072 #endif /* TARGET_ABI_BITS != 32 */
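/* For illustration (not compiled): on a 32-bit ABI a 64-bit offset is
 * reassembled from a guest register pair, so which word holds the high
 * half follows the guest's endianness (values are hypothetical). */
#if 0
    /* little-endian guest: word0 is the low half */
    assert(target_offset64(0x89abcdef, 0x01234567) == 0x0123456789abcdefULL);
    /* big-endian guest: word0 is the high half, so the same offset
     * comes from target_offset64(0x01234567, 0x89abcdef) */
#endif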
6074 #ifdef TARGET_NR_truncate64
6075 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6076 abi_long arg2,
6077 abi_long arg3,
6078 abi_long arg4)
6080 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6081 arg2 = arg3;
6082 arg3 = arg4;
6084 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6086 #endif
6088 #ifdef TARGET_NR_ftruncate64
6089 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6090 abi_long arg2,
6091 abi_long arg3,
6092 abi_long arg4)
6094 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6095 arg2 = arg3;
6096 arg3 = arg4;
6098 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6100 #endif
6102 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6103 abi_ulong target_addr)
6105 struct target_timespec *target_ts;
6107 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6108 return -TARGET_EFAULT;
6109 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6110 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6111 unlock_user_struct(target_ts, target_addr, 0);
6112 return 0;
6115 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6116 struct timespec *host_ts)
6118 struct target_timespec *target_ts;
6120 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6121 return -TARGET_EFAULT;
6122 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6123 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6124 unlock_user_struct(target_ts, target_addr, 1);
6125 return 0;
6128 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6129 abi_ulong target_addr)
6131 struct target_itimerspec *target_itspec;
6133 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6134 return -TARGET_EFAULT;
6137 host_itspec->it_interval.tv_sec =
6138 tswapal(target_itspec->it_interval.tv_sec);
6139 host_itspec->it_interval.tv_nsec =
6140 tswapal(target_itspec->it_interval.tv_nsec);
6141 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6142 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6144 unlock_user_struct(target_itspec, target_addr, 1);
6145 return 0;
6148 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6149 struct itimerspec *host_its)
6151 struct target_itimerspec *target_itspec;
6153 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6154 return -TARGET_EFAULT;
6157 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6158 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6160 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6161 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6163 unlock_user_struct(target_itspec, target_addr, 0);
6164 return 0;
6167 static inline abi_long target_to_host_timex(struct timex *host_tx,
6168 abi_long target_addr)
6170 struct target_timex *target_tx;
6172 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6173 return -TARGET_EFAULT;
6176 __get_user(host_tx->modes, &target_tx->modes);
6177 __get_user(host_tx->offset, &target_tx->offset);
6178 __get_user(host_tx->freq, &target_tx->freq);
6179 __get_user(host_tx->maxerror, &target_tx->maxerror);
6180 __get_user(host_tx->esterror, &target_tx->esterror);
6181 __get_user(host_tx->status, &target_tx->status);
6182 __get_user(host_tx->constant, &target_tx->constant);
6183 __get_user(host_tx->precision, &target_tx->precision);
6184 __get_user(host_tx->tolerance, &target_tx->tolerance);
6185 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6186 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6187 __get_user(host_tx->tick, &target_tx->tick);
6188 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6189 __get_user(host_tx->jitter, &target_tx->jitter);
6190 __get_user(host_tx->shift, &target_tx->shift);
6191 __get_user(host_tx->stabil, &target_tx->stabil);
6192 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6193 __get_user(host_tx->calcnt, &target_tx->calcnt);
6194 __get_user(host_tx->errcnt, &target_tx->errcnt);
6195 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6196 __get_user(host_tx->tai, &target_tx->tai);
6198 unlock_user_struct(target_tx, target_addr, 0);
6199 return 0;
6202 static inline abi_long host_to_target_timex(abi_long target_addr,
6203 struct timex *host_tx)
6205 struct target_timex *target_tx;
6207 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6208 return -TARGET_EFAULT;
6211 __put_user(host_tx->modes, &target_tx->modes);
6212 __put_user(host_tx->offset, &target_tx->offset);
6213 __put_user(host_tx->freq, &target_tx->freq);
6214 __put_user(host_tx->maxerror, &target_tx->maxerror);
6215 __put_user(host_tx->esterror, &target_tx->esterror);
6216 __put_user(host_tx->status, &target_tx->status);
6217 __put_user(host_tx->constant, &target_tx->constant);
6218 __put_user(host_tx->precision, &target_tx->precision);
6219 __put_user(host_tx->tolerance, &target_tx->tolerance);
6220 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6221 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6222 __put_user(host_tx->tick, &target_tx->tick);
6223 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6224 __put_user(host_tx->jitter, &target_tx->jitter);
6225 __put_user(host_tx->shift, &target_tx->shift);
6226 __put_user(host_tx->stabil, &target_tx->stabil);
6227 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6228 __put_user(host_tx->calcnt, &target_tx->calcnt);
6229 __put_user(host_tx->errcnt, &target_tx->errcnt);
6230 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6231 __put_user(host_tx->tai, &target_tx->tai);
6233 unlock_user_struct(target_tx, target_addr, 1);
6234 return 0;
6238 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6239 abi_ulong target_addr)
6241 struct target_sigevent *target_sevp;
6243 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6244 return -TARGET_EFAULT;
6247 /* This union is awkward on 64 bit systems because it has a 32 bit
6248 * integer and a pointer in it; we follow the conversion approach
6249 * used for handling sigval types in signal.c so the guest should get
6250 * the correct value back even if we did a 64 bit byteswap and it's
6251 * using the 32 bit integer.
6252 */
6253 host_sevp->sigev_value.sival_ptr =
6254 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6255 host_sevp->sigev_signo =
6256 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6257 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6258 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6260 unlock_user_struct(target_sevp, target_addr, 1);
6261 return 0;
6264 #if defined(TARGET_NR_mlockall)
6265 static inline int target_to_host_mlockall_arg(int arg)
6267 int result = 0;
6269 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6270 result |= MCL_CURRENT;
6272 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6273 result |= MCL_FUTURE;
6275 return result;
6277 #endif
6279 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6280 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6281 defined(TARGET_NR_newfstatat))
6282 static inline abi_long host_to_target_stat64(void *cpu_env,
6283 abi_ulong target_addr,
6284 struct stat *host_st)
6286 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6287 if (((CPUARMState *)cpu_env)->eabi) {
6288 struct target_eabi_stat64 *target_st;
6290 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6291 return -TARGET_EFAULT;
6292 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6293 __put_user(host_st->st_dev, &target_st->st_dev);
6294 __put_user(host_st->st_ino, &target_st->st_ino);
6295 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6296 __put_user(host_st->st_ino, &target_st->__st_ino);
6297 #endif
6298 __put_user(host_st->st_mode, &target_st->st_mode);
6299 __put_user(host_st->st_nlink, &target_st->st_nlink);
6300 __put_user(host_st->st_uid, &target_st->st_uid);
6301 __put_user(host_st->st_gid, &target_st->st_gid);
6302 __put_user(host_st->st_rdev, &target_st->st_rdev);
6303 __put_user(host_st->st_size, &target_st->st_size);
6304 __put_user(host_st->st_blksize, &target_st->st_blksize);
6305 __put_user(host_st->st_blocks, &target_st->st_blocks);
6306 __put_user(host_st->st_atime, &target_st->target_st_atime);
6307 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6308 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6309 unlock_user_struct(target_st, target_addr, 1);
6310 } else
6311 #endif
6313 #if defined(TARGET_HAS_STRUCT_STAT64)
6314 struct target_stat64 *target_st;
6315 #else
6316 struct target_stat *target_st;
6317 #endif
6319 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6320 return -TARGET_EFAULT;
6321 memset(target_st, 0, sizeof(*target_st));
6322 __put_user(host_st->st_dev, &target_st->st_dev);
6323 __put_user(host_st->st_ino, &target_st->st_ino);
6324 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6325 __put_user(host_st->st_ino, &target_st->__st_ino);
6326 #endif
6327 __put_user(host_st->st_mode, &target_st->st_mode);
6328 __put_user(host_st->st_nlink, &target_st->st_nlink);
6329 __put_user(host_st->st_uid, &target_st->st_uid);
6330 __put_user(host_st->st_gid, &target_st->st_gid);
6331 __put_user(host_st->st_rdev, &target_st->st_rdev);
6332 /* XXX: better use of kernel struct */
6333 __put_user(host_st->st_size, &target_st->st_size);
6334 __put_user(host_st->st_blksize, &target_st->st_blksize);
6335 __put_user(host_st->st_blocks, &target_st->st_blocks);
6336 __put_user(host_st->st_atime, &target_st->target_st_atime);
6337 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6338 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6339 unlock_user_struct(target_st, target_addr, 1);
6342 return 0;
6344 #endif
6346 /* ??? Using host futex calls even when target atomic operations
6347 are not really atomic probably breaks things. However implementing
6348 futexes locally would make futexes shared between multiple processes
6349 tricky. In any case they're probably useless, because guest atomic
6350 operations won't work either. */
6351 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6352 target_ulong uaddr2, int val3)
6354 struct timespec ts, *pts;
6355 int base_op;
6357 /* ??? We assume FUTEX_* constants are the same on both host
6358 and target. */
6359 #ifdef FUTEX_CMD_MASK
6360 base_op = op & FUTEX_CMD_MASK;
6361 #else
6362 base_op = op;
6363 #endif
6364 switch (base_op) {
6365 case FUTEX_WAIT:
6366 case FUTEX_WAIT_BITSET:
6367 if (timeout) {
6368 pts = &ts;
6369 if (target_to_host_timespec(pts, timeout)) return -TARGET_EFAULT;
6370 } else {
6371 pts = NULL;
6373 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6374 pts, NULL, val3));
6375 case FUTEX_WAKE:
6376 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6377 case FUTEX_FD:
6378 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6379 case FUTEX_REQUEUE:
6380 case FUTEX_CMP_REQUEUE:
6381 case FUTEX_WAKE_OP:
6382 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6383 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6384 But the prototype takes a `struct timespec *'; insert casts
6385 to satisfy the compiler. We do not need to tswap TIMEOUT
6386 since it's not compared to guest memory. */
6387 pts = (struct timespec *)(uintptr_t) timeout;
6388 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6389 g2h(uaddr2),
6390 (base_op == FUTEX_CMP_REQUEUE
6391 ? tswap32(val3)
6392 : val3)));
6393 default:
6394 return -TARGET_ENOSYS;
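/* For illustration (not compiled): a guest thread blocking on a futex
 * word with a 100ms timeout reaches the host roughly as below;
 * guest_lock_addr and expected are hypothetical names. g2h() turns the
 * guest address into the host pointer the host futex call needs, and
 * the expected value is byteswapped to match the guest-endian word. */
#if 0
    struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
    safe_futex(g2h(guest_lock_addr), FUTEX_WAIT, tswap32(expected),
               &ts, NULL, 0);
#endif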
6397 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6398 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6399 abi_long handle, abi_long mount_id,
6400 abi_long flags)
6402 struct file_handle *target_fh;
6403 struct file_handle *fh;
6404 int mid = 0;
6405 abi_long ret;
6406 char *name;
6407 unsigned int size, total_size;
6409 if (get_user_s32(size, handle)) {
6410 return -TARGET_EFAULT;
6413 name = lock_user_string(pathname);
6414 if (!name) {
6415 return -TARGET_EFAULT;
6418 total_size = sizeof(struct file_handle) + size;
6419 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6420 if (!target_fh) {
6421 unlock_user(name, pathname, 0);
6422 return -TARGET_EFAULT;
6425 fh = g_malloc0(total_size);
6426 fh->handle_bytes = size;
6428 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6429 unlock_user(name, pathname, 0);
6431 /* man name_to_handle_at(2):
6432 * Other than the use of the handle_bytes field, the caller should treat
6433 * the file_handle structure as an opaque data type.
6434 */
6436 memcpy(target_fh, fh, total_size);
6437 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6438 target_fh->handle_type = tswap32(fh->handle_type);
6439 g_free(fh);
6440 unlock_user(target_fh, handle, total_size);
6442 if (put_user_s32(mid, mount_id)) {
6443 return -TARGET_EFAULT;
6446 return ret;
6449 #endif
6451 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6452 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6453 abi_long flags)
6455 struct file_handle *target_fh;
6456 struct file_handle *fh;
6457 unsigned int size, total_size;
6458 abi_long ret;
6460 if (get_user_s32(size, handle)) {
6461 return -TARGET_EFAULT;
6464 total_size = sizeof(struct file_handle) + size;
6465 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6466 if (!target_fh) {
6467 return -TARGET_EFAULT;
6470 fh = g_memdup(target_fh, total_size);
6471 fh->handle_bytes = size;
6472 fh->handle_type = tswap32(target_fh->handle_type);
6474 ret = get_errno(open_by_handle_at(mount_fd, fh,
6475 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6477 g_free(fh);
6479 unlock_user(target_fh, handle, total_size);
6481 return ret;
6483 #endif
6485 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6487 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6489 int host_flags;
6490 target_sigset_t *target_mask;
6491 sigset_t host_mask;
6492 abi_long ret;
6494 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6495 return -TARGET_EINVAL;
6497 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6498 return -TARGET_EFAULT;
6501 target_to_host_sigset(&host_mask, target_mask);
6503 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6505 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6506 if (ret >= 0) {
6507 fd_trans_register(ret, &target_signalfd_trans);
6510 unlock_user_struct(target_mask, mask, 0);
6512 return ret;
6514 #endif
6516 /* Map host to target signal numbers for the wait family of syscalls.
6517 Assume all other status bits are the same. */
6518 int host_to_target_waitstatus(int status)
6520 if (WIFSIGNALED(status)) {
6521 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6523 if (WIFSTOPPED(status)) {
6524 return (host_to_target_signal(WSTOPSIG(status)) << 8)
6525 | (status & 0xff);
6527 return status;
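/* For illustration (not compiled): the status layout relied on above,
 * shown with hypothetical status words. */
#if 0
    /* terminated by a signal: the low 7 bits carry the signal number */
    assert(WIFSIGNALED(0x000a) && WTERMSIG(0x000a) == 10);
    /* stopped: the low byte is 0x7f, the signal sits in bits 8..15 */
    assert(WIFSTOPPED(0x137f) && WSTOPSIG(0x137f) == 0x13);
#endif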
6530 static int open_self_cmdline(void *cpu_env, int fd)
6532 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6533 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6534 int i;
6536 for (i = 0; i < bprm->argc; i++) {
6537 size_t len = strlen(bprm->argv[i]) + 1;
6539 if (write(fd, bprm->argv[i], len) != len) {
6540 return -1;
6544 return 0;
6547 static int open_self_maps(void *cpu_env, int fd)
6549 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6550 TaskState *ts = cpu->opaque;
6551 FILE *fp;
6552 char *line = NULL;
6553 size_t len = 0;
6554 ssize_t read;
6556 fp = fopen("/proc/self/maps", "r");
6557 if (fp == NULL) {
6558 return -1;
6561 while ((read = getline(&line, &len, fp)) != -1) {
6562 int fields, dev_maj, dev_min, inode;
6563 uint64_t min, max, offset;
6564 char flag_r, flag_w, flag_x, flag_p;
6565 char path[512] = "";
6566 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6567 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6568 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6570 if ((fields < 10) || (fields > 11)) {
6571 continue;
6573 if (h2g_valid(min)) {
6574 int flags = page_get_flags(h2g(min));
6575 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6576 if (page_check_range(h2g(min), max - min, flags) == -1) {
6577 continue;
6579 if (h2g(min) == ts->info->stack_limit) {
6580 pstrcpy(path, sizeof(path), " [stack]");
6582 dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6583 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6584 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6585 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6586 path[0] ? " " : "", path);
6590 free(line);
6591 fclose(fp);
6593 return 0;
6596 static int open_self_stat(void *cpu_env, int fd)
6598 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6599 TaskState *ts = cpu->opaque;
6600 abi_ulong start_stack = ts->info->start_stack;
6601 int i;
6603 for (i = 0; i < 44; i++) {
6604 char buf[128];
6605 int len;
6606 uint64_t val = 0;
6608 if (i == 0) {
6609 /* pid */
6610 val = getpid();
6611 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6612 } else if (i == 1) {
6613 /* app name */
6614 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6615 } else if (i == 27) {
6616 /* stack bottom */
6617 val = start_stack;
6618 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6619 } else {
6620 /* for the rest, there is MasterCard */
6621 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6624 len = strlen(buf);
6625 if (write(fd, buf, len) != len) {
6626 return -1;
6630 return 0;
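/* For illustration: with a hypothetical pid and argv[0] the
 * synthesized line looks like
 *     1234 (a.out) 0 0 0 ... <start_stack> ... 0
 * i.e. only fields 1 (pid), 2 (comm) and 28 (startstack) carry real
 * values; everything else is zero-filled. */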
6633 static int open_self_auxv(void *cpu_env, int fd)
6635 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6636 TaskState *ts = cpu->opaque;
6637 abi_ulong auxv = ts->info->saved_auxv;
6638 abi_ulong len = ts->info->auxv_len;
6639 char *ptr;
6641 /*
6642 * Auxiliary vector is stored in target process stack.
6643 * Read in the whole auxv vector and copy it to the file.
6644 */
6645 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6646 if (ptr != NULL) {
6647 while (len > 0) {
6648 ssize_t r;
6649 r = write(fd, ptr, len);
6650 if (r <= 0) {
6651 break;
6653 len -= r;
6654 ptr += r;
6656 lseek(fd, 0, SEEK_SET);
6657 unlock_user(ptr, auxv, len);
6660 return 0;
6663 static int is_proc_myself(const char *filename, const char *entry)
6665 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6666 filename += strlen("/proc/");
6667 if (!strncmp(filename, "self/", strlen("self/"))) {
6668 filename += strlen("self/");
6669 } else if (*filename >= '1' && *filename <= '9') {
6670 char myself[80];
6671 snprintf(myself, sizeof(myself), "%d/", getpid());
6672 if (!strncmp(filename, myself, strlen(myself))) {
6673 filename += strlen(myself);
6674 } else {
6675 return 0;
6677 } else {
6678 return 0;
6680 if (!strcmp(filename, entry)) {
6681 return 1;
6684 return 0;
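/* For illustration (not compiled): expected results in a process whose
 * pid is, hypothetically, 1234. */
#if 0
    is_proc_myself("/proc/self/maps", "maps");  /* 1: self entry */
    is_proc_myself("/proc/1234/maps", "maps");  /* 1: matches own pid */
    is_proc_myself("/proc/5678/maps", "maps");  /* 0: some other pid */
    is_proc_myself("maps", "maps");             /* 0: no /proc/ prefix */
#endif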
6687 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6688 static int is_proc(const char *filename, const char *entry)
6690 return strcmp(filename, entry) == 0;
6693 static int open_net_route(void *cpu_env, int fd)
6695 FILE *fp;
6696 char *line = NULL;
6697 size_t len = 0;
6698 ssize_t read;
6700 fp = fopen("/proc/net/route", "r");
6701 if (fp == NULL) {
6702 return -1;
6705 /* read header */
6707 read = getline(&line, &len, fp);
6708 dprintf(fd, "%s", line);
6710 /* read routes */
6712 while ((read = getline(&line, &len, fp)) != -1) {
6713 char iface[16];
6714 uint32_t dest, gw, mask;
6715 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6716 sscanf(line, "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6717 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6718 &mask, &mtu, &window, &irtt);
6719 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6720 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6721 metric, tswap32(mask), mtu, window, irtt);
6724 free(line);
6725 fclose(fp);
6727 return 0;
6729 #endif
6731 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6733 struct fake_open {
6734 const char *filename;
6735 int (*fill)(void *cpu_env, int fd);
6736 int (*cmp)(const char *s1, const char *s2);
6738 const struct fake_open *fake_open;
6739 static const struct fake_open fakes[] = {
6740 { "maps", open_self_maps, is_proc_myself },
6741 { "stat", open_self_stat, is_proc_myself },
6742 { "auxv", open_self_auxv, is_proc_myself },
6743 { "cmdline", open_self_cmdline, is_proc_myself },
6744 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6745 { "/proc/net/route", open_net_route, is_proc },
6746 #endif
6747 { NULL, NULL, NULL }
6750 if (is_proc_myself(pathname, "exe")) {
6751 int execfd = qemu_getauxval(AT_EXECFD);
6752 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6755 for (fake_open = fakes; fake_open->filename; fake_open++) {
6756 if (fake_open->cmp(pathname, fake_open->filename)) {
6757 break;
6761 if (fake_open->filename) {
6762 const char *tmpdir;
6763 char filename[PATH_MAX];
6764 int fd, r;
6766 /* create a temporary file to hold the synthesized /proc contents */
6767 tmpdir = getenv("TMPDIR");
6768 if (!tmpdir)
6769 tmpdir = "/tmp";
6770 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6771 fd = mkstemp(filename);
6772 if (fd < 0) {
6773 return fd;
6775 unlink(filename);
6777 if ((r = fake_open->fill(cpu_env, fd))) {
6778 int e = errno;
6779 close(fd);
6780 errno = e;
6781 return r;
6783 lseek(fd, 0, SEEK_SET);
6785 return fd;
6788 return safe_openat(dirfd, path(pathname), flags, mode);
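/* For illustration (not compiled): a guest open of a faked /proc entry
 * never reaches the host's /proc; it gets back an unlinked temp file
 * that was filled in by the matching fill() hook above. */
#if 0
    fd = do_openat(cpu_env, AT_FDCWD, "/proc/self/maps", O_RDONLY, 0);
#endif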
6791 #define TIMER_MAGIC 0x0caf0000
6792 #define TIMER_MAGIC_MASK 0xffff0000
6794 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6795 static target_timer_t get_timer_id(abi_long arg)
6797 target_timer_t timerid = arg;
6799 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6800 return -TARGET_EINVAL;
6803 timerid &= 0xffff;
6805 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6806 return -TARGET_EINVAL;
6809 return timerid;
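/* For illustration (not compiled): the forward direction of this
 * encoding, sketched with a hypothetical slot index that indexes
 * g_posix_timers. The 16-bit index is tagged with TIMER_MAGIC in the
 * upper half, so stray guest-supplied values are rejected above. */
#if 0
    abi_long guest_id = TIMER_MAGIC | slot;   /* slot fits in 16 bits */
    assert(get_timer_id(guest_id) == slot);
#endif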
6812 static int target_to_host_cpu_mask(unsigned long *host_mask,
6813 size_t host_size,
6814 abi_ulong target_addr,
6815 size_t target_size)
6817 unsigned target_bits = sizeof(abi_ulong) * 8;
6818 unsigned host_bits = sizeof(*host_mask) * 8;
6819 abi_ulong *target_mask;
6820 unsigned i, j;
6822 assert(host_size >= target_size);
6824 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6825 if (!target_mask) {
6826 return -TARGET_EFAULT;
6828 memset(host_mask, 0, host_size);
6830 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6831 unsigned bit = i * target_bits;
6832 abi_ulong val;
6834 __get_user(val, &target_mask[i]);
6835 for (j = 0; j < target_bits; j++, bit++) {
6836 if (val & (1UL << j)) {
6837 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6842 unlock_user(target_mask, target_addr, 0);
6843 return 0;
6846 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6847 size_t host_size,
6848 abi_ulong target_addr,
6849 size_t target_size)
6851 unsigned target_bits = sizeof(abi_ulong) * 8;
6852 unsigned host_bits = sizeof(*host_mask) * 8;
6853 abi_ulong *target_mask;
6854 unsigned i, j;
6856 assert(host_size >= target_size);
6858 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6859 if (!target_mask) {
6860 return -TARGET_EFAULT;
6863 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6864 unsigned bit = i * target_bits;
6865 abi_ulong val = 0;
6867 for (j = 0; j < target_bits; j++, bit++) {
6868 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6869 val |= 1UL << j;
6872 __put_user(val, &target_mask[i]);
6875 unlock_user(target_mask, target_addr, target_size);
6876 return 0;
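/* For illustration (not compiled): a round trip with hypothetical
 * sizes, assuming a 32-bit abi_ulong guest on a 64-bit host and a
 * hypothetical guest_addr. Host CPU bit 33 must land in bit 1 of the
 * guest's second mask word. */
#if 0
    unsigned long host_mask[1] = { 1UL << 33 };
    host_to_target_cpu_mask(host_mask, sizeof(host_mask), guest_addr, 8);
#endif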
6879 /* This is an internal helper for do_syscall so that it is easier
6880 * to have a single return point, so that actions, such as logging
6881 * of syscall results, can be performed.
6882 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6883 */
6884 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6885 abi_long arg2, abi_long arg3, abi_long arg4,
6886 abi_long arg5, abi_long arg6, abi_long arg7,
6887 abi_long arg8)
6889 CPUState *cpu = ENV_GET_CPU(cpu_env);
6890 abi_long ret;
6891 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6892 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6893 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6894 struct stat st;
6895 #endif
6896 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6897 || defined(TARGET_NR_fstatfs)
6898 struct statfs stfs;
6899 #endif
6900 void *p;
6902 switch(num) {
6903 case TARGET_NR_exit:
6904 /* In old applications this may be used to implement _exit(2).
6905 However in threaded applications it is used for thread termination,
6906 and _exit_group is used for application termination.
6907 Do thread termination if we have more than one thread. */
6909 if (block_signals()) {
6910 return -TARGET_ERESTARTSYS;
6913 cpu_list_lock();
6915 if (CPU_NEXT(first_cpu)) {
6916 TaskState *ts;
6918 /* Remove the CPU from the list. */
6919 QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6921 cpu_list_unlock();
6923 ts = cpu->opaque;
6924 if (ts->child_tidptr) {
6925 put_user_u32(0, ts->child_tidptr);
6926 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6927 NULL, NULL, 0);
6929 thread_cpu = NULL;
6930 object_unref(OBJECT(cpu));
6931 g_free(ts);
6932 rcu_unregister_thread();
6933 pthread_exit(NULL);
6936 cpu_list_unlock();
6937 preexit_cleanup(cpu_env, arg1);
6938 _exit(arg1);
6939 return 0; /* avoid warning */
6940 case TARGET_NR_read:
6941 if (arg3 == 0) {
6942 return 0;
6943 } else {
6944 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6945 return -TARGET_EFAULT;
6946 ret = get_errno(safe_read(arg1, p, arg3));
6947 if (ret >= 0 &&
6948 fd_trans_host_to_target_data(arg1)) {
6949 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6951 unlock_user(p, arg2, ret);
6953 return ret;
6954 case TARGET_NR_write:
6955 if (arg2 == 0 && arg3 == 0) {
6956 return get_errno(safe_write(arg1, 0, 0));
6958 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6959 return -TARGET_EFAULT;
6960 if (fd_trans_target_to_host_data(arg1)) {
6961 void *copy = g_malloc(arg3);
6962 memcpy(copy, p, arg3);
6963 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
6964 if (ret >= 0) {
6965 ret = get_errno(safe_write(arg1, copy, ret));
6967 g_free(copy);
6968 } else {
6969 ret = get_errno(safe_write(arg1, p, arg3));
6971 unlock_user(p, arg2, 0);
6972 return ret;
6974 #ifdef TARGET_NR_open
6975 case TARGET_NR_open:
6976 if (!(p = lock_user_string(arg1)))
6977 return -TARGET_EFAULT;
6978 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6979 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6980 arg3));
6981 fd_trans_unregister(ret);
6982 unlock_user(p, arg1, 0);
6983 return ret;
6984 #endif
6985 case TARGET_NR_openat:
6986 if (!(p = lock_user_string(arg2)))
6987 return -TARGET_EFAULT;
6988 ret = get_errno(do_openat(cpu_env, arg1, p,
6989 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6990 arg4));
6991 fd_trans_unregister(ret);
6992 unlock_user(p, arg2, 0);
6993 return ret;
6994 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6995 case TARGET_NR_name_to_handle_at:
6996 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6997 return ret;
6998 #endif
6999 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7000 case TARGET_NR_open_by_handle_at:
7001 ret = do_open_by_handle_at(arg1, arg2, arg3);
7002 fd_trans_unregister(ret);
7003 return ret;
7004 #endif
7005 case TARGET_NR_close:
7006 fd_trans_unregister(arg1);
7007 return get_errno(close(arg1));
7009 case TARGET_NR_brk:
7010 return do_brk(arg1);
7011 #ifdef TARGET_NR_fork
7012 case TARGET_NR_fork:
7013 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7014 #endif
7015 #ifdef TARGET_NR_waitpid
7016 case TARGET_NR_waitpid:
7018 int status;
7019 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7020 if (!is_error(ret) && arg2 && ret
7021 && put_user_s32(host_to_target_waitstatus(status), arg2))
7022 return -TARGET_EFAULT;
7024 return ret;
7025 #endif
7026 #ifdef TARGET_NR_waitid
7027 case TARGET_NR_waitid:
7029 siginfo_t info;
7030 info.si_pid = 0;
7031 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7032 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7033 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7034 return -TARGET_EFAULT;
7035 host_to_target_siginfo(p, &info);
7036 unlock_user(p, arg3, sizeof(target_siginfo_t));
7039 return ret;
7040 #endif
7041 #ifdef TARGET_NR_creat /* not on alpha */
7042 case TARGET_NR_creat:
7043 if (!(p = lock_user_string(arg1)))
7044 return -TARGET_EFAULT;
7045 ret = get_errno(creat(p, arg2));
7046 fd_trans_unregister(ret);
7047 unlock_user(p, arg1, 0);
7048 return ret;
7049 #endif
7050 #ifdef TARGET_NR_link
7051 case TARGET_NR_link:
7053 void * p2;
7054 p = lock_user_string(arg1);
7055 p2 = lock_user_string(arg2);
7056 if (!p || !p2)
7057 ret = -TARGET_EFAULT;
7058 else
7059 ret = get_errno(link(p, p2));
7060 unlock_user(p2, arg2, 0);
7061 unlock_user(p, arg1, 0);
7063 return ret;
7064 #endif
7065 #if defined(TARGET_NR_linkat)
7066 case TARGET_NR_linkat:
7068 void * p2 = NULL;
7069 if (!arg2 || !arg4)
7070 return -TARGET_EFAULT;
7071 p = lock_user_string(arg2);
7072 p2 = lock_user_string(arg4);
7073 if (!p || !p2)
7074 ret = -TARGET_EFAULT;
7075 else
7076 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7077 unlock_user(p, arg2, 0);
7078 unlock_user(p2, arg4, 0);
7080 return ret;
7081 #endif
7082 #ifdef TARGET_NR_unlink
7083 case TARGET_NR_unlink:
7084 if (!(p = lock_user_string(arg1)))
7085 return -TARGET_EFAULT;
7086 ret = get_errno(unlink(p));
7087 unlock_user(p, arg1, 0);
7088 return ret;
7089 #endif
7090 #if defined(TARGET_NR_unlinkat)
7091 case TARGET_NR_unlinkat:
7092 if (!(p = lock_user_string(arg2)))
7093 return -TARGET_EFAULT;
7094 ret = get_errno(unlinkat(arg1, p, arg3));
7095 unlock_user(p, arg2, 0);
7096 return ret;
7097 #endif
7098 case TARGET_NR_execve:
7100 char **argp, **envp;
7101 int argc, envc;
7102 abi_ulong gp;
7103 abi_ulong guest_argp;
7104 abi_ulong guest_envp;
7105 abi_ulong addr;
7106 char **q;
7107 int total_size = 0;
7109 argc = 0;
7110 guest_argp = arg2;
7111 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7112 if (get_user_ual(addr, gp))
7113 return -TARGET_EFAULT;
7114 if (!addr)
7115 break;
7116 argc++;
7118 envc = 0;
7119 guest_envp = arg3;
7120 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7121 if (get_user_ual(addr, gp))
7122 return -TARGET_EFAULT;
7123 if (!addr)
7124 break;
7125 envc++;
7128 argp = g_new0(char *, argc + 1);
7129 envp = g_new0(char *, envc + 1);
7131 for (gp = guest_argp, q = argp; gp;
7132 gp += sizeof(abi_ulong), q++) {
7133 if (get_user_ual(addr, gp))
7134 goto execve_efault;
7135 if (!addr)
7136 break;
7137 if (!(*q = lock_user_string(addr)))
7138 goto execve_efault;
7139 total_size += strlen(*q) + 1;
7141 *q = NULL;
7143 for (gp = guest_envp, q = envp; gp;
7144 gp += sizeof(abi_ulong), q++) {
7145 if (get_user_ual(addr, gp))
7146 goto execve_efault;
7147 if (!addr)
7148 break;
7149 if (!(*q = lock_user_string(addr)))
7150 goto execve_efault;
7151 total_size += strlen(*q) + 1;
7153 *q = NULL;
7155 if (!(p = lock_user_string(arg1)))
7156 goto execve_efault;
7157 /* Although execve() is not an interruptible syscall it is
7158 * a special case where we must use the safe_syscall wrapper:
7159 * if we allow a signal to happen before we make the host
7160 * syscall then we will 'lose' it, because at the point of
7161 * execve the process leaves QEMU's control. So we use the
7162 * safe syscall wrapper to ensure that we either take the
7163 * signal as a guest signal, or else it does not happen
7164 * before the execve completes and makes it the other
7165 * program's problem.
7166 */
7167 ret = get_errno(safe_execve(p, argp, envp));
7168 unlock_user(p, arg1, 0);
7170 goto execve_end;
7172 execve_efault:
7173 ret = -TARGET_EFAULT;
7175 execve_end:
7176 for (gp = guest_argp, q = argp; *q;
7177 gp += sizeof(abi_ulong), q++) {
7178 if (get_user_ual(addr, gp)
7179 || !addr)
7180 break;
7181 unlock_user(*q, addr, 0);
7183 for (gp = guest_envp, q = envp; *q;
7184 gp += sizeof(abi_ulong), q++) {
7185 if (get_user_ual(addr, gp)
7186 || !addr)
7187 break;
7188 unlock_user(*q, addr, 0);
7191 g_free(argp);
7192 g_free(envp);
7194 return ret;
7195 case TARGET_NR_chdir:
7196 if (!(p = lock_user_string(arg1)))
7197 return -TARGET_EFAULT;
7198 ret = get_errno(chdir(p));
7199 unlock_user(p, arg1, 0);
7200 return ret;
7201 #ifdef TARGET_NR_time
7202 case TARGET_NR_time:
7204 time_t host_time;
7205 ret = get_errno(time(&host_time));
7206 if (!is_error(ret)
7207 && arg1
7208 && put_user_sal(host_time, arg1))
7209 return -TARGET_EFAULT;
7211 return ret;
7212 #endif
7213 #ifdef TARGET_NR_mknod
7214 case TARGET_NR_mknod:
7215 if (!(p = lock_user_string(arg1)))
7216 return -TARGET_EFAULT;
7217 ret = get_errno(mknod(p, arg2, arg3));
7218 unlock_user(p, arg1, 0);
7219 return ret;
7220 #endif
7221 #if defined(TARGET_NR_mknodat)
7222 case TARGET_NR_mknodat:
7223 if (!(p = lock_user_string(arg2)))
7224 return -TARGET_EFAULT;
7225 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7226 unlock_user(p, arg2, 0);
7227 return ret;
7228 #endif
7229 #ifdef TARGET_NR_chmod
7230 case TARGET_NR_chmod:
7231 if (!(p = lock_user_string(arg1)))
7232 return -TARGET_EFAULT;
7233 ret = get_errno(chmod(p, arg2));
7234 unlock_user(p, arg1, 0);
7235 return ret;
7236 #endif
7237 #ifdef TARGET_NR_lseek
7238 case TARGET_NR_lseek:
7239 return get_errno(lseek(arg1, arg2, arg3));
7240 #endif
7241 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7242 /* Alpha specific */
7243 case TARGET_NR_getxpid:
7244 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7245 return get_errno(getpid());
7246 #endif
7247 #ifdef TARGET_NR_getpid
7248 case TARGET_NR_getpid:
7249 return get_errno(getpid());
7250 #endif
7251 case TARGET_NR_mount:
7253 /* need to look at the data field */
7254 void *p2, *p3;
7256 if (arg1) {
7257 p = lock_user_string(arg1);
7258 if (!p) {
7259 return -TARGET_EFAULT;
7261 } else {
7262 p = NULL;
7265 p2 = lock_user_string(arg2);
7266 if (!p2) {
7267 if (arg1) {
7268 unlock_user(p, arg1, 0);
7270 return -TARGET_EFAULT;
7273 if (arg3) {
7274 p3 = lock_user_string(arg3);
7275 if (!p3) {
7276 if (arg1) {
7277 unlock_user(p, arg1, 0);
7279 unlock_user(p2, arg2, 0);
7280 return -TARGET_EFAULT;
7282 } else {
7283 p3 = NULL;
7286 /* FIXME - arg5 should be locked, but it isn't clear how to
7287 * do that since it's not guaranteed to be a NULL-terminated
7288 * string.
7289 */
7290 if (!arg5) {
7291 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7292 } else {
7293 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7295 ret = get_errno(ret);
7297 if (arg1) {
7298 unlock_user(p, arg1, 0);
7300 unlock_user(p2, arg2, 0);
7301 if (arg3) {
7302 unlock_user(p3, arg3, 0);
7305 return ret;
7306 #ifdef TARGET_NR_umount
7307 case TARGET_NR_umount:
7308 if (!(p = lock_user_string(arg1)))
7309 return -TARGET_EFAULT;
7310 ret = get_errno(umount(p));
7311 unlock_user(p, arg1, 0);
7312 return ret;
7313 #endif
7314 #ifdef TARGET_NR_stime /* not on alpha */
7315 case TARGET_NR_stime:
7317 time_t host_time;
7318 if (get_user_sal(host_time, arg1))
7319 return -TARGET_EFAULT;
7320 return get_errno(stime(&host_time));
7322 #endif
7323 #ifdef TARGET_NR_alarm /* not on alpha */
7324 case TARGET_NR_alarm:
7325 return alarm(arg1);
7326 #endif
7327 #ifdef TARGET_NR_pause /* not on alpha */
7328 case TARGET_NR_pause:
7329 if (!block_signals()) {
7330 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7332 return -TARGET_EINTR;
7333 #endif
7334 #ifdef TARGET_NR_utime
7335 case TARGET_NR_utime:
7337 struct utimbuf tbuf, *host_tbuf;
7338 struct target_utimbuf *target_tbuf;
7339 if (arg2) {
7340 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7341 return -TARGET_EFAULT;
7342 tbuf.actime = tswapal(target_tbuf->actime);
7343 tbuf.modtime = tswapal(target_tbuf->modtime);
7344 unlock_user_struct(target_tbuf, arg2, 0);
7345 host_tbuf = &tbuf;
7346 } else {
7347 host_tbuf = NULL;
7349 if (!(p = lock_user_string(arg1)))
7350 return -TARGET_EFAULT;
7351 ret = get_errno(utime(p, host_tbuf));
7352 unlock_user(p, arg1, 0);
7354 return ret;
7355 #endif
7356 #ifdef TARGET_NR_utimes
7357 case TARGET_NR_utimes:
7359 struct timeval *tvp, tv[2];
7360 if (arg2) {
7361 if (copy_from_user_timeval(&tv[0], arg2)
7362 || copy_from_user_timeval(&tv[1],
7363 arg2 + sizeof(struct target_timeval)))
7364 return -TARGET_EFAULT;
7365 tvp = tv;
7366 } else {
7367 tvp = NULL;
7369 if (!(p = lock_user_string(arg1)))
7370 return -TARGET_EFAULT;
7371 ret = get_errno(utimes(p, tvp));
7372 unlock_user(p, arg1, 0);
7374 return ret;
7375 #endif
7376 #if defined(TARGET_NR_futimesat)
7377 case TARGET_NR_futimesat:
7379 struct timeval *tvp, tv[2];
7380 if (arg3) {
7381 if (copy_from_user_timeval(&tv[0], arg3)
7382 || copy_from_user_timeval(&tv[1],
7383 arg3 + sizeof(struct target_timeval)))
7384 return -TARGET_EFAULT;
7385 tvp = tv;
7386 } else {
7387 tvp = NULL;
7389 if (!(p = lock_user_string(arg2))) {
7390 return -TARGET_EFAULT;
7392 ret = get_errno(futimesat(arg1, path(p), tvp));
7393 unlock_user(p, arg2, 0);
7395 return ret;
7396 #endif
7397 #ifdef TARGET_NR_access
7398 case TARGET_NR_access:
7399 if (!(p = lock_user_string(arg1))) {
7400 return -TARGET_EFAULT;
7402 ret = get_errno(access(path(p), arg2));
7403 unlock_user(p, arg1, 0);
7404 return ret;
7405 #endif
7406 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7407 case TARGET_NR_faccessat:
7408 if (!(p = lock_user_string(arg2))) {
7409 return -TARGET_EFAULT;
7411 ret = get_errno(faccessat(arg1, p, arg3, 0));
7412 unlock_user(p, arg2, 0);
7413 return ret;
7414 #endif
7415 #ifdef TARGET_NR_nice /* not on alpha */
7416 case TARGET_NR_nice:
7417 return get_errno(nice(arg1));
7418 #endif
7419 case TARGET_NR_sync:
7420 sync();
7421 return 0;
7422 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7423 case TARGET_NR_syncfs:
7424 return get_errno(syncfs(arg1));
7425 #endif
7426 case TARGET_NR_kill:
7427 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7428 #ifdef TARGET_NR_rename
7429 case TARGET_NR_rename:
7431 void *p2;
7432 p = lock_user_string(arg1);
7433 p2 = lock_user_string(arg2);
7434 if (!p || !p2)
7435 ret = -TARGET_EFAULT;
7436 else
7437 ret = get_errno(rename(p, p2));
7438 unlock_user(p2, arg2, 0);
7439 unlock_user(p, arg1, 0);
7441 return ret;
7442 #endif
7443 #if defined(TARGET_NR_renameat)
7444 case TARGET_NR_renameat:
7446 void *p2;
7447 p = lock_user_string(arg2);
7448 p2 = lock_user_string(arg4);
7449 if (!p || !p2)
7450 ret = -TARGET_EFAULT;
7451 else
7452 ret = get_errno(renameat(arg1, p, arg3, p2));
7453 unlock_user(p2, arg4, 0);
7454 unlock_user(p, arg2, 0);
7456 return ret;
7457 #endif
7458 #if defined(TARGET_NR_renameat2)
7459 case TARGET_NR_renameat2:
7461 void *p2;
7462 p = lock_user_string(arg2);
7463 p2 = lock_user_string(arg4);
7464 if (!p || !p2) {
7465 ret = -TARGET_EFAULT;
7466 } else {
7467 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7469 unlock_user(p2, arg4, 0);
7470 unlock_user(p, arg2, 0);
7472 return ret;
7473 #endif
7474 #ifdef TARGET_NR_mkdir
7475 case TARGET_NR_mkdir:
7476 if (!(p = lock_user_string(arg1)))
7477 return -TARGET_EFAULT;
7478 ret = get_errno(mkdir(p, arg2));
7479 unlock_user(p, arg1, 0);
7480 return ret;
7481 #endif
7482 #if defined(TARGET_NR_mkdirat)
7483 case TARGET_NR_mkdirat:
7484 if (!(p = lock_user_string(arg2)))
7485 return -TARGET_EFAULT;
7486 ret = get_errno(mkdirat(arg1, p, arg3));
7487 unlock_user(p, arg2, 0);
7488 return ret;
7489 #endif
7490 #ifdef TARGET_NR_rmdir
7491 case TARGET_NR_rmdir:
7492 if (!(p = lock_user_string(arg1)))
7493 return -TARGET_EFAULT;
7494 ret = get_errno(rmdir(p));
7495 unlock_user(p, arg1, 0);
7496 return ret;
7497 #endif
7498 case TARGET_NR_dup:
7499 ret = get_errno(dup(arg1));
7500 if (ret >= 0) {
7501 fd_trans_dup(arg1, ret);
7503 return ret;
7504 #ifdef TARGET_NR_pipe
7505 case TARGET_NR_pipe:
7506 return do_pipe(cpu_env, arg1, 0, 0);
7507 #endif
7508 #ifdef TARGET_NR_pipe2
7509 case TARGET_NR_pipe2:
7510 return do_pipe(cpu_env, arg1,
7511 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7512 #endif
7513 case TARGET_NR_times:
7515 struct target_tms *tmsp;
7516 struct tms tms;
7517 ret = get_errno(times(&tms));
7518 if (arg1) {
7519 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7520 if (!tmsp)
7521 return -TARGET_EFAULT;
7522 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7523 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7524 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7525 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7527 if (!is_error(ret))
7528 ret = host_to_target_clock_t(ret);
7530 return ret;
7531 case TARGET_NR_acct:
7532 if (arg1 == 0) {
7533 ret = get_errno(acct(NULL));
7534 } else {
7535 if (!(p = lock_user_string(arg1))) {
7536 return -TARGET_EFAULT;
7538 ret = get_errno(acct(path(p)));
7539 unlock_user(p, arg1, 0);
7541 return ret;
7542 #ifdef TARGET_NR_umount2
7543 case TARGET_NR_umount2:
7544 if (!(p = lock_user_string(arg1)))
7545 return -TARGET_EFAULT;
7546 ret = get_errno(umount2(p, arg2));
7547 unlock_user(p, arg1, 0);
7548 return ret;
7549 #endif
7550 case TARGET_NR_ioctl:
7551 return do_ioctl(arg1, arg2, arg3);
7552 #ifdef TARGET_NR_fcntl
7553 case TARGET_NR_fcntl:
7554 return do_fcntl(arg1, arg2, arg3);
7555 #endif
7556 case TARGET_NR_setpgid:
7557 return get_errno(setpgid(arg1, arg2));
7558 case TARGET_NR_umask:
7559 return get_errno(umask(arg1));
7560 case TARGET_NR_chroot:
7561 if (!(p = lock_user_string(arg1)))
7562 return -TARGET_EFAULT;
7563 ret = get_errno(chroot(p));
7564 unlock_user(p, arg1, 0);
7565 return ret;
7566 #ifdef TARGET_NR_dup2
7567 case TARGET_NR_dup2:
7568 ret = get_errno(dup2(arg1, arg2));
7569 if (ret >= 0) {
7570 fd_trans_dup(arg1, arg2);
7572 return ret;
7573 #endif
7574 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7575 case TARGET_NR_dup3:
7577 int host_flags;
7579 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7580 return -TARGET_EINVAL;
7582 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7583 ret = get_errno(dup3(arg1, arg2, host_flags));
7584 if (ret >= 0) {
7585 fd_trans_dup(arg1, arg2);
7587 return ret;
7589 #endif
7590 #ifdef TARGET_NR_getppid /* not on alpha */
7591 case TARGET_NR_getppid:
7592 return get_errno(getppid());
7593 #endif
7594 #ifdef TARGET_NR_getpgrp
7595 case TARGET_NR_getpgrp:
7596 return get_errno(getpgrp());
7597 #endif
7598 case TARGET_NR_setsid:
7599 return get_errno(setsid());
7600 #ifdef TARGET_NR_sigaction
7601 case TARGET_NR_sigaction:
7603 #if defined(TARGET_ALPHA)
7604 struct target_sigaction act, oact, *pact = 0;
7605 struct target_old_sigaction *old_act;
7606 if (arg2) {
7607 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7608 return -TARGET_EFAULT;
7609 act._sa_handler = old_act->_sa_handler;
7610 target_siginitset(&act.sa_mask, old_act->sa_mask);
7611 act.sa_flags = old_act->sa_flags;
7612 act.sa_restorer = 0;
7613 unlock_user_struct(old_act, arg2, 0);
7614 pact = &act;
7616 ret = get_errno(do_sigaction(arg1, pact, &oact));
7617 if (!is_error(ret) && arg3) {
7618 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7619 return -TARGET_EFAULT;
7620 old_act->_sa_handler = oact._sa_handler;
7621 old_act->sa_mask = oact.sa_mask.sig[0];
7622 old_act->sa_flags = oact.sa_flags;
7623 unlock_user_struct(old_act, arg3, 1);
7625 #elif defined(TARGET_MIPS)
7626 struct target_sigaction act, oact, *pact, *old_act;
7628 if (arg2) {
7629 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7630 return -TARGET_EFAULT;
7631 act._sa_handler = old_act->_sa_handler;
7632 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7633 act.sa_flags = old_act->sa_flags;
7634 unlock_user_struct(old_act, arg2, 0);
7635 pact = &act;
7636 } else {
7637 pact = NULL;
7640 ret = get_errno(do_sigaction(arg1, pact, &oact));
7642 if (!is_error(ret) && arg3) {
7643 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7644 return -TARGET_EFAULT;
7645 old_act->_sa_handler = oact._sa_handler;
7646 old_act->sa_flags = oact.sa_flags;
7647 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7648 old_act->sa_mask.sig[1] = 0;
7649 old_act->sa_mask.sig[2] = 0;
7650 old_act->sa_mask.sig[3] = 0;
7651 unlock_user_struct(old_act, arg3, 1);
7653 #else
7654 struct target_old_sigaction *old_act;
7655 struct target_sigaction act, oact, *pact;
7656 if (arg2) {
7657 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7658 return -TARGET_EFAULT;
7659 act._sa_handler = old_act->_sa_handler;
7660 target_siginitset(&act.sa_mask, old_act->sa_mask);
7661 act.sa_flags = old_act->sa_flags;
7662 act.sa_restorer = old_act->sa_restorer;
7663 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7664 act.ka_restorer = 0;
7665 #endif
7666 unlock_user_struct(old_act, arg2, 0);
7667 pact = &act;
7668 } else {
7669 pact = NULL;
7671 ret = get_errno(do_sigaction(arg1, pact, &oact));
7672 if (!is_error(ret) && arg3) {
7673 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7674 return -TARGET_EFAULT;
7675 old_act->_sa_handler = oact._sa_handler;
7676 old_act->sa_mask = oact.sa_mask.sig[0];
7677 old_act->sa_flags = oact.sa_flags;
7678 old_act->sa_restorer = oact.sa_restorer;
7679 unlock_user_struct(old_act, arg3, 1);
7681 #endif
7683 return ret;
7684 #endif
7685 case TARGET_NR_rt_sigaction:
7687 #if defined(TARGET_ALPHA)
7688 /* For Alpha and SPARC this is a 5 argument syscall, with
7689 * a 'restorer' parameter which must be copied into the
7690 * sa_restorer field of the sigaction struct.
7691 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7692 * and arg5 is the sigsetsize.
7693 * Alpha also has a separate rt_sigaction struct that it uses
7694 * here; SPARC uses the usual sigaction struct.
7695 */
7696 struct target_rt_sigaction *rt_act;
7697 struct target_sigaction act, oact, *pact = 0;
7699 if (arg4 != sizeof(target_sigset_t)) {
7700 return -TARGET_EINVAL;
7702 if (arg2) {
7703 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7704 return -TARGET_EFAULT;
7705 act._sa_handler = rt_act->_sa_handler;
7706 act.sa_mask = rt_act->sa_mask;
7707 act.sa_flags = rt_act->sa_flags;
7708 act.sa_restorer = arg5;
7709 unlock_user_struct(rt_act, arg2, 0);
7710 pact = &act;
7712 ret = get_errno(do_sigaction(arg1, pact, &oact));
7713 if (!is_error(ret) && arg3) {
7714 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7715 return -TARGET_EFAULT;
7716 rt_act->_sa_handler = oact._sa_handler;
7717 rt_act->sa_mask = oact.sa_mask;
7718 rt_act->sa_flags = oact.sa_flags;
7719 unlock_user_struct(rt_act, arg3, 1);
7721 #else
7722 #ifdef TARGET_SPARC
7723 target_ulong restorer = arg4;
7724 target_ulong sigsetsize = arg5;
7725 #else
7726 target_ulong sigsetsize = arg4;
7727 #endif
7728 struct target_sigaction *act;
7729 struct target_sigaction *oact;
7731 if (sigsetsize != sizeof(target_sigset_t)) {
7732 return -TARGET_EINVAL;
7734 if (arg2) {
7735 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7736 return -TARGET_EFAULT;
7738 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7739 act->ka_restorer = restorer;
7740 #endif
7741 } else {
7742 act = NULL;
7744 if (arg3) {
7745 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7746 ret = -TARGET_EFAULT;
7747 goto rt_sigaction_fail;
7749 } else
7750 oact = NULL;
7751 ret = get_errno(do_sigaction(arg1, act, oact));
7752 rt_sigaction_fail:
7753 if (act)
7754 unlock_user_struct(act, arg2, 0);
7755 if (oact)
7756 unlock_user_struct(oact, arg3, 1);
7757 #endif
7759 return ret;
7760 #ifdef TARGET_NR_sgetmask /* not on alpha */
7761 case TARGET_NR_sgetmask:
7763 sigset_t cur_set;
7764 abi_ulong target_set;
7765 ret = do_sigprocmask(0, NULL, &cur_set);
7766 if (!ret) {
7767 host_to_target_old_sigset(&target_set, &cur_set);
7768 ret = target_set;
7771 return ret;
7772 #endif
7773 #ifdef TARGET_NR_ssetmask /* not on alpha */
7774 case TARGET_NR_ssetmask:
7776 sigset_t set, oset;
7777 abi_ulong target_set = arg1;
7778 target_to_host_old_sigset(&set, &target_set);
7779 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7780 if (!ret) {
7781 host_to_target_old_sigset(&target_set, &oset);
7782 ret = target_set;
7785 return ret;
7786 #endif
7787 #ifdef TARGET_NR_sigprocmask
7788 case TARGET_NR_sigprocmask:
7790 #if defined(TARGET_ALPHA)
7791 sigset_t set, oldset;
7792 abi_ulong mask;
7793 int how;
7795 switch (arg1) {
7796 case TARGET_SIG_BLOCK:
7797 how = SIG_BLOCK;
7798 break;
7799 case TARGET_SIG_UNBLOCK:
7800 how = SIG_UNBLOCK;
7801 break;
7802 case TARGET_SIG_SETMASK:
7803 how = SIG_SETMASK;
7804 break;
7805 default:
7806 return -TARGET_EINVAL;
7808 mask = arg2;
7809 target_to_host_old_sigset(&set, &mask);
7811 ret = do_sigprocmask(how, &set, &oldset);
7812 if (!is_error(ret)) {
7813 host_to_target_old_sigset(&mask, &oldset);
7814 ret = mask;
7815 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7817 #else
7818 sigset_t set, oldset, *set_ptr;
7819 int how;
7821 if (arg2) {
7822 switch (arg1) {
7823 case TARGET_SIG_BLOCK:
7824 how = SIG_BLOCK;
7825 break;
7826 case TARGET_SIG_UNBLOCK:
7827 how = SIG_UNBLOCK;
7828 break;
7829 case TARGET_SIG_SETMASK:
7830 how = SIG_SETMASK;
7831 break;
7832 default:
7833 return -TARGET_EINVAL;
7835 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7836 return -TARGET_EFAULT;
7837 target_to_host_old_sigset(&set, p);
7838 unlock_user(p, arg2, 0);
7839 set_ptr = &set;
7840 } else {
7841 how = 0;
7842 set_ptr = NULL;
7844 ret = do_sigprocmask(how, set_ptr, &oldset);
7845 if (!is_error(ret) && arg3) {
7846 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7847 return -TARGET_EFAULT;
7848 host_to_target_old_sigset(p, &oldset);
7849 unlock_user(p, arg3, sizeof(target_sigset_t));
7851 #endif
7853 return ret;
7854 #endif
7855 case TARGET_NR_rt_sigprocmask:
7857 int how = arg1;
7858 sigset_t set, oldset, *set_ptr;
7860 if (arg4 != sizeof(target_sigset_t)) {
7861 return -TARGET_EINVAL;
7864 if (arg2) {
7865 switch(how) {
7866 case TARGET_SIG_BLOCK:
7867 how = SIG_BLOCK;
7868 break;
7869 case TARGET_SIG_UNBLOCK:
7870 how = SIG_UNBLOCK;
7871 break;
7872 case TARGET_SIG_SETMASK:
7873 how = SIG_SETMASK;
7874 break;
7875 default:
7876 return -TARGET_EINVAL;
7878 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7879 return -TARGET_EFAULT;
7880 target_to_host_sigset(&set, p);
7881 unlock_user(p, arg2, 0);
7882 set_ptr = &set;
7883 } else {
7884 how = 0;
7885 set_ptr = NULL;
7887 ret = do_sigprocmask(how, set_ptr, &oldset);
7888 if (!is_error(ret) && arg3) {
7889 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7890 return -TARGET_EFAULT;
7891 host_to_target_sigset(p, &oldset);
7892 unlock_user(p, arg3, sizeof(target_sigset_t));
7895 return ret;
7896 #ifdef TARGET_NR_sigpending
7897 case TARGET_NR_sigpending:
7899 sigset_t set;
7900 ret = get_errno(sigpending(&set));
7901 if (!is_error(ret)) {
7902 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7903 return -TARGET_EFAULT;
7904 host_to_target_old_sigset(p, &set);
7905 unlock_user(p, arg1, sizeof(target_sigset_t));
7908 return ret;
7909 #endif
7910 case TARGET_NR_rt_sigpending:
7912 sigset_t set;
7914 /* Yes, this check is >, not != like most. We follow the kernel's
7915 * logic here: it implements NR_sigpending through the same code
7916 * path, and in that case the old_sigset_t is smaller in size, so
7917 * only oversized sets are rejected.
7918 */
7919 if (arg2 > sizeof(target_sigset_t)) {
7920 return -TARGET_EINVAL;
7923 ret = get_errno(sigpending(&set));
7924 if (!is_error(ret)) {
7925 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7926 return -TARGET_EFAULT;
7927 host_to_target_sigset(p, &set);
7928 unlock_user(p, arg1, sizeof(target_sigset_t));
7931 return ret;
7932 #ifdef TARGET_NR_sigsuspend
7933 case TARGET_NR_sigsuspend:
7935 TaskState *ts = cpu->opaque;
7936 #if defined(TARGET_ALPHA)
7937 abi_ulong mask = arg1;
7938 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7939 #else
7940 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7941 return -TARGET_EFAULT;
7942 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7943 unlock_user(p, arg1, 0);
7944 #endif
7945 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7946 SIGSET_T_SIZE));
7947 if (ret != -TARGET_ERESTARTSYS) {
7948 ts->in_sigsuspend = 1;
7951 return ret;
7952 #endif
7953 case TARGET_NR_rt_sigsuspend:
7955 TaskState *ts = cpu->opaque;
7957 if (arg2 != sizeof(target_sigset_t)) {
7958 return -TARGET_EINVAL;
7960 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7961 return -TARGET_EFAULT;
7962 target_to_host_sigset(&ts->sigsuspend_mask, p);
7963 unlock_user(p, arg1, 0);
7964 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7965 SIGSET_T_SIZE));
7966 if (ret != -TARGET_ERESTARTSYS) {
7967 ts->in_sigsuspend = 1;
7970 return ret;
7971 case TARGET_NR_rt_sigtimedwait:
7973 sigset_t set;
7974 struct timespec uts, *puts;
7975 siginfo_t uinfo;
7977 if (arg4 != sizeof(target_sigset_t)) {
7978 return -TARGET_EINVAL;
7981 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7982 return -TARGET_EFAULT;
7983 target_to_host_sigset(&set, p);
7984 unlock_user(p, arg1, 0);
7985 if (arg3) {
7986 puts = &uts;
7987 target_to_host_timespec(puts, arg3);
7988 } else {
7989 puts = NULL;
7991 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7992 SIGSET_T_SIZE));
7993 if (!is_error(ret)) {
7994 if (arg2) {
7995 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7997 if (!p) {
7998 return -TARGET_EFAULT;
8000 host_to_target_siginfo(p, &uinfo);
8001 unlock_user(p, arg2, sizeof(target_siginfo_t));
8003 ret = host_to_target_signal(ret);
8006 return ret;
8007 case TARGET_NR_rt_sigqueueinfo:
8009 siginfo_t uinfo;
8011 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8012 if (!p) {
8013 return -TARGET_EFAULT;
8015 target_to_host_siginfo(&uinfo, p);
8016 unlock_user(p, arg3, 0);
8017 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8019 return ret;
8020 case TARGET_NR_rt_tgsigqueueinfo:
8022 siginfo_t uinfo;
8024 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8025 if (!p) {
8026 return -TARGET_EFAULT;
8028 target_to_host_siginfo(&uinfo, p);
8029 unlock_user(p, arg4, 0);
8030 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8032 return ret;
8033 #ifdef TARGET_NR_sigreturn
8034 case TARGET_NR_sigreturn:
8035 if (block_signals()) {
8036 return -TARGET_ERESTARTSYS;
8038 return do_sigreturn(cpu_env);
8039 #endif
8040 case TARGET_NR_rt_sigreturn:
8041 if (block_signals()) {
8042 return -TARGET_ERESTARTSYS;
8044 return do_rt_sigreturn(cpu_env);
8045 case TARGET_NR_sethostname:
8046 if (!(p = lock_user_string(arg1)))
8047 return -TARGET_EFAULT;
8048 ret = get_errno(sethostname(p, arg2));
8049 unlock_user(p, arg1, 0);
8050 return ret;
8051 #ifdef TARGET_NR_setrlimit
8052 case TARGET_NR_setrlimit:
8054 int resource = target_to_host_resource(arg1);
8055 struct target_rlimit *target_rlim;
8056 struct rlimit rlim;
8057 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8058 return -TARGET_EFAULT;
8059 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8060 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8061 unlock_user_struct(target_rlim, arg2, 0);
8062 /*
8063 * If we just passed through resource limit settings for memory then
8064 * they would also apply to QEMU's own allocations, and QEMU will
8065 * crash or hang or die if its allocations fail. Ideally we would
8066 * track the guest allocations in QEMU and apply the limits ourselves.
8067 * For now, just tell the guest the call succeeded but don't actually
8068 * limit anything.
8069 */
8070 if (resource != RLIMIT_AS &&
8071 resource != RLIMIT_DATA &&
8072 resource != RLIMIT_STACK) {
8073 return get_errno(setrlimit(resource, &rlim));
8074 } else {
8075 return 0;
8078 #endif
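/*
 * A minimal sketch of the guest-visible effect of the pass-through
 * above (hypothetical guest code, for illustration only):
 *
 *   struct rlimit rl = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
 *   setrlimit(RLIMIT_AS, &rl);   // reported as success, but ...
 *   void *p = malloc(16 << 20);  // ... not enforced; may still succeed
 */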
8079 #ifdef TARGET_NR_getrlimit
8080 case TARGET_NR_getrlimit:
8082 int resource = target_to_host_resource(arg1);
8083 struct target_rlimit *target_rlim;
8084 struct rlimit rlim;
8086 ret = get_errno(getrlimit(resource, &rlim));
8087 if (!is_error(ret)) {
8088 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8089 return -TARGET_EFAULT;
8090 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8091 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8092 unlock_user_struct(target_rlim, arg2, 1);
8095 return ret;
8096 #endif
8097 case TARGET_NR_getrusage:
8099 struct rusage rusage;
8100 ret = get_errno(getrusage(arg1, &rusage));
8101 if (!is_error(ret)) {
8102 ret = host_to_target_rusage(arg2, &rusage);
8105 return ret;
8106 case TARGET_NR_gettimeofday:
8108 struct timeval tv;
8109 ret = get_errno(gettimeofday(&tv, NULL));
8110 if (!is_error(ret)) {
8111 if (copy_to_user_timeval(arg1, &tv))
8112 return -TARGET_EFAULT;
8115 return ret;
8116 case TARGET_NR_settimeofday:
8118 struct timeval tv, *ptv = NULL;
8119 struct timezone tz, *ptz = NULL;
8121 if (arg1) {
8122 if (copy_from_user_timeval(&tv, arg1)) {
8123 return -TARGET_EFAULT;
8125 ptv = &tv;
8128 if (arg2) {
8129 if (copy_from_user_timezone(&tz, arg2)) {
8130 return -TARGET_EFAULT;
8132 ptz = &tz;
8135 return get_errno(settimeofday(ptv, ptz));
8137 #if defined(TARGET_NR_select)
8138 case TARGET_NR_select:
8139 #if defined(TARGET_WANT_NI_OLD_SELECT)
8140 /* some architectures used to have old_select here
8141 * but now return ENOSYS for it.
8142 */
8143 ret = -TARGET_ENOSYS;
8144 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8145 ret = do_old_select(arg1);
8146 #else
8147 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8148 #endif
8149 return ret;
8150 #endif
8151 #ifdef TARGET_NR_pselect6
8152 case TARGET_NR_pselect6:
8154 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8155 fd_set rfds, wfds, efds;
8156 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8157 struct timespec ts, *ts_ptr;
8159 /*
8160 * The 6th arg is actually two args smashed together,
8161 * so we cannot use the C library.
8162 */
8163 sigset_t set;
8164 struct {
8165 sigset_t *set;
8166 size_t size;
8167 } sig, *sig_ptr;
8169 abi_ulong arg_sigset, arg_sigsize, *arg7;
8170 target_sigset_t *target_sigset;
8172 n = arg1;
8173 rfd_addr = arg2;
8174 wfd_addr = arg3;
8175 efd_addr = arg4;
8176 ts_addr = arg5;
8178 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8179 if (ret) {
8180 return ret;
8181 }
8182 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8183 if (ret) {
8184 return ret;
8185 }
8186 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8187 if (ret) {
8188 return ret;
8189 }
8191 /*
8192 * This takes a timespec, and not a timeval, so we cannot
8193 * use the do_select() helper ...
8194 */
8195 if (ts_addr) {
8196 if (target_to_host_timespec(&ts, ts_addr)) {
8197 return -TARGET_EFAULT;
8199 ts_ptr = &ts;
8200 } else {
8201 ts_ptr = NULL;
8204 /* Extract the two packed args for the sigset */
8205 if (arg6) {
8206 sig_ptr = &sig;
8207 sig.size = SIGSET_T_SIZE;
8209 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8210 if (!arg7) {
8211 return -TARGET_EFAULT;
8213 arg_sigset = tswapal(arg7[0]);
8214 arg_sigsize = tswapal(arg7[1]);
8215 unlock_user(arg7, arg6, 0);
8217 if (arg_sigset) {
8218 sig.set = &set;
8219 if (arg_sigsize != sizeof(*target_sigset)) {
8220 /* Like the kernel, we enforce correct size sigsets */
8221 return -TARGET_EINVAL;
8223 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8224 sizeof(*target_sigset), 1);
8225 if (!target_sigset) {
8226 return -TARGET_EFAULT;
8228 target_to_host_sigset(&set, target_sigset);
8229 unlock_user(target_sigset, arg_sigset, 0);
8230 } else {
8231 sig.set = NULL;
8233 } else {
8234 sig_ptr = NULL;
8237 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8238 ts_ptr, sig_ptr));
8240 if (!is_error(ret)) {
8241 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8242 return -TARGET_EFAULT;
8243 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8244 return -TARGET_EFAULT;
8245 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8246 return -TARGET_EFAULT;
8248 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8249 return -TARGET_EFAULT;
8252 return ret;
8253 #endif
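/*
 * The layout behind the "two args smashed together" comment above:
 * the guest's sixth argument points at a pair of abi_ulongs, roughly
 *
 *   struct {
 *       abi_ulong sigset_addr;  // guest pointer to a target_sigset_t
 *       abi_ulong sigset_size;  // must equal sizeof(target_sigset_t)
 *   };
 *
 * hence the lock_user() of 2 * sizeof(abi_ulong) and the two
 * tswapal() calls when unpacking arg6.
 */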
8254 #ifdef TARGET_NR_symlink
8255 case TARGET_NR_symlink:
8257 void *p2;
8258 p = lock_user_string(arg1);
8259 p2 = lock_user_string(arg2);
8260 if (!p || !p2)
8261 ret = -TARGET_EFAULT;
8262 else
8263 ret = get_errno(symlink(p, p2));
8264 unlock_user(p2, arg2, 0);
8265 unlock_user(p, arg1, 0);
8267 return ret;
8268 #endif
8269 #if defined(TARGET_NR_symlinkat)
8270 case TARGET_NR_symlinkat:
8272 void *p2;
8273 p = lock_user_string(arg1);
8274 p2 = lock_user_string(arg3);
8275 if (!p || !p2)
8276 ret = -TARGET_EFAULT;
8277 else
8278 ret = get_errno(symlinkat(p, arg2, p2));
8279 unlock_user(p2, arg3, 0);
8280 unlock_user(p, arg1, 0);
8282 return ret;
8283 #endif
8284 #ifdef TARGET_NR_readlink
8285 case TARGET_NR_readlink:
8287 void *p2;
8288 p = lock_user_string(arg1);
8289 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8290 if (!p || !p2) {
8291 ret = -TARGET_EFAULT;
8292 } else if (!arg3) {
8293 /* Short circuit this for the magic exe check. */
8294 ret = -TARGET_EINVAL;
8295 } else if (is_proc_myself((const char *)p, "exe")) {
8296 char real[PATH_MAX], *temp;
8297 temp = realpath(exec_path, real);
8298 /* Return value is # of bytes that we wrote to the buffer. */
8299 if (temp == NULL) {
8300 ret = get_errno(-1);
8301 } else {
8302 /* Don't worry about sign mismatch as earlier mapping
8303 * logic would have thrown a bad address error. */
8304 ret = MIN(strlen(real), arg3);
8305 /* We cannot NUL terminate the string. */
8306 memcpy(p2, real, ret);
8308 } else {
8309 ret = get_errno(readlink(path(p), p2, arg3));
8311 unlock_user(p2, arg2, ret);
8312 unlock_user(p, arg1, 0);
8314 return ret;
8315 #endif
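/*
 * The memcpy() without a trailing NUL above mirrors readlink(2)
 * semantics: the syscall reports a byte count and never terminates
 * the buffer. A guest wanting a C string must do something like
 * (illustrative guest code):
 *
 *   char buf[PATH_MAX];
 *   ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
 *   if (n >= 0) buf[n] = '\0';
 */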
8316 #if defined(TARGET_NR_readlinkat)
8317 case TARGET_NR_readlinkat:
8319 void *p2;
8320 p = lock_user_string(arg2);
8321 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8322 if (!p || !p2) {
8323 ret = -TARGET_EFAULT;
8324 } else if (is_proc_myself((const char *)p, "exe")) {
8325 char real[PATH_MAX], *temp;
8326 temp = realpath(exec_path, real);
8327 ret = temp == NULL ? get_errno(-1) : MIN(strlen(real), arg4);
8328 if (temp) memcpy(p2, real, ret); /* like readlink: no NUL terminator */
8329 } else {
8330 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8332 unlock_user(p2, arg3, ret);
8333 unlock_user(p, arg2, 0);
8335 return ret;
8336 #endif
8337 #ifdef TARGET_NR_swapon
8338 case TARGET_NR_swapon:
8339 if (!(p = lock_user_string(arg1)))
8340 return -TARGET_EFAULT;
8341 ret = get_errno(swapon(p, arg2));
8342 unlock_user(p, arg1, 0);
8343 return ret;
8344 #endif
8345 case TARGET_NR_reboot:
8346 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8347 /* arg4 must be ignored in all other cases */
8348 p = lock_user_string(arg4);
8349 if (!p) {
8350 return -TARGET_EFAULT;
8352 ret = get_errno(reboot(arg1, arg2, arg3, p));
8353 unlock_user(p, arg4, 0);
8354 } else {
8355 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8357 return ret;
8358 #ifdef TARGET_NR_mmap
8359 case TARGET_NR_mmap:
8360 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8361 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8362 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8363 || defined(TARGET_S390X)
8365 abi_ulong *v;
8366 abi_ulong v1, v2, v3, v4, v5, v6;
8367 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8368 return -TARGET_EFAULT;
8369 v1 = tswapal(v[0]);
8370 v2 = tswapal(v[1]);
8371 v3 = tswapal(v[2]);
8372 v4 = tswapal(v[3]);
8373 v5 = tswapal(v[4]);
8374 v6 = tswapal(v[5]);
8375 unlock_user(v, arg1, 0);
8376 ret = get_errno(target_mmap(v1, v2, v3,
8377 target_to_host_bitmask(v4, mmap_flags_tbl),
8378 v5, v6));
8380 #else
8381 ret = get_errno(target_mmap(arg1, arg2, arg3,
8382 target_to_host_bitmask(arg4, mmap_flags_tbl),
8383 arg5,
8384 arg6));
8385 #endif
8386 return ret;
8387 #endif
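/*
 * On the ABIs listed above, the guest's old-style mmap() takes one
 * pointer to a block of six abi_ulongs instead of six register
 * arguments; the block is laid out as
 *
 *   abi_ulong args[6];  // { addr, len, prot, flags, fd, offset }
 *
 * which is why the case locks 6 * sizeof(abi_ulong) of guest memory
 * and byte-swaps each slot with tswapal() before calling target_mmap().
 */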
8388 #ifdef TARGET_NR_mmap2
8389 case TARGET_NR_mmap2:
8390 #ifndef MMAP_SHIFT
8391 #define MMAP_SHIFT 12
8392 #endif
8393 ret = target_mmap(arg1, arg2, arg3,
8394 target_to_host_bitmask(arg4, mmap_flags_tbl),
8395 arg5, arg6 << MMAP_SHIFT);
8396 return get_errno(ret);
8397 #endif
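/*
 * mmap2() passes its file offset in units of 1 << MMAP_SHIFT (4096)
 * bytes, so the byte offset handed to target_mmap() is, in effect,
 *
 *   (off_t)arg6 << MMAP_SHIFT;  // page units -> bytes
 *
 * which lets 32-bit guests address file offsets beyond what a plain
 * 32-bit byte offset could express.
 */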
8398 case TARGET_NR_munmap:
8399 return get_errno(target_munmap(arg1, arg2));
8400 case TARGET_NR_mprotect:
8402 TaskState *ts = cpu->opaque;
8403 /* Special hack to detect libc making the stack executable. */
8404 if ((arg3 & PROT_GROWSDOWN)
8405 && arg1 >= ts->info->stack_limit
8406 && arg1 <= ts->info->start_stack) {
8407 arg3 &= ~PROT_GROWSDOWN;
8408 arg2 = arg2 + arg1 - ts->info->stack_limit;
8409 arg1 = ts->info->stack_limit;
8412 return get_errno(target_mprotect(arg1, arg2, arg3));
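/*
 * Sketch of the PROT_GROWSDOWN rewrite above: a guest call such as
 *
 *   mprotect(stack_page, len, PROT_READ | PROT_EXEC | PROT_GROWSDOWN);
 *
 * (stack_page being a hypothetical address inside the initial stack)
 * is widened to cover [stack_limit, stack_page + len) with the
 * GROWSDOWN bit stripped, approximating what the host kernel would
 * have done for a real growsdown mapping.
 */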
8413 #ifdef TARGET_NR_mremap
8414 case TARGET_NR_mremap:
8415 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8416 #endif
8417 /* ??? msync/mlock/munlock are broken for softmmu. */
8418 #ifdef TARGET_NR_msync
8419 case TARGET_NR_msync:
8420 return get_errno(msync(g2h(arg1), arg2, arg3));
8421 #endif
8422 #ifdef TARGET_NR_mlock
8423 case TARGET_NR_mlock:
8424 return get_errno(mlock(g2h(arg1), arg2));
8425 #endif
8426 #ifdef TARGET_NR_munlock
8427 case TARGET_NR_munlock:
8428 return get_errno(munlock(g2h(arg1), arg2));
8429 #endif
8430 #ifdef TARGET_NR_mlockall
8431 case TARGET_NR_mlockall:
8432 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8433 #endif
8434 #ifdef TARGET_NR_munlockall
8435 case TARGET_NR_munlockall:
8436 return get_errno(munlockall());
8437 #endif
8438 #ifdef TARGET_NR_truncate
8439 case TARGET_NR_truncate:
8440 if (!(p = lock_user_string(arg1)))
8441 return -TARGET_EFAULT;
8442 ret = get_errno(truncate(p, arg2));
8443 unlock_user(p, arg1, 0);
8444 return ret;
8445 #endif
8446 #ifdef TARGET_NR_ftruncate
8447 case TARGET_NR_ftruncate:
8448 return get_errno(ftruncate(arg1, arg2));
8449 #endif
8450 case TARGET_NR_fchmod:
8451 return get_errno(fchmod(arg1, arg2));
8452 #if defined(TARGET_NR_fchmodat)
8453 case TARGET_NR_fchmodat:
8454 if (!(p = lock_user_string(arg2)))
8455 return -TARGET_EFAULT;
8456 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8457 unlock_user(p, arg2, 0);
8458 return ret;
8459 #endif
8460 case TARGET_NR_getpriority:
8461 /* Note that negative values are valid for getpriority, so we must
8462 differentiate based on errno settings. */
8463 errno = 0;
8464 ret = getpriority(arg1, arg2);
8465 if (ret == -1 && errno != 0) {
8466 return -host_to_target_errno(errno);
8468 #ifdef TARGET_ALPHA
8469 /* Return value is the unbiased priority. Signal no error. */
8470 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8471 #else
8472 /* Return value is a biased priority to avoid negative numbers. */
8473 ret = 20 - ret;
8474 #endif
8475 return ret;
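/*
 * The "20 - ret" bias above follows the kernel's own syscall ABI:
 * raw nice values run from -20 (highest priority) to 19 (lowest), so
 *
 *   nice -20 -> 40,   nice 0 -> 20,   nice 19 -> 1
 *
 * i.e. the biased result is always positive and cannot be mistaken
 * for a -errno value; Alpha instead returns the raw value and signals
 * success out of band through IR_V0.
 */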
8476 case TARGET_NR_setpriority:
8477 return get_errno(setpriority(arg1, arg2, arg3));
8478 #ifdef TARGET_NR_statfs
8479 case TARGET_NR_statfs:
8480 if (!(p = lock_user_string(arg1))) {
8481 return -TARGET_EFAULT;
8483 ret = get_errno(statfs(path(p), &stfs));
8484 unlock_user(p, arg1, 0);
8485 convert_statfs:
8486 if (!is_error(ret)) {
8487 struct target_statfs *target_stfs;
8489 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8490 return -TARGET_EFAULT;
8491 __put_user(stfs.f_type, &target_stfs->f_type);
8492 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8493 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8494 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8495 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8496 __put_user(stfs.f_files, &target_stfs->f_files);
8497 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8498 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8499 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8500 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8501 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8502 #ifdef _STATFS_F_FLAGS
8503 __put_user(stfs.f_flags, &target_stfs->f_flags);
8504 #else
8505 __put_user(0, &target_stfs->f_flags);
8506 #endif
8507 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8508 unlock_user_struct(target_stfs, arg2, 1);
8510 return ret;
8511 #endif
8512 #ifdef TARGET_NR_fstatfs
8513 case TARGET_NR_fstatfs:
8514 ret = get_errno(fstatfs(arg1, &stfs));
8515 goto convert_statfs;
8516 #endif
8517 #ifdef TARGET_NR_statfs64
8518 case TARGET_NR_statfs64:
8519 if (!(p = lock_user_string(arg1))) {
8520 return -TARGET_EFAULT;
8522 ret = get_errno(statfs(path(p), &stfs));
8523 unlock_user(p, arg1, 0);
8524 convert_statfs64:
8525 if (!is_error(ret)) {
8526 struct target_statfs64 *target_stfs;
8528 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8529 return -TARGET_EFAULT;
8530 __put_user(stfs.f_type, &target_stfs->f_type);
8531 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8532 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8533 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8534 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8535 __put_user(stfs.f_files, &target_stfs->f_files);
8536 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8537 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8538 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8539 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8540 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8541 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8542 unlock_user_struct(target_stfs, arg3, 1);
8544 return ret;
8545 case TARGET_NR_fstatfs64:
8546 ret = get_errno(fstatfs(arg1, &stfs));
8547 goto convert_statfs64;
8548 #endif
8549 #ifdef TARGET_NR_socketcall
8550 case TARGET_NR_socketcall:
8551 return do_socketcall(arg1, arg2);
8552 #endif
8553 #ifdef TARGET_NR_accept
8554 case TARGET_NR_accept:
8555 return do_accept4(arg1, arg2, arg3, 0);
8556 #endif
8557 #ifdef TARGET_NR_accept4
8558 case TARGET_NR_accept4:
8559 return do_accept4(arg1, arg2, arg3, arg4);
8560 #endif
8561 #ifdef TARGET_NR_bind
8562 case TARGET_NR_bind:
8563 return do_bind(arg1, arg2, arg3);
8564 #endif
8565 #ifdef TARGET_NR_connect
8566 case TARGET_NR_connect:
8567 return do_connect(arg1, arg2, arg3);
8568 #endif
8569 #ifdef TARGET_NR_getpeername
8570 case TARGET_NR_getpeername:
8571 return do_getpeername(arg1, arg2, arg3);
8572 #endif
8573 #ifdef TARGET_NR_getsockname
8574 case TARGET_NR_getsockname:
8575 return do_getsockname(arg1, arg2, arg3);
8576 #endif
8577 #ifdef TARGET_NR_getsockopt
8578 case TARGET_NR_getsockopt:
8579 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8580 #endif
8581 #ifdef TARGET_NR_listen
8582 case TARGET_NR_listen:
8583 return get_errno(listen(arg1, arg2));
8584 #endif
8585 #ifdef TARGET_NR_recv
8586 case TARGET_NR_recv:
8587 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8588 #endif
8589 #ifdef TARGET_NR_recvfrom
8590 case TARGET_NR_recvfrom:
8591 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8592 #endif
8593 #ifdef TARGET_NR_recvmsg
8594 case TARGET_NR_recvmsg:
8595 return do_sendrecvmsg(arg1, arg2, arg3, 0);
8596 #endif
8597 #ifdef TARGET_NR_send
8598 case TARGET_NR_send:
8599 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8600 #endif
8601 #ifdef TARGET_NR_sendmsg
8602 case TARGET_NR_sendmsg:
8603 return do_sendrecvmsg(arg1, arg2, arg3, 1);
8604 #endif
8605 #ifdef TARGET_NR_sendmmsg
8606 case TARGET_NR_sendmmsg:
8607 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8608 case TARGET_NR_recvmmsg:
8609 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8610 #endif
8611 #ifdef TARGET_NR_sendto
8612 case TARGET_NR_sendto:
8613 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8614 #endif
8615 #ifdef TARGET_NR_shutdown
8616 case TARGET_NR_shutdown:
8617 return get_errno(shutdown(arg1, arg2));
8618 #endif
8619 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8620 case TARGET_NR_getrandom:
8621 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8622 if (!p) {
8623 return -TARGET_EFAULT;
8625 ret = get_errno(getrandom(p, arg2, arg3));
8626 unlock_user(p, arg1, ret);
8627 return ret;
8628 #endif
8629 #ifdef TARGET_NR_socket
8630 case TARGET_NR_socket:
8631 return do_socket(arg1, arg2, arg3);
8632 #endif
8633 #ifdef TARGET_NR_socketpair
8634 case TARGET_NR_socketpair:
8635 return do_socketpair(arg1, arg2, arg3, arg4);
8636 #endif
8637 #ifdef TARGET_NR_setsockopt
8638 case TARGET_NR_setsockopt:
8639 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8640 #endif
8641 #if defined(TARGET_NR_syslog)
8642 case TARGET_NR_syslog:
8644 int len = arg2;
8646 switch (arg1) {
8647 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
8648 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
8649 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
8650 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
8651 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
8652 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8653 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
8654 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
8655 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8656 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
8657 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
8658 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
8660 if (len < 0) {
8661 return -TARGET_EINVAL;
8663 if (len == 0) {
8664 return 0;
8666 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8667 if (!p) {
8668 return -TARGET_EFAULT;
8670 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8671 unlock_user(p, arg2, arg3);
8673 return ret;
8674 default:
8675 return -TARGET_EINVAL;
8678 break;
8679 #endif
8680 case TARGET_NR_setitimer:
8682 struct itimerval value, ovalue, *pvalue;
8684 if (arg2) {
8685 pvalue = &value;
8686 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8687 || copy_from_user_timeval(&pvalue->it_value,
8688 arg2 + sizeof(struct target_timeval)))
8689 return -TARGET_EFAULT;
8690 } else {
8691 pvalue = NULL;
8693 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8694 if (!is_error(ret) && arg3) {
8695 if (copy_to_user_timeval(arg3,
8696 &ovalue.it_interval)
8697 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8698 &ovalue.it_value))
8699 return -TARGET_EFAULT;
8702 return ret;
8703 case TARGET_NR_getitimer:
8705 struct itimerval value;
8707 ret = get_errno(getitimer(arg1, &value));
8708 if (!is_error(ret) && arg2) {
8709 if (copy_to_user_timeval(arg2,
8710 &value.it_interval)
8711 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8712 &value.it_value))
8713 return -TARGET_EFAULT;
8716 return ret;
8717 #ifdef TARGET_NR_stat
8718 case TARGET_NR_stat:
8719 if (!(p = lock_user_string(arg1))) {
8720 return -TARGET_EFAULT;
8722 ret = get_errno(stat(path(p), &st));
8723 unlock_user(p, arg1, 0);
8724 goto do_stat;
8725 #endif
8726 #ifdef TARGET_NR_lstat
8727 case TARGET_NR_lstat:
8728 if (!(p = lock_user_string(arg1))) {
8729 return -TARGET_EFAULT;
8731 ret = get_errno(lstat(path(p), &st));
8732 unlock_user(p, arg1, 0);
8733 goto do_stat;
8734 #endif
8735 #ifdef TARGET_NR_fstat
8736 case TARGET_NR_fstat:
8738 ret = get_errno(fstat(arg1, &st));
8739 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8740 do_stat:
8741 #endif
8742 if (!is_error(ret)) {
8743 struct target_stat *target_st;
8745 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8746 return -TARGET_EFAULT;
8747 memset(target_st, 0, sizeof(*target_st));
8748 __put_user(st.st_dev, &target_st->st_dev);
8749 __put_user(st.st_ino, &target_st->st_ino);
8750 __put_user(st.st_mode, &target_st->st_mode);
8751 __put_user(st.st_uid, &target_st->st_uid);
8752 __put_user(st.st_gid, &target_st->st_gid);
8753 __put_user(st.st_nlink, &target_st->st_nlink);
8754 __put_user(st.st_rdev, &target_st->st_rdev);
8755 __put_user(st.st_size, &target_st->st_size);
8756 __put_user(st.st_blksize, &target_st->st_blksize);
8757 __put_user(st.st_blocks, &target_st->st_blocks);
8758 __put_user(st.st_atime, &target_st->target_st_atime);
8759 __put_user(st.st_mtime, &target_st->target_st_mtime);
8760 __put_user(st.st_ctime, &target_st->target_st_ctime);
8761 unlock_user_struct(target_st, arg2, 1);
8764 return ret;
8765 #endif
8766 case TARGET_NR_vhangup:
8767 return get_errno(vhangup());
8768 #ifdef TARGET_NR_syscall
8769 case TARGET_NR_syscall:
8770 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8771 arg6, arg7, arg8, 0);
8772 #endif
8773 case TARGET_NR_wait4:
8775 int status;
8776 abi_long status_ptr = arg2;
8777 struct rusage rusage, *rusage_ptr;
8778 abi_ulong target_rusage = arg4;
8779 abi_long rusage_err;
8780 if (target_rusage)
8781 rusage_ptr = &rusage;
8782 else
8783 rusage_ptr = NULL;
8784 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8785 if (!is_error(ret)) {
8786 if (status_ptr && ret) {
8787 status = host_to_target_waitstatus(status);
8788 if (put_user_s32(status, status_ptr))
8789 return -TARGET_EFAULT;
8791 if (target_rusage) {
8792 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8793 if (rusage_err) {
8794 ret = rusage_err;
8799 return ret;
8800 #ifdef TARGET_NR_swapoff
8801 case TARGET_NR_swapoff:
8802 if (!(p = lock_user_string(arg1)))
8803 return -TARGET_EFAULT;
8804 ret = get_errno(swapoff(p));
8805 unlock_user(p, arg1, 0);
8806 return ret;
8807 #endif
8808 case TARGET_NR_sysinfo:
8810 struct target_sysinfo *target_value;
8811 struct sysinfo value;
8812 ret = get_errno(sysinfo(&value));
8813 if (!is_error(ret) && arg1)
8815 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8816 return -TARGET_EFAULT;
8817 __put_user(value.uptime, &target_value->uptime);
8818 __put_user(value.loads[0], &target_value->loads[0]);
8819 __put_user(value.loads[1], &target_value->loads[1]);
8820 __put_user(value.loads[2], &target_value->loads[2]);
8821 __put_user(value.totalram, &target_value->totalram);
8822 __put_user(value.freeram, &target_value->freeram);
8823 __put_user(value.sharedram, &target_value->sharedram);
8824 __put_user(value.bufferram, &target_value->bufferram);
8825 __put_user(value.totalswap, &target_value->totalswap);
8826 __put_user(value.freeswap, &target_value->freeswap);
8827 __put_user(value.procs, &target_value->procs);
8828 __put_user(value.totalhigh, &target_value->totalhigh);
8829 __put_user(value.freehigh, &target_value->freehigh);
8830 __put_user(value.mem_unit, &target_value->mem_unit);
8831 unlock_user_struct(target_value, arg1, 1);
8834 return ret;
8835 #ifdef TARGET_NR_ipc
8836 case TARGET_NR_ipc:
8837 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8838 #endif
8839 #ifdef TARGET_NR_semget
8840 case TARGET_NR_semget:
8841 return get_errno(semget(arg1, arg2, arg3));
8842 #endif
8843 #ifdef TARGET_NR_semop
8844 case TARGET_NR_semop:
8845 return do_semop(arg1, arg2, arg3);
8846 #endif
8847 #ifdef TARGET_NR_semctl
8848 case TARGET_NR_semctl:
8849 return do_semctl(arg1, arg2, arg3, arg4);
8850 #endif
8851 #ifdef TARGET_NR_msgctl
8852 case TARGET_NR_msgctl:
8853 return do_msgctl(arg1, arg2, arg3);
8854 #endif
8855 #ifdef TARGET_NR_msgget
8856 case TARGET_NR_msgget:
8857 return get_errno(msgget(arg1, arg2));
8858 #endif
8859 #ifdef TARGET_NR_msgrcv
8860 case TARGET_NR_msgrcv:
8861 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8862 #endif
8863 #ifdef TARGET_NR_msgsnd
8864 case TARGET_NR_msgsnd:
8865 return do_msgsnd(arg1, arg2, arg3, arg4);
8866 #endif
8867 #ifdef TARGET_NR_shmget
8868 case TARGET_NR_shmget:
8869 return get_errno(shmget(arg1, arg2, arg3));
8870 #endif
8871 #ifdef TARGET_NR_shmctl
8872 case TARGET_NR_shmctl:
8873 return do_shmctl(arg1, arg2, arg3);
8874 #endif
8875 #ifdef TARGET_NR_shmat
8876 case TARGET_NR_shmat:
8877 return do_shmat(cpu_env, arg1, arg2, arg3);
8878 #endif
8879 #ifdef TARGET_NR_shmdt
8880 case TARGET_NR_shmdt:
8881 return do_shmdt(arg1);
8882 #endif
8883 case TARGET_NR_fsync:
8884 return get_errno(fsync(arg1));
8885 case TARGET_NR_clone:
8886 /* Linux manages to have three different orderings for its
8887 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8888 * match the kernel's CONFIG_CLONE_* settings.
8889 * Microblaze is further special in that it uses a sixth
8890 * implicit argument to clone for the TLS pointer.
8891 */
8892 #if defined(TARGET_MICROBLAZE)
8893 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8894 #elif defined(TARGET_CLONE_BACKWARDS)
8895 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8896 #elif defined(TARGET_CLONE_BACKWARDS2)
8897 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8898 #else
8899 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8900 #endif
8901 return ret;
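/*
 * The argument orders reconciled above, judging from how each branch
 * maps arg1..arg6 onto do_fork(flags, newsp, parent_tidptr, tls,
 * child_tidptr):
 *
 *   default:           clone(flags, newsp, parent_tidptr, child_tidptr, tls)
 *   CLONE_BACKWARDS:   clone(flags, newsp, parent_tidptr, tls, child_tidptr)
 *   CLONE_BACKWARDS2:  clone(newsp, flags, parent_tidptr, child_tidptr, tls)
 *
 * Microblaze takes the TLS pointer as the implicit sixth argument
 * (arg6), with the tid pointers arriving in arg4/arg5.
 */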
8902 #ifdef __NR_exit_group
8903 /* new thread calls */
8904 case TARGET_NR_exit_group:
8905 preexit_cleanup(cpu_env, arg1);
8906 return get_errno(exit_group(arg1));
8907 #endif
8908 case TARGET_NR_setdomainname:
8909 if (!(p = lock_user_string(arg1)))
8910 return -TARGET_EFAULT;
8911 ret = get_errno(setdomainname(p, arg2));
8912 unlock_user(p, arg1, 0);
8913 return ret;
8914 case TARGET_NR_uname:
8915 /* no need to transcode because we use the linux syscall */
8917 struct new_utsname * buf;
8919 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8920 return -TARGET_EFAULT;
8921 ret = get_errno(sys_uname(buf));
8922 if (!is_error(ret)) {
8923 /* Overwrite the native machine name with whatever is being
8924 emulated. */
8925 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8926 sizeof(buf->machine));
8927 /* Allow the user to override the reported release. */
8928 if (qemu_uname_release && *qemu_uname_release) {
8929 g_strlcpy(buf->release, qemu_uname_release,
8930 sizeof(buf->release));
8933 unlock_user_struct(buf, arg1, 1);
8935 return ret;
8936 #ifdef TARGET_I386
8937 case TARGET_NR_modify_ldt:
8938 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
8939 #if !defined(TARGET_X86_64)
8940 case TARGET_NR_vm86:
8941 return do_vm86(cpu_env, arg1, arg2);
8942 #endif
8943 #endif
8944 case TARGET_NR_adjtimex:
8946 struct timex host_buf;
8948 if (target_to_host_timex(&host_buf, arg1) != 0) {
8949 return -TARGET_EFAULT;
8951 ret = get_errno(adjtimex(&host_buf));
8952 if (!is_error(ret)) {
8953 if (host_to_target_timex(arg1, &host_buf) != 0) {
8954 return -TARGET_EFAULT;
8958 return ret;
8959 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8960 case TARGET_NR_clock_adjtime:
8962 struct timex htx, *phtx = &htx;
8964 if (target_to_host_timex(phtx, arg2) != 0) {
8965 return -TARGET_EFAULT;
8967 ret = get_errno(clock_adjtime(arg1, phtx));
8968 if (!is_error(ret) && phtx) {
8969 if (host_to_target_timex(arg2, phtx) != 0) {
8970 return -TARGET_EFAULT;
8974 return ret;
8975 #endif
8976 case TARGET_NR_getpgid:
8977 return get_errno(getpgid(arg1));
8978 case TARGET_NR_fchdir:
8979 return get_errno(fchdir(arg1));
8980 case TARGET_NR_personality:
8981 return get_errno(personality(arg1));
8982 #ifdef TARGET_NR__llseek /* Not on alpha */
8983 case TARGET_NR__llseek:
8985 int64_t res;
8986 #if !defined(__NR_llseek)
8987 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
8988 if (res == -1) {
8989 ret = get_errno(res);
8990 } else {
8991 ret = 0;
8993 #else
8994 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8995 #endif
8996 if ((ret == 0) && put_user_s64(res, arg4)) {
8997 return -TARGET_EFAULT;
9000 return ret;
9001 #endif
9002 #ifdef TARGET_NR_getdents
9003 case TARGET_NR_getdents:
9004 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9005 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9007 struct target_dirent *target_dirp;
9008 struct linux_dirent *dirp;
9009 abi_long count = arg3;
9011 dirp = g_try_malloc(count);
9012 if (!dirp) {
9013 return -TARGET_ENOMEM;
9016 ret = get_errno(sys_getdents(arg1, dirp, count));
9017 if (!is_error(ret)) {
9018 struct linux_dirent *de;
9019 struct target_dirent *tde;
9020 int len = ret;
9021 int reclen, treclen;
9022 int count1, tnamelen;
9024 count1 = 0;
9025 de = dirp;
9026 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9027 return -TARGET_EFAULT;
9028 tde = target_dirp;
9029 while (len > 0) {
9030 reclen = de->d_reclen;
9031 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9032 assert(tnamelen >= 0);
9033 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9034 assert(count1 + treclen <= count);
9035 tde->d_reclen = tswap16(treclen);
9036 tde->d_ino = tswapal(de->d_ino);
9037 tde->d_off = tswapal(de->d_off);
9038 memcpy(tde->d_name, de->d_name, tnamelen);
9039 de = (struct linux_dirent *)((char *)de + reclen);
9040 len -= reclen;
9041 tde = (struct target_dirent *)((char *)tde + treclen);
9042 count1 += treclen;
9044 ret = count1;
9045 unlock_user(target_dirp, arg2, ret);
9047 g_free(dirp);
9049 #else
9051 struct linux_dirent *dirp;
9052 abi_long count = arg3;
9054 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9055 return -TARGET_EFAULT;
9056 ret = get_errno(sys_getdents(arg1, dirp, count));
9057 if (!is_error(ret)) {
9058 struct linux_dirent *de;
9059 int len = ret;
9060 int reclen;
9061 de = dirp;
9062 while (len > 0) {
9063 reclen = de->d_reclen;
9064 if (reclen > len)
9065 break;
9066 de->d_reclen = tswap16(reclen);
9067 tswapls(&de->d_ino);
9068 tswapls(&de->d_off);
9069 de = (struct linux_dirent *)((char *)de + reclen);
9070 len -= reclen;
9073 unlock_user(dirp, arg2, ret);
9075 #endif
9076 #else
9077 /* Implement getdents in terms of getdents64 */
9079 struct linux_dirent64 *dirp;
9080 abi_long count = arg3;
9082 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9083 if (!dirp) {
9084 return -TARGET_EFAULT;
9086 ret = get_errno(sys_getdents64(arg1, dirp, count));
9087 if (!is_error(ret)) {
9088 /* Convert the dirent64 structs to target dirent. We do this
9089 * in-place, since we can guarantee that a target_dirent is no
9090 * larger than a dirent64; however this means we have to be
9091 * careful to read everything before writing in the new format.
9092 */
9093 struct linux_dirent64 *de;
9094 struct target_dirent *tde;
9095 int len = ret;
9096 int tlen = 0;
9098 de = dirp;
9099 tde = (struct target_dirent *)dirp;
9100 while (len > 0) {
9101 int namelen, treclen;
9102 int reclen = de->d_reclen;
9103 uint64_t ino = de->d_ino;
9104 int64_t off = de->d_off;
9105 uint8_t type = de->d_type;
9107 namelen = strlen(de->d_name);
9108 treclen = offsetof(struct target_dirent, d_name)
9109 + namelen + 2;
9110 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9112 memmove(tde->d_name, de->d_name, namelen + 1);
9113 tde->d_ino = tswapal(ino);
9114 tde->d_off = tswapal(off);
9115 tde->d_reclen = tswap16(treclen);
9116 /* The target_dirent type is in what was formerly a padding
9117 * byte at the end of the structure:
9118 */
9119 *(((char *)tde) + treclen - 1) = type;
9121 de = (struct linux_dirent64 *)((char *)de + reclen);
9122 tde = (struct target_dirent *)((char *)tde + treclen);
9123 len -= reclen;
9124 tlen += treclen;
9126 ret = tlen;
9128 unlock_user(dirp, arg2, ret);
9130 #endif
9131 return ret;
9132 #endif /* TARGET_NR_getdents */
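/*
 * Why the in-place dirent64 -> target_dirent rewrite above is safe:
 * each record only shrinks (or keeps its size), since the two 64-bit
 * d_ino/d_off fields become abi_longs and d_type moves into the final
 * byte of the record. Roughly:
 *
 *   linux_dirent64: u64 d_ino, s64 d_off, u16 d_reclen, u8 d_type, name
 *   target_dirent:  abi_long d_ino, abi_long d_off, u16 d_reclen,
 *                   name, NUL, type byte last
 *
 * so reading every source field before the memmove() is all that is
 * needed for the overlapping copy to be correct.
 */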
9133 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9134 case TARGET_NR_getdents64:
9136 struct linux_dirent64 *dirp;
9137 abi_long count = arg3;
9138 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9139 return -TARGET_EFAULT;
9140 ret = get_errno(sys_getdents64(arg1, dirp, count));
9141 if (!is_error(ret)) {
9142 struct linux_dirent64 *de;
9143 int len = ret;
9144 int reclen;
9145 de = dirp;
9146 while (len > 0) {
9147 reclen = de->d_reclen;
9148 if (reclen > len)
9149 break;
9150 de->d_reclen = tswap16(reclen);
9151 tswap64s((uint64_t *)&de->d_ino);
9152 tswap64s((uint64_t *)&de->d_off);
9153 de = (struct linux_dirent64 *)((char *)de + reclen);
9154 len -= reclen;
9157 unlock_user(dirp, arg2, ret);
9159 return ret;
9160 #endif /* TARGET_NR_getdents64 */
9161 #if defined(TARGET_NR__newselect)
9162 case TARGET_NR__newselect:
9163 return do_select(arg1, arg2, arg3, arg4, arg5);
9164 #endif
9165 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9166 # ifdef TARGET_NR_poll
9167 case TARGET_NR_poll:
9168 # endif
9169 # ifdef TARGET_NR_ppoll
9170 case TARGET_NR_ppoll:
9171 # endif
9173 struct target_pollfd *target_pfd;
9174 unsigned int nfds = arg2;
9175 struct pollfd *pfd;
9176 unsigned int i;
9178 pfd = NULL;
9179 target_pfd = NULL;
9180 if (nfds) {
9181 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9182 return -TARGET_EINVAL;
9185 target_pfd = lock_user(VERIFY_WRITE, arg1,
9186 sizeof(struct target_pollfd) * nfds, 1);
9187 if (!target_pfd) {
9188 return -TARGET_EFAULT;
9191 pfd = alloca(sizeof(struct pollfd) * nfds);
9192 for (i = 0; i < nfds; i++) {
9193 pfd[i].fd = tswap32(target_pfd[i].fd);
9194 pfd[i].events = tswap16(target_pfd[i].events);
9198 switch (num) {
9199 # ifdef TARGET_NR_ppoll
9200 case TARGET_NR_ppoll:
9202 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9203 target_sigset_t *target_set;
9204 sigset_t _set, *set = &_set;
9206 if (arg3) {
9207 if (target_to_host_timespec(timeout_ts, arg3)) {
9208 unlock_user(target_pfd, arg1, 0);
9209 return -TARGET_EFAULT;
9211 } else {
9212 timeout_ts = NULL;
9215 if (arg4) {
9216 if (arg5 != sizeof(target_sigset_t)) {
9217 unlock_user(target_pfd, arg1, 0);
9218 return -TARGET_EINVAL;
9221 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9222 if (!target_set) {
9223 unlock_user(target_pfd, arg1, 0);
9224 return -TARGET_EFAULT;
9226 target_to_host_sigset(set, target_set);
9227 } else {
9228 set = NULL;
9231 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9232 set, SIGSET_T_SIZE));
9234 if (!is_error(ret) && arg3) {
9235 host_to_target_timespec(arg3, timeout_ts);
9237 if (arg4) {
9238 unlock_user(target_set, arg4, 0);
9240 break;
9242 # endif
9243 # ifdef TARGET_NR_poll
9244 case TARGET_NR_poll:
9246 struct timespec ts, *pts;
9248 if (arg3 >= 0) {
9249 /* Convert ms to secs, ns */
9250 ts.tv_sec = arg3 / 1000;
9251 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9252 pts = &ts;
9253 } else {
9254 /* -ve poll() timeout means "infinite" */
9255 pts = NULL;
9257 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9258 break;
9260 # endif
9261 default:
9262 g_assert_not_reached();
9265 if (!is_error(ret)) {
9266 for (i = 0; i < nfds; i++) {
9267 target_pfd[i].revents = tswap16(pfd[i].revents);
9270 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9272 return ret;
9273 #endif
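/*
 * The poll() branch above emulates a millisecond timeout via ppoll()
 * by expanding it into a timespec:
 *
 *   ts.tv_sec  = ms / 1000;
 *   ts.tv_nsec = (ms % 1000) * 1000000LL;  // ms -> ns
 *
 * with a negative ms mapping to a NULL timespec, i.e. block forever.
 */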
9274 case TARGET_NR_flock:
9275 /* NOTE: the flock constant seems to be the same for every
9276 Linux platform */
9277 return get_errno(safe_flock(arg1, arg2));
9278 case TARGET_NR_readv:
9280 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9281 if (vec != NULL) {
9282 ret = get_errno(safe_readv(arg1, vec, arg3));
9283 unlock_iovec(vec, arg2, arg3, 1);
9284 } else {
9285 ret = -host_to_target_errno(errno);
9288 return ret;
9289 case TARGET_NR_writev:
9291 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9292 if (vec != NULL) {
9293 ret = get_errno(safe_writev(arg1, vec, arg3));
9294 unlock_iovec(vec, arg2, arg3, 0);
9295 } else {
9296 ret = -host_to_target_errno(errno);
9299 return ret;
9300 #if defined(TARGET_NR_preadv)
9301 case TARGET_NR_preadv:
9303 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9304 if (vec != NULL) {
9305 unsigned long low, high;
9307 target_to_host_low_high(arg4, arg5, &low, &high);
9308 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9309 unlock_iovec(vec, arg2, arg3, 1);
9310 } else {
9311 ret = -host_to_target_errno(errno);
9314 return ret;
9315 #endif
9316 #if defined(TARGET_NR_pwritev)
9317 case TARGET_NR_pwritev:
9319 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9320 if (vec != NULL) {
9321 unsigned long low, high;
9323 target_to_host_low_high(arg4, arg5, &low, &high);
9324 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9325 unlock_iovec(vec, arg2, arg3, 0);
9326 } else {
9327 ret = -host_to_target_errno(errno);
9330 return ret;
9331 #endif
9332 case TARGET_NR_getsid:
9333 return get_errno(getsid(arg1));
9334 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9335 case TARGET_NR_fdatasync:
9336 return get_errno(fdatasync(arg1));
9337 #endif
9338 #ifdef TARGET_NR__sysctl
9339 case TARGET_NR__sysctl:
9340 /* We don't implement this, but ENOTDIR is always a safe
9341 return value. */
9342 return -TARGET_ENOTDIR;
9343 #endif
9344 case TARGET_NR_sched_getaffinity:
9346 unsigned int mask_size;
9347 unsigned long *mask;
9349 /*
9350 * sched_getaffinity needs multiples of ulong, so need to take
9351 * care of mismatches between target ulong and host ulong sizes.
9352 */
9353 if (arg2 & (sizeof(abi_ulong) - 1)) {
9354 return -TARGET_EINVAL;
9356 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9358 mask = alloca(mask_size);
9359 memset(mask, 0, mask_size);
9360 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9362 if (!is_error(ret)) {
9363 if (ret > arg2) {
9364 /* More data returned than the caller's buffer will fit.
9365 * This only happens if sizeof(abi_long) < sizeof(long)
9366 * and the caller passed us a buffer holding an odd number
9367 * of abi_longs. If the host kernel is actually using the
9368 * extra 4 bytes then fail EINVAL; otherwise we can just
9369 * ignore them and only copy the interesting part.
9370 */
9371 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9372 if (numcpus > arg2 * 8) {
9373 return -TARGET_EINVAL;
9375 ret = arg2;
9378 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9379 return -TARGET_EFAULT;
9383 return ret;
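/*
 * The mask_size computation above is the usual round-up-to-multiple
 * idiom; with 8-byte host longs it behaves like
 *
 *   mask_size = (arg2 + 7) & ~7;  // arg2 = 4 -> 8, arg2 = 12 -> 16
 *
 * so the host call always sees whole ulongs even when the guest's
 * abi_ulong is narrower.
 */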
9384 case TARGET_NR_sched_setaffinity:
9386 unsigned int mask_size;
9387 unsigned long *mask;
9389 /*
9390 * sched_setaffinity needs multiples of ulong, so need to take
9391 * care of mismatches between target ulong and host ulong sizes.
9392 */
9393 if (arg2 & (sizeof(abi_ulong) - 1)) {
9394 return -TARGET_EINVAL;
9396 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9397 mask = alloca(mask_size);
9399 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9400 if (ret) {
9401 return ret;
9404 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9406 case TARGET_NR_getcpu:
9408 unsigned cpu, node;
9409 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9410 arg2 ? &node : NULL,
9411 NULL));
9412 if (is_error(ret)) {
9413 return ret;
9415 if (arg1 && put_user_u32(cpu, arg1)) {
9416 return -TARGET_EFAULT;
9418 if (arg2 && put_user_u32(node, arg2)) {
9419 return -TARGET_EFAULT;
9422 return ret;
9423 case TARGET_NR_sched_setparam:
9425 struct sched_param *target_schp;
9426 struct sched_param schp;
9428 if (arg2 == 0) {
9429 return -TARGET_EINVAL;
9431 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9432 return -TARGET_EFAULT;
9433 schp.sched_priority = tswap32(target_schp->sched_priority);
9434 unlock_user_struct(target_schp, arg2, 0);
9435 return get_errno(sched_setparam(arg1, &schp));
9437 case TARGET_NR_sched_getparam:
9439 struct sched_param *target_schp;
9440 struct sched_param schp;
9442 if (arg2 == 0) {
9443 return -TARGET_EINVAL;
9445 ret = get_errno(sched_getparam(arg1, &schp));
9446 if (!is_error(ret)) {
9447 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9448 return -TARGET_EFAULT;
9449 target_schp->sched_priority = tswap32(schp.sched_priority);
9450 unlock_user_struct(target_schp, arg2, 1);
9453 return ret;
9454 case TARGET_NR_sched_setscheduler:
9456 struct sched_param *target_schp;
9457 struct sched_param schp;
9458 if (arg3 == 0) {
9459 return -TARGET_EINVAL;
9461 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9462 return -TARGET_EFAULT;
9463 schp.sched_priority = tswap32(target_schp->sched_priority);
9464 unlock_user_struct(target_schp, arg3, 0);
9465 return get_errno(sched_setscheduler(arg1, arg2, &schp));
9467 case TARGET_NR_sched_getscheduler:
9468 return get_errno(sched_getscheduler(arg1));
9469 case TARGET_NR_sched_yield:
9470 return get_errno(sched_yield());
9471 case TARGET_NR_sched_get_priority_max:
9472 return get_errno(sched_get_priority_max(arg1));
9473 case TARGET_NR_sched_get_priority_min:
9474 return get_errno(sched_get_priority_min(arg1));
9475 case TARGET_NR_sched_rr_get_interval:
9477 struct timespec ts;
9478 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9479 if (!is_error(ret)) {
9480 ret = host_to_target_timespec(arg2, &ts);
9483 return ret;
9484 case TARGET_NR_nanosleep:
9486 struct timespec req, rem;
9487 target_to_host_timespec(&req, arg1);
9488 ret = get_errno(safe_nanosleep(&req, &rem));
9489 if (is_error(ret) && arg2) {
9490 host_to_target_timespec(arg2, &rem);
9493 return ret;
9494 case TARGET_NR_prctl:
9495 switch (arg1) {
9496 case PR_GET_PDEATHSIG:
9498 int deathsig;
9499 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9500 if (!is_error(ret) && arg2
9501 && put_user_ual(deathsig, arg2)) {
9502 return -TARGET_EFAULT;
9504 return ret;
9506 #ifdef PR_GET_NAME
9507 case PR_GET_NAME:
9509 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9510 if (!name) {
9511 return -TARGET_EFAULT;
9513 ret = get_errno(prctl(arg1, (unsigned long)name,
9514 arg3, arg4, arg5));
9515 unlock_user(name, arg2, 16);
9516 return ret;
9518 case PR_SET_NAME:
9520 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9521 if (!name) {
9522 return -TARGET_EFAULT;
9524 ret = get_errno(prctl(arg1, (unsigned long)name,
9525 arg3, arg4, arg5));
9526 unlock_user(name, arg2, 0);
9527 return ret;
9529 #endif
9530 #ifdef TARGET_MIPS
9531 case TARGET_PR_GET_FP_MODE:
9532 /* TODO: Implement TARGET_PR_GET_FP_MODE handling. */
9533 return -TARGET_EINVAL;
9534 case TARGET_PR_SET_FP_MODE:
9535 /* TODO: Implement TARGET_PR_SET_FP_MODE handling. */
9536 return -TARGET_EINVAL;
9537 #endif /* MIPS */
9538 #ifdef TARGET_AARCH64
9539 case TARGET_PR_SVE_SET_VL:
9540 /*
9541 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9542 * PR_SVE_VL_INHERIT. Note the kernel definition
9543 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9544 * even though the current architectural maximum is VQ=16.
9545 */
9546 ret = -TARGET_EINVAL;
9547 if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9548 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9549 CPUARMState *env = cpu_env;
9550 ARMCPU *cpu = arm_env_get_cpu(env);
9551 uint32_t vq, old_vq;
9553 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9554 vq = MAX(arg2 / 16, 1);
9555 vq = MIN(vq, cpu->sve_max_vq);
9557 if (vq < old_vq) {
9558 aarch64_sve_narrow_vq(env, vq);
9560 env->vfp.zcr_el[1] = vq - 1;
9561 ret = vq * 16;
9563 return ret;
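/*
 * ZCR_EL1 encoding used by both SVE cases: the low four bits hold
 * vq - 1, where vq counts 128-bit quadwords, so
 *
 *   vector length in bytes = ((zcr_el[1] & 0xf) + 1) * 16
 *
 * and PR_SVE_SET_VL's arg2 is a byte length, giving vq = arg2 / 16,
 * clamped to [1, cpu->sve_max_vq].
 */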
9564 case TARGET_PR_SVE_GET_VL:
9565 ret = -TARGET_EINVAL;
9567 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9568 if (cpu_isar_feature(aa64_sve, cpu)) {
9569 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9572 return ret;
9573 #endif /* AARCH64 */
9574 case PR_GET_SECCOMP:
9575 case PR_SET_SECCOMP:
9576 /* Disable seccomp to prevent the target disabling syscalls we
9577 * need. */
9578 return -TARGET_EINVAL;
9579 default:
9580 /* Most prctl options have no pointer arguments */
9581 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9583 break;
9584 #ifdef TARGET_NR_arch_prctl
9585 case TARGET_NR_arch_prctl:
9586 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9587 return do_arch_prctl(cpu_env, arg1, arg2);
9588 #else
9589 #error unreachable
9590 #endif
9591 #endif
9592 #ifdef TARGET_NR_pread64
9593 case TARGET_NR_pread64:
9594 if (regpairs_aligned(cpu_env, num)) {
9595 arg4 = arg5;
9596 arg5 = arg6;
9598 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9599 return -TARGET_EFAULT;
9600 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9601 unlock_user(p, arg2, ret);
9602 return ret;
9603 case TARGET_NR_pwrite64:
9604 if (regpairs_aligned(cpu_env, num)) {
9605 arg4 = arg5;
9606 arg5 = arg6;
9608 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9609 return -TARGET_EFAULT;
9610 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9611 unlock_user(p, arg2, 0);
9612 return ret;
9613 #endif
9614 case TARGET_NR_getcwd:
9615 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9616 return -TARGET_EFAULT;
9617 ret = get_errno(sys_getcwd1(p, arg2));
9618 unlock_user(p, arg1, ret);
9619 return ret;
9620 case TARGET_NR_capget:
9621 case TARGET_NR_capset:
9623 struct target_user_cap_header *target_header;
9624 struct target_user_cap_data *target_data = NULL;
9625 struct __user_cap_header_struct header;
9626 struct __user_cap_data_struct data[2];
9627 struct __user_cap_data_struct *dataptr = NULL;
9628 int i, target_datalen;
9629 int data_items = 1;
9631 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9632 return -TARGET_EFAULT;
9634 header.version = tswap32(target_header->version);
9635 header.pid = tswap32(target_header->pid);
9637 if (header.version != _LINUX_CAPABILITY_VERSION) {
9638 /* Version 2 and up takes pointer to two user_data structs */
9639 data_items = 2;
9642 target_datalen = sizeof(*target_data) * data_items;
9644 if (arg2) {
9645 if (num == TARGET_NR_capget) {
9646 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9647 } else {
9648 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9650 if (!target_data) {
9651 unlock_user_struct(target_header, arg1, 0);
9652 return -TARGET_EFAULT;
9655 if (num == TARGET_NR_capset) {
9656 for (i = 0; i < data_items; i++) {
9657 data[i].effective = tswap32(target_data[i].effective);
9658 data[i].permitted = tswap32(target_data[i].permitted);
9659 data[i].inheritable = tswap32(target_data[i].inheritable);
9663 dataptr = data;
9666 if (num == TARGET_NR_capget) {
9667 ret = get_errno(capget(&header, dataptr));
9668 } else {
9669 ret = get_errno(capset(&header, dataptr));
9672 /* The kernel always updates version for both capget and capset */
9673 target_header->version = tswap32(header.version);
9674 unlock_user_struct(target_header, arg1, 1);
9676 if (arg2) {
9677 if (num == TARGET_NR_capget) {
9678 for (i = 0; i < data_items; i++) {
9679 target_data[i].effective = tswap32(data[i].effective);
9680 target_data[i].permitted = tswap32(data[i].permitted);
9681 target_data[i].inheritable = tswap32(data[i].inheritable);
9683 unlock_user(target_data, arg2, target_datalen);
9684 } else {
9685 unlock_user(target_data, arg2, 0);
9688 return ret;
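/*
 * Note on data_items above: a v1 capability header
 * (_LINUX_CAPABILITY_VERSION) carries a single __user_cap_data_struct
 * of 32 capability bits, while the v2/v3 ABI carries two of them for
 * 64 bits, which is why any other header.version switches data_items
 * to 2.
 */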
9690 case TARGET_NR_sigaltstack:
9691 return do_sigaltstack(arg1, arg2,
9692 get_sp_from_cpustate((CPUArchState *)cpu_env));
9694 #ifdef CONFIG_SENDFILE
9695 #ifdef TARGET_NR_sendfile
9696 case TARGET_NR_sendfile:
9698 off_t *offp = NULL;
9699 off_t off;
9700 if (arg3) {
9701 ret = get_user_sal(off, arg3);
9702 if (is_error(ret)) {
9703 return ret;
9705 offp = &off;
9707 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9708 if (!is_error(ret) && arg3) {
9709 abi_long ret2 = put_user_sal(off, arg3);
9710 if (is_error(ret2)) {
9711 ret = ret2;
9714 return ret;
9716 #endif
9717 #ifdef TARGET_NR_sendfile64
9718 case TARGET_NR_sendfile64:
9720 off_t *offp = NULL;
9721 off_t off;
9722 if (arg3) {
9723 ret = get_user_s64(off, arg3);
9724 if (is_error(ret)) {
9725 return ret;
9727 offp = &off;
9729 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9730 if (!is_error(ret) && arg3) {
9731 abi_long ret2 = put_user_s64(off, arg3);
9732 if (is_error(ret2)) {
9733 ret = ret2;
9736 return ret;
9738 #endif
9739 #endif
9740 #ifdef TARGET_NR_vfork
9741 case TARGET_NR_vfork:
9742 return get_errno(do_fork(cpu_env,
9743 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9744 0, 0, 0, 0));
9745 #endif
9746 #ifdef TARGET_NR_ugetrlimit
9747 case TARGET_NR_ugetrlimit:
9749 struct rlimit rlim;
9750 int resource = target_to_host_resource(arg1);
9751 ret = get_errno(getrlimit(resource, &rlim));
9752 if (!is_error(ret)) {
9753 struct target_rlimit *target_rlim;
9754 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9755 return -TARGET_EFAULT;
9756 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9757 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9758 unlock_user_struct(target_rlim, arg2, 1);
9760 return ret;
9762 #endif
9763 #ifdef TARGET_NR_truncate64
9764 case TARGET_NR_truncate64:
9765 if (!(p = lock_user_string(arg1)))
9766 return -TARGET_EFAULT;
9767 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9768 unlock_user(p, arg1, 0);
9769 return ret;
9770 #endif
9771 #ifdef TARGET_NR_ftruncate64
9772 case TARGET_NR_ftruncate64:
9773 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9774 #endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        }
        return ret;
#endif
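    /* All the stat64-family cases above converge on
     * host_to_target_stat64(), which copies the host struct stat field
     * by field: the target struct stat64 layout, field widths and
     * endianness generally differ from the host's, so no bulk copy is
     * possible. */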
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
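    /* In the group-list calls below, target_id is the target's uid/gid
     * wire type: 16 bits on ABIs that still use the old uid16 syscalls
     * (USE_UID16), abi_ulong otherwise; hence the per-entry
     * tswapid()/high2lowgid() conversion rather than a bulk copy. */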
    case TARGET_NR_getgroups:
    {
        int gidsetsize = arg1;
        target_id *target_grouplist;
        gid_t *grouplist;
        int i;

        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0) {
            return ret;
        }
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                         gidsetsize * sizeof(target_id), 0);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < ret; i++) {
                target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
            }
            unlock_user(target_grouplist, arg2,
                        gidsetsize * sizeof(target_id));
        }
    }
    return ret;
    case TARGET_NR_setgroups:
    {
        int gidsetsize = arg1;
        target_id *target_grouplist;
        gid_t *grouplist = NULL;
        int i;
        if (gidsetsize) {
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2,
                                         gidsetsize * sizeof(target_id), 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
            }
            unlock_user(target_grouplist, arg2, 0);
        }
        return get_errno(setgroups(gidsetsize, grouplist));
    }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
    {
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_id(high2lowuid(ruid), arg1)
                || put_user_id(high2lowuid(euid), arg2)
                || put_user_id(high2lowuid(suid), arg3)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
    {
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_id(high2lowgid(rgid), arg1)
                || put_user_id(high2lowgid(egid), arg2)
                || put_user_id(high2lowgid(sgid), arg3)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
    {
        uid_t euid;
        euid = geteuid();
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
    }
    return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
    {
        gid_t egid;
        egid = getegid();
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
    }
    return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr.  */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                         | SWCR_TRAP_ENABLE_DZE
                                         | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                         | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.  */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr.  */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, orig_fpcr;
                int si_code;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }

                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* We only add to the exception status here.  */
                fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;

                /* Old exceptions are not signaled.  */
                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                /* If any exceptions set by this call,
                   and are unmasked, send a signal.  */
                si_code = 0;
                if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
                    si_code = TARGET_FPE_FLTRES;
                }
                if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
                    si_code = TARGET_FPE_FLTUND;
                }
                if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
                    si_code = TARGET_FPE_FLTOVF;
                }
                if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
                    si_code = TARGET_FPE_FLTDIV;
                }
                if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
                    si_code = TARGET_FPE_FLTINV;
                }
                if (si_code != 0) {
                    target_siginfo_t info;
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel  */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
    {
        abi_ulong mask;
        int how;
        sigset_t set, oldset;

        switch (arg1) {
        case TARGET_SIG_BLOCK:
            how = SIG_BLOCK;
            break;
        case TARGET_SIG_UNBLOCK:
            how = SIG_UNBLOCK;
            break;
        case TARGET_SIG_SETMASK:
            how = SIG_SETMASK;
            break;
        default:
            return -TARGET_EINVAL;
        }
        mask = arg2;
        target_to_host_old_sigset(&set, &mask);
        ret = do_sigprocmask(how, &set, &oldset);
        if (!ret) {
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
    }
    return ret;
#endif
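    /* Note: this goes through do_sigprocmask() rather than the host
     * sigprocmask() directly, so the emulated mask is tracked by QEMU's
     * signal layer and signals QEMU itself relies on (e.g. for guest
     * memory faults) are not blocked on the host. */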
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
    {
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        gid_t *grouplist;
        int i;

        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0) {
            return ret;
        }
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                         gidsetsize * 4, 0);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < ret; i++) {
                target_grouplist[i] = tswap32(grouplist[i]);
            }
            unlock_user(target_grouplist, arg2, gidsetsize * 4);
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
    {
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        gid_t *grouplist;
        int i;

        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
        if (!target_grouplist) {
            return -TARGET_EFAULT;
        }
        for (i = 0; i < gidsetsize; i++) {
            grouplist[i] = tswap32(target_grouplist[i]);
        }
        unlock_user(target_grouplist, arg2, 0);
        return get_errno(setgroups(gidsetsize, grouplist));
    }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
    {
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u32(ruid, arg1)
                || put_user_u32(euid, arg2)
                || put_user_u32(suid, arg3)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
    {
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u32(rgid, arg1)
                || put_user_u32(egid, arg2)
                || put_user_u32(sgid, arg3)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
    {
        void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
        if (!a) {
            return -TARGET_ENOMEM;
        }
        p = lock_user_string(arg3);
        if (!p) {
            ret = -TARGET_EFAULT;
        } else {
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
        }
        unlock_user(a, arg1, 0);
    }
    return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif
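    /* regpairs_aligned() reports whether this target ABI passes 64-bit
     * syscall arguments in aligned (even/odd) register pairs; when it
     * does, a padding register precedes each pair and the useful
     * arguments start one slot later, hence the shuffling above. */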
#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
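    /* ARM OABI lays out struct flock64 without the 64-bit alignment
     * padding that EABI inserts, which is why the copy helpers are
     * swapped for non-EABI CPUs in the fcntl64 case above. */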
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_setxattr) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    return ret;
    case TARGET_NR_fsetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_getxattr) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = get_errno(lgetxattr(p, n, v, arg4));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    return ret;
    case TARGET_NR_fgetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fgetxattr(arg1, n, v, arg4));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    {
        void *p, *n;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_removexattr) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = get_errno(lremovexattr(p, n));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
    }
    return ret;
    case TARGET_NR_fremovexattr:
    {
        void *n;
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fremovexattr(arg1, n));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
    }
    return ret;
#endif
#endif /* CONFIG_ATTR */
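    /* Throughout the xattr cases above a NULL buffer (arg2/arg3 == 0)
     * is passed through untranslated: the xattr syscalls treat a
     * zero-size call as a probe that returns the buffer size the
     * caller would need. */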
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff) {
            ret = -TARGET_EINVAL;
        } else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
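    /* There is no generic set_thread_area: each architecture above
     * stores the TLS pointer in its own CPU-state slot (CP0 UserLocal
     * on MIPS, PR_PID on CRIS, a GDT descriptor on 32-bit x86,
     * ts->tp_value on m68k). */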
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4) {
            host_to_target_timespec(arg4, &ts);
        }

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        return ret;
    }
#endif
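    /* safe_clock_nanosleep() is one of the safe_syscall wrappers: it
     * checks for pending guest signals atomically with entering the
     * host syscall, so a signal that arrives first surfaces as
     * -TARGET_ERESTARTSYS instead of being lost while the thread
     * sleeps. */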
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
    {
        struct timespec *tsp, ts[2];
        if (!arg3) {
            tsp = NULL;
        } else {
            target_to_host_timespec(ts, arg3);
            target_to_host_timespec(ts + 1,
                                    arg3 + sizeof(struct target_timespec));
            tsp = ts;
        }
        if (!arg2) {
            ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        } else {
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
            unlock_user(p, arg2, 0);
        }
    }
    return ret;
#endif
    case TARGET_NR_futex:
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
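    /* fd_trans_register() attaches a read-side translator to the new
     * inotify descriptor so that the struct inotify_event records the
     * guest read()s back are converted to the target's byte order. */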
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
    {
        struct mq_attr posix_mq_attr;
        struct mq_attr *pposix_mq_attr;
        int host_flags;

        host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
        pposix_mq_attr = NULL;
        if (arg4) {
            if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                return -TARGET_EFAULT;
            }
            pposix_mq_attr = &posix_mq_attr;
        }
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
        unlock_user(p, arg1, 0);
    }
    return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_mq_timedsend:
    {
        struct timespec ts;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
        }
        unlock_user(p, arg2, arg3);
    }
    return ret;

    case TARGET_NR_mq_timedreceive:
    {
        struct timespec ts;
        unsigned int prio;

        /* The message buffer is written by the host call, not read, so
         * lock it for writing; the VERIFY_READ previously used here
         * looks like a latent bug under DEBUG_REMAP. */
        p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, NULL));
        }
        unlock_user(p, arg2, arg3);
        if (arg4 != 0) {
            put_user_u32(prio, arg4);
        }
    }
    return ret;
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /* break; */

    case TARGET_NR_mq_getsetattr:
    {
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        ret = 0;
        if (arg2 != 0) {
            copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
            ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                       &posix_mq_attr_out));
        } else if (arg3 != 0) {
            ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
        }
        if (ret == 0 && arg3 != 0) {
            copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        }
    }
    return ret;
#endif
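    /* Per POSIX only the O_NONBLOCK flag is settable via mq_setattr(),
     * so round-tripping the whole struct mq_attr above is safe: the
     * kernel ignores the other fields on set and fills them on get. */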
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
    {
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
    }
    return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
    {
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        if (arg2) {
            if (get_user_u64(loff_in, arg2)) {
                return -TARGET_EFAULT;
            }
            ploff_in = &loff_in;
        }
        if (arg4) {
            if (get_user_u64(loff_out, arg4)) {
                return -TARGET_EFAULT;
            }
            ploff_out = &loff_out;
        }
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        if (arg2) {
            if (put_user_u64(loff_in, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        if (arg4) {
            if (put_user_u64(loff_out, arg4)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
    {
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        if (vec != NULL) {
            ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
            unlock_iovec(vec, arg2, arg3, 0);
        } else {
            ret = -host_to_target_errno(errno);
        }
    }
    return ret;
#endif
#endif /* CONFIG_SPLICE */
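    /* For splice() the optional in/out offsets are read from guest
     * memory before the call and written back afterwards, so the guest
     * observes updated offsets exactly as on a native kernel. */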
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(arg1));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
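    /* The epoll results are staged in a host-side array and converted
     * one event at a time: struct epoll_event is packed on some hosts
     * (e.g. x86-64) and the events/data fields need byteswapping for
     * cross-endian targets, so the layouts cannot be assumed to
     * match. */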
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
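    /* Timer ids handed back to the guest are encoded as
     * TIMER_MAGIC | index; get_timer_id(), used by the timer_* cases
     * below, checks the magic so a stale or corrupted id fails with
     * EINVAL instead of indexing past g_posix_timers[]. */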
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        fd_trans_unregister(ret);
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                return -TARGET_EFAULT;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
                             arg5, arg6, arg7, arg8);

    if (unlikely(do_strace)) {
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                          arg5, arg6, arg7, arg8);
        print_syscall_ret(num, ret);
    } else {
        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                          arg5, arg6, arg7, arg8);
    }

    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
}