/*
 * linux-user/syscall.c — Linux syscall emulation layer.
 * From qemu.git, commit e252f8b55573372e3854d0c974bd8da91fb3cd6d
 * ("linux-user: move TargetFdTrans functions to their own file").
 */
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include "linux_loop.h"
106 #include "uname.h"
108 #include "qemu.h"
109 #include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any clone flag outside these masks is rejected with EINVAL. */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

/* Local copies of the VFAT readdir ioctls; including linux/msdos_fs.h
 * directly conflicts with other headers we need.
 */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* Each _syscallN(type, name, ...) macro defines a static function "name"
 * that invokes the raw host syscall __NR_name with N arguments and returns
 * the unconverted host result (-1 with host errno on failure).
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
226 #define __NR_sys_uname __NR_uname
227 #define __NR_sys_getcwd1 __NR_getcwd
228 #define __NR_sys_getdents __NR_getdents
229 #define __NR_sys_getdents64 __NR_getdents64
230 #define __NR_sys_getpriority __NR_getpriority
231 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
232 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
233 #define __NR_sys_syslog __NR_syslog
234 #define __NR_sys_futex __NR_futex
235 #define __NR_sys_inotify_init __NR_inotify_init
236 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
237 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
239 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
240 #define __NR__llseek __NR_lseek
241 #endif
243 /* Newer kernel ports have llseek() instead of _llseek() */
244 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
245 #define TARGET_NR__llseek TARGET_NR_llseek
246 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
 * errno.
 */
static int gettid(void) {
    return -ENOSYS;
}
#endif
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
268 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
269 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
270 #endif
271 #if (defined(TARGET_NR_getdents) && \
272 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
273 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
274 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
275 #endif
276 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
277 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
278 loff_t *, res, uint, wh);
279 #endif
280 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
281 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
282 siginfo_t *, uinfo)
283 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
284 #ifdef __NR_exit_group
285 _syscall1(int,exit_group,int,error_code)
286 #endif
287 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
288 _syscall1(int,set_tid_address,int *,tidptr)
289 #endif
290 #if defined(TARGET_NR_futex) && defined(__NR_futex)
291 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
292 const struct timespec *,timeout,int *,uaddr2,int,val3)
293 #endif
294 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
295 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
296 unsigned long *, user_mask_ptr);
297 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
298 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
299 unsigned long *, user_mask_ptr);
300 #define __NR_sys_getcpu __NR_getcpu
301 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
302 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
303 void *, arg);
304 _syscall2(int, capget, struct __user_cap_header_struct *, header,
305 struct __user_cap_data_struct *, data);
306 _syscall2(int, capset, struct __user_cap_header_struct *, header,
307 struct __user_cap_data_struct *, data);
308 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
309 _syscall2(int, ioprio_get, int, which, int, who)
310 #endif
311 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
312 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
313 #endif
314 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
315 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
316 #endif
318 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
319 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
320 unsigned long, idx1, unsigned long, idx2)
321 #endif
323 static bitmask_transtbl fcntl_flags_tbl[] = {
324 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
325 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
326 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
327 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
328 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
329 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
330 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
331 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
332 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
333 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
334 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
335 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
336 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
337 #if defined(O_DIRECT)
338 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
339 #endif
340 #if defined(O_NOATIME)
341 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
342 #endif
343 #if defined(O_CLOEXEC)
344 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
345 #endif
346 #if defined(O_PATH)
347 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
348 #endif
349 #if defined(O_TMPFILE)
350 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
351 #endif
352 /* Don't terminate the list prematurely on 64-bit host+guest. */
353 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
354 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
355 #endif
356 { 0, 0, 0, 0 }
/* getcwd() wrapper matching the kernel syscall convention: returns the
 * length of the path including the terminating NUL on success, or -1
 * with errno set on failure (libc getcwd() returns the buffer pointer
 * instead).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel headers predate utimensat: report it as unimplemented. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* No host renameat2: flags == 0 degrades cleanly to renameat(),
 * anything else is unsupportable and reported as ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin pass-through wrappers so the syscall dispatch code can call the
 * inotify family uniformly whether or not libc provides them.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first unused slot in
 * g_posix_timers, or -1 when all 32 slots are in use.  The slot is
 * marked busy with a dummy value until timer_create fills it in.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
504 #define ERRNO_TABLE_SIZE 1200
506 /* target_to_host_errno_table[] is initialized from
507 * host_to_target_errno_table[] in syscall_init(). */
508 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
512 * This list is the union of errno values overridden in asm-<arch>/errno.h
513 * minus the errnos that are not actually generic to all archs.
515 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
516 [EAGAIN] = TARGET_EAGAIN,
517 [EIDRM] = TARGET_EIDRM,
518 [ECHRNG] = TARGET_ECHRNG,
519 [EL2NSYNC] = TARGET_EL2NSYNC,
520 [EL3HLT] = TARGET_EL3HLT,
521 [EL3RST] = TARGET_EL3RST,
522 [ELNRNG] = TARGET_ELNRNG,
523 [EUNATCH] = TARGET_EUNATCH,
524 [ENOCSI] = TARGET_ENOCSI,
525 [EL2HLT] = TARGET_EL2HLT,
526 [EDEADLK] = TARGET_EDEADLK,
527 [ENOLCK] = TARGET_ENOLCK,
528 [EBADE] = TARGET_EBADE,
529 [EBADR] = TARGET_EBADR,
530 [EXFULL] = TARGET_EXFULL,
531 [ENOANO] = TARGET_ENOANO,
532 [EBADRQC] = TARGET_EBADRQC,
533 [EBADSLT] = TARGET_EBADSLT,
534 [EBFONT] = TARGET_EBFONT,
535 [ENOSTR] = TARGET_ENOSTR,
536 [ENODATA] = TARGET_ENODATA,
537 [ETIME] = TARGET_ETIME,
538 [ENOSR] = TARGET_ENOSR,
539 [ENONET] = TARGET_ENONET,
540 [ENOPKG] = TARGET_ENOPKG,
541 [EREMOTE] = TARGET_EREMOTE,
542 [ENOLINK] = TARGET_ENOLINK,
543 [EADV] = TARGET_EADV,
544 [ESRMNT] = TARGET_ESRMNT,
545 [ECOMM] = TARGET_ECOMM,
546 [EPROTO] = TARGET_EPROTO,
547 [EDOTDOT] = TARGET_EDOTDOT,
548 [EMULTIHOP] = TARGET_EMULTIHOP,
549 [EBADMSG] = TARGET_EBADMSG,
550 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
551 [EOVERFLOW] = TARGET_EOVERFLOW,
552 [ENOTUNIQ] = TARGET_ENOTUNIQ,
553 [EBADFD] = TARGET_EBADFD,
554 [EREMCHG] = TARGET_EREMCHG,
555 [ELIBACC] = TARGET_ELIBACC,
556 [ELIBBAD] = TARGET_ELIBBAD,
557 [ELIBSCN] = TARGET_ELIBSCN,
558 [ELIBMAX] = TARGET_ELIBMAX,
559 [ELIBEXEC] = TARGET_ELIBEXEC,
560 [EILSEQ] = TARGET_EILSEQ,
561 [ENOSYS] = TARGET_ENOSYS,
562 [ELOOP] = TARGET_ELOOP,
563 [ERESTART] = TARGET_ERESTART,
564 [ESTRPIPE] = TARGET_ESTRPIPE,
565 [ENOTEMPTY] = TARGET_ENOTEMPTY,
566 [EUSERS] = TARGET_EUSERS,
567 [ENOTSOCK] = TARGET_ENOTSOCK,
568 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
569 [EMSGSIZE] = TARGET_EMSGSIZE,
570 [EPROTOTYPE] = TARGET_EPROTOTYPE,
571 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
572 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
573 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
574 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
575 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
576 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
577 [EADDRINUSE] = TARGET_EADDRINUSE,
578 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
579 [ENETDOWN] = TARGET_ENETDOWN,
580 [ENETUNREACH] = TARGET_ENETUNREACH,
581 [ENETRESET] = TARGET_ENETRESET,
582 [ECONNABORTED] = TARGET_ECONNABORTED,
583 [ECONNRESET] = TARGET_ECONNRESET,
584 [ENOBUFS] = TARGET_ENOBUFS,
585 [EISCONN] = TARGET_EISCONN,
586 [ENOTCONN] = TARGET_ENOTCONN,
587 [EUCLEAN] = TARGET_EUCLEAN,
588 [ENOTNAM] = TARGET_ENOTNAM,
589 [ENAVAIL] = TARGET_ENAVAIL,
590 [EISNAM] = TARGET_EISNAM,
591 [EREMOTEIO] = TARGET_EREMOTEIO,
592 [EDQUOT] = TARGET_EDQUOT,
593 [ESHUTDOWN] = TARGET_ESHUTDOWN,
594 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
595 [ETIMEDOUT] = TARGET_ETIMEDOUT,
596 [ECONNREFUSED] = TARGET_ECONNREFUSED,
597 [EHOSTDOWN] = TARGET_EHOSTDOWN,
598 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
599 [EALREADY] = TARGET_EALREADY,
600 [EINPROGRESS] = TARGET_EINPROGRESS,
601 [ESTALE] = TARGET_ESTALE,
602 [ECANCELED] = TARGET_ECANCELED,
603 [ENOMEDIUM] = TARGET_ENOMEDIUM,
604 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
605 #ifdef ENOKEY
606 [ENOKEY] = TARGET_ENOKEY,
607 #endif
608 #ifdef EKEYEXPIRED
609 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
610 #endif
611 #ifdef EKEYREVOKED
612 [EKEYREVOKED] = TARGET_EKEYREVOKED,
613 #endif
614 #ifdef EKEYREJECTED
615 [EKEYREJECTED] = TARGET_EKEYREJECTED,
616 #endif
617 #ifdef EOWNERDEAD
618 [EOWNERDEAD] = TARGET_EOWNERDEAD,
619 #endif
620 #ifdef ENOTRECOVERABLE
621 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
622 #endif
623 #ifdef ENOMSG
624 [ENOMSG] = TARGET_ENOMSG,
625 #endif
626 #ifdef ERKFILL
627 [ERFKILL] = TARGET_ERFKILL,
628 #endif
629 #ifdef EHWPOISON
630 [EHWPOISON] = TARGET_EHWPOISON,
631 #endif
634 static inline int host_to_target_errno(int err)
636 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
637 host_to_target_errno_table[err]) {
638 return host_to_target_errno_table[err];
640 return err;
643 static inline int target_to_host_errno(int err)
645 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
646 target_to_host_errno_table[err]) {
647 return target_to_host_errno_table[err];
649 return err;
652 static inline abi_long get_errno(abi_long ret)
654 if (ret == -1)
655 return -host_to_target_errno(errno);
656 else
657 return ret;
660 const char *target_strerror(int err)
662 if (err == TARGET_ERESTARTSYS) {
663 return "To be restarted";
665 if (err == TARGET_QEMU_ESIGRETURN) {
666 return "Successful exit from sigreturn";
669 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
670 return NULL;
672 return strerror(target_to_host_errno(err));
/* Each safe_syscallN macro defines a static function safe_name() that
 * issues the host syscall via safe_syscall(), which guarantees correct
 * interaction with guest signal handling (the syscall is either fully
 * started or restartable with -TARGET_ERESTARTSYS).
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
722 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
723 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
724 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
725 int, flags, mode_t, mode)
726 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
727 struct rusage *, rusage)
728 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
729 int, options, struct rusage *, rusage)
730 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
731 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
732 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734 struct timespec *, tsp, const sigset_t *, sigmask,
735 size_t, sigsetsize)
736 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
737 int, maxevents, int, timeout, const sigset_t *, sigmask,
738 size_t, sigsetsize)
739 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
740 const struct timespec *,timeout,int *,uaddr2,int,val3)
741 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
742 safe_syscall2(int, kill, pid_t, pid, int, sig)
743 safe_syscall2(int, tkill, int, tid, int, sig)
744 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
745 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
746 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
747 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
748 unsigned long, pos_l, unsigned long, pos_h)
749 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
750 unsigned long, pos_l, unsigned long, pos_h)
751 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
752 socklen_t, addrlen)
753 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
754 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
755 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
756 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
757 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
758 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
759 safe_syscall2(int, flock, int, fd, int, operation)
760 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
761 const struct timespec *, uts, size_t, sigsetsize)
762 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
763 int, flags)
764 safe_syscall2(int, nanosleep, const struct timespec *, req,
765 struct timespec *, rem)
766 #ifdef TARGET_NR_clock_nanosleep
767 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
768 const struct timespec *, req, struct timespec *, rem)
769 #endif
770 #ifdef __NR_msgsnd
771 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
772 int, flags)
773 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
774 long, msgtype, int, flags)
775 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
776 unsigned, nsops, const struct timespec *, timeout)
777 #else
778 /* This host kernel architecture uses a single ipc syscall; fake up
779 * wrappers for the sub-operations to hide this implementation detail.
780 * Annoyingly we can't include linux/ipc.h to get the constant definitions
781 * for the call parameter because some structs in there conflict with the
782 * sys/ipc.h ones. So we just define them here, and rely on them being
783 * the same for all host architectures.
785 #define Q_SEMTIMEDOP 4
786 #define Q_MSGSND 11
787 #define Q_MSGRCV 12
788 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
790 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
791 void *, ptr, long, fifth)
792 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
794 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
796 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
798 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
800 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
801 const struct timespec *timeout)
803 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
804 (long)timeout);
806 #endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
829 static inline int host_to_target_sock_type(int host_type)
831 int target_type;
833 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
834 case SOCK_DGRAM:
835 target_type = TARGET_SOCK_DGRAM;
836 break;
837 case SOCK_STREAM:
838 target_type = TARGET_SOCK_STREAM;
839 break;
840 default:
841 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
842 break;
845 #if defined(SOCK_CLOEXEC)
846 if (host_type & SOCK_CLOEXEC) {
847 target_type |= TARGET_SOCK_CLOEXEC;
849 #endif
851 #if defined(SOCK_NONBLOCK)
852 if (host_type & SOCK_NONBLOCK) {
853 target_type |= TARGET_SOCK_NONBLOCK;
855 #endif
857 return target_type;
860 static abi_ulong target_brk;
861 static abi_ulong target_original_brk;
862 static abi_ulong brk_page;
864 void target_set_brk(abi_ulong new_brk)
866 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
867 brk_page = HOST_PAGE_ALIGN(target_brk);
870 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
871 #define DEBUGF_BRK(message, args...)
873 /* do_brk() must return target values and target errnos. */
874 abi_long do_brk(abi_ulong new_brk)
876 abi_long mapped_addr;
877 abi_ulong new_alloc_size;
879 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
881 if (!new_brk) {
882 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
883 return target_brk;
885 if (new_brk < target_original_brk) {
886 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
887 target_brk);
888 return target_brk;
891 /* If the new brk is less than the highest page reserved to the
892 * target heap allocation, set it and we're almost done... */
893 if (new_brk <= brk_page) {
894 /* Heap contents are initialized to zero, as for anonymous
895 * mapped pages. */
896 if (new_brk > target_brk) {
897 memset(g2h(target_brk), 0, new_brk - target_brk);
899 target_brk = new_brk;
900 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
901 return target_brk;
904 /* We need to allocate more memory after the brk... Note that
905 * we don't use MAP_FIXED because that will map over the top of
906 * any existing mapping (like the one with the host libc or qemu
907 * itself); instead we treat "mapped but at wrong address" as
908 * a failure and unmap again.
910 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
911 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
912 PROT_READ|PROT_WRITE,
913 MAP_ANON|MAP_PRIVATE, 0, 0));
915 if (mapped_addr == brk_page) {
916 /* Heap contents are initialized to zero, as for anonymous
917 * mapped pages. Technically the new pages are already
918 * initialized to zero since they *are* anonymous mapped
919 * pages, however we have to take care with the contents that
920 * come from the remaining part of the previous page: it may
921 * contains garbage data due to a previous heap usage (grown
922 * then shrunken). */
923 memset(g2h(target_brk), 0, brk_page - target_brk);
925 target_brk = new_brk;
926 brk_page = HOST_PAGE_ALIGN(target_brk);
927 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
928 target_brk);
929 return target_brk;
930 } else if (mapped_addr != -1) {
931 /* Mapped but at wrong address, meaning there wasn't actually
932 * enough space for this brk.
934 target_munmap(mapped_addr, new_alloc_size);
935 mapped_addr = -1;
936 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
938 else {
939 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
942 #if defined(TARGET_ALPHA)
943 /* We (partially) emulate OSF/1 on Alpha, which requires we
944 return a proper errno, not an unchanged brk value. */
945 return -TARGET_ENOMEM;
946 #endif
947 /* For everything else, return the previous break. */
948 return target_brk;
951 static inline abi_long copy_from_user_fdset(fd_set *fds,
952 abi_ulong target_fds_addr,
953 int n)
955 int i, nw, j, k;
956 abi_ulong b, *target_fds;
958 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
959 if (!(target_fds = lock_user(VERIFY_READ,
960 target_fds_addr,
961 sizeof(abi_ulong) * nw,
962 1)))
963 return -TARGET_EFAULT;
965 FD_ZERO(fds);
966 k = 0;
967 for (i = 0; i < nw; i++) {
968 /* grab the abi_ulong */
969 __get_user(b, &target_fds[i]);
970 for (j = 0; j < TARGET_ABI_BITS; j++) {
971 /* check the bit inside the abi_ulong */
972 if ((b >> j) & 1)
973 FD_SET(k, fds);
974 k++;
978 unlock_user(target_fds, target_fds_addr, 0);
980 return 0;
983 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
984 abi_ulong target_fds_addr,
985 int n)
987 if (target_fds_addr) {
988 if (copy_from_user_fdset(fds, target_fds_addr, n))
989 return -TARGET_EFAULT;
990 *fds_ptr = fds;
991 } else {
992 *fds_ptr = NULL;
994 return 0;
997 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
998 const fd_set *fds,
999 int n)
1001 int i, nw, j, k;
1002 abi_long v;
1003 abi_ulong *target_fds;
1005 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1006 if (!(target_fds = lock_user(VERIFY_WRITE,
1007 target_fds_addr,
1008 sizeof(abi_ulong) * nw,
1009 0)))
1010 return -TARGET_EFAULT;
1012 k = 0;
1013 for (i = 0; i < nw; i++) {
1014 v = 0;
1015 for (j = 0; j < TARGET_ABI_BITS; j++) {
1016 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1017 k++;
1019 __put_user(v, &target_fds[i]);
1022 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1024 return 0;
1027 #if defined(__alpha__)
1028 #define HOST_HZ 1024
1029 #else
1030 #define HOST_HZ 100
1031 #endif
/* Convert a host clock_t tick count into guest HZ units.
 * When host and guest tick rates match this is the identity; otherwise
 * rescale through a 64-bit intermediate to avoid overflow of the
 * ticks * TARGET_HZ product.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to the guest struct target_rusage at
 * target_addr, byte-swapping every field for the guest ABI.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is not
 * writable.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    /* Straight field-by-field copy; the two timevals are flattened into
     * their sec/usec members. */
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1072 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1074 abi_ulong target_rlim_swap;
1075 rlim_t result;
1077 target_rlim_swap = tswapal(target_rlim);
1078 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1079 return RLIM_INFINITY;
1081 result = target_rlim_swap;
1082 if (target_rlim_swap != (rlim_t)result)
1083 return RLIM_INFINITY;
1085 return result;
1088 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1090 abi_ulong target_rlim_swap;
1091 abi_ulong result;
1093 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1094 target_rlim_swap = TARGET_RLIM_INFINITY;
1095 else
1096 target_rlim_swap = rlim;
1097 result = tswapal(target_rlim_swap);
1099 return result;
1102 static inline int target_to_host_resource(int code)
1104 switch (code) {
1105 case TARGET_RLIMIT_AS:
1106 return RLIMIT_AS;
1107 case TARGET_RLIMIT_CORE:
1108 return RLIMIT_CORE;
1109 case TARGET_RLIMIT_CPU:
1110 return RLIMIT_CPU;
1111 case TARGET_RLIMIT_DATA:
1112 return RLIMIT_DATA;
1113 case TARGET_RLIMIT_FSIZE:
1114 return RLIMIT_FSIZE;
1115 case TARGET_RLIMIT_LOCKS:
1116 return RLIMIT_LOCKS;
1117 case TARGET_RLIMIT_MEMLOCK:
1118 return RLIMIT_MEMLOCK;
1119 case TARGET_RLIMIT_MSGQUEUE:
1120 return RLIMIT_MSGQUEUE;
1121 case TARGET_RLIMIT_NICE:
1122 return RLIMIT_NICE;
1123 case TARGET_RLIMIT_NOFILE:
1124 return RLIMIT_NOFILE;
1125 case TARGET_RLIMIT_NPROC:
1126 return RLIMIT_NPROC;
1127 case TARGET_RLIMIT_RSS:
1128 return RLIMIT_RSS;
1129 case TARGET_RLIMIT_RTPRIO:
1130 return RLIMIT_RTPRIO;
1131 case TARGET_RLIMIT_SIGPENDING:
1132 return RLIMIT_SIGPENDING;
1133 case TARGET_RLIMIT_STACK:
1134 return RLIMIT_STACK;
1135 default:
1136 return code;
1140 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1141 abi_ulong target_tv_addr)
1143 struct target_timeval *target_tv;
1145 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1146 return -TARGET_EFAULT;
1148 __get_user(tv->tv_sec, &target_tv->tv_sec);
1149 __get_user(tv->tv_usec, &target_tv->tv_usec);
1151 unlock_user_struct(target_tv, target_tv_addr, 0);
1153 return 0;
1156 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1157 const struct timeval *tv)
1159 struct target_timeval *target_tv;
1161 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1162 return -TARGET_EFAULT;
1164 __put_user(tv->tv_sec, &target_tv->tv_sec);
1165 __put_user(tv->tv_usec, &target_tv->tv_usec);
1167 unlock_user_struct(target_tv, target_tv_addr, 1);
1169 return 0;
1172 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1173 abi_ulong target_tz_addr)
1175 struct target_timezone *target_tz;
1177 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1178 return -TARGET_EFAULT;
1181 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1182 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1184 unlock_user_struct(target_tz, target_tz_addr, 0);
1186 return 0;
1189 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1190 #include <mqueue.h>
1192 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1193 abi_ulong target_mq_attr_addr)
1195 struct target_mq_attr *target_mq_attr;
1197 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1198 target_mq_attr_addr, 1))
1199 return -TARGET_EFAULT;
1201 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1202 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1203 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1204 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1206 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1208 return 0;
1211 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1212 const struct mq_attr *attr)
1214 struct target_mq_attr *target_mq_attr;
1216 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1217 target_mq_attr_addr, 0))
1218 return -TARGET_EFAULT;
1220 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1221 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1222 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1223 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1225 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1227 return 0;
1229 #endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements guest select(): converts up to three guest fd_sets and an
 * optional timeout into host form, calls the host via safe_pselect6
 * (which handles signal-race safety), then copies the surviving fd_sets
 * and the remaining timeout back to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address means "NULL set"; the _ptr helpers leave the
     * corresponding *_ptr NULL in that case. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* pselect6 takes a timespec, so widen the guest's timeval. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* pselect6 updated ts with the remaining time; reflect that
         * back into the guest's timeval. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Legacy "old select" ABI: the guest passes a single pointer to a
 * block holding all five select() arguments.  Unpack the block and
 * forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
1311 #endif
/* Host pipe2() wrapper.  Returns the raw host result (-1 with errno
 * set on failure) when the host supports pipe2, or -ENOSYS when the
 * build-time probe (CONFIG_PIPE2) found no host support.  The caller
 * (do_pipe) converts to a target errno.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Implement guest pipe()/pipe2().  flags selects pipe2 behaviour;
 * is_pipe2 distinguishes which guest syscall was used, because several
 * targets return the two fds in registers for plain pipe() rather
 * than through the user-memory array.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.
       For those, fd[1] goes in a second return register and fd[0] is
       the syscall return value. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic path: write both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1356 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1357 abi_ulong target_addr,
1358 socklen_t len)
1360 struct target_ip_mreqn *target_smreqn;
1362 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1363 if (!target_smreqn)
1364 return -TARGET_EFAULT;
1365 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1366 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1367 if (len == sizeof(struct target_ip_mreqn))
1368 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1369 unlock_user(target_smreqn, target_addr, 0);
1371 return 0;
/* Convert a guest sockaddr at target_addr into the host *addr.
 * fd is consulted for a per-fd translation hook (used e.g. for
 * netlink sockets); if one is registered it takes over entirely.
 * Handles the AF_UNIX sun_path termination quirk and byte-swaps the
 * address-family-specific fields for AF_NETLINK and AF_PACKET.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* grow len by one if the path's last byte is non-NUL but
             * the following byte is NUL */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr *addr (len bytes) out to the guest buffer at
 * target_addr, byte-swapping family-specific fields (AF_NETLINK,
 * AF_PACKET, AF_INET6 scope id).  len may legitimately truncate the
 * structure; fields are only swapped when len covers them.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if len actually covers it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
              (struct target_sockaddr_in6 *)target_saddr;
        /* NOTE(review): sin6_scope_id is a 32-bit field but is swapped
         * with tswap16 here — looks suspicious; confirm against the
         * target_sockaddr_in6 definition. */
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the guest ancillary-data (cmsg) chain attached to
 * target_msgh into host form in msgh.  The host buffer in msgh must
 * already be allocated by the caller.  Supported payloads are
 * SCM_RIGHTS (fd arrays) and SCM_CREDENTIALS; anything else is copied
 * verbatim with a warning.  On return msgh->msg_controllen is the
 * number of host bytes actually used.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk both chains in lockstep, converting one header at a time. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each int in the array. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host ancillary-data (cmsg) chain in msgh into guest form
 * attached to target_msgh.  Handles the payloads QEMU knows how to
 * translate (SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS, IP_TTL,
 * IP_RECVERR, IPV6_HOPLIMIT, IPV6_RECVERR); anything else is copied
 * byte-for-byte with a warning.  Guest-side truncation is reported via
 * MSG_CTRUNC, mirroring the kernel's put_cmsg() behaviour.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                /* zero-pad when the target payload is wider */
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos.
 *
 * Translate a guest setsockopt() call: map the guest level/optname to
 * host constants, convert the option payload from guest memory into a
 * host-side buffer of the right shape, and issue the host syscall.
 * Unknown level/optname combinations return -TARGET_ENOPROTOOPT via
 * the shared "unimplemented" label.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These accept either an int or a single byte, matching
             * the kernel's leniency for historical callers. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* ip_mreq_source fields are all network byte order, so the
             * guest layout can be passed through unmodified. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* only the ifindex needs byte-swapping; the address is
             * already in network byte order */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* the filter is 8 32-bit words; swap each */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                /* shared tail for SO_RCVTIMEO and SO_SNDTIMEO */
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                /* convert each BPF instruction to host byte order */
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                /* re-terminate the name in a local buffer since the
                 * guest copy may not be NUL-terminated */
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        default:
            goto unimplemented;
        }
        /* shared tail for all the int-valued SOL_SOCKET options above */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates target-level socket option names (TARGET_SO_*) to host names,
 * calls the host getsockopt(2), and copies the result back into guest
 * memory with the appropriate byte swapping.  Only SOL_SOCKET, SOL_TCP
 * and SOL_IP are handled; anything else is reported as unsupported.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Returns a struct ucred; convert field by field since the
             * target layout/endianness may differ from the host's. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Clamp the copy-out to what the kernel actually produced. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown SOL_SOCKET options are passed through untranslated
             * and treated as plain ints. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): lv is initialised from sizeof(lv) (socklen_t)
         * rather than sizeof(val) (int); both are 4 bytes on the
         * supported hosts, so the result is the same — confirm if
         * porting to an unusual ABI. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* Host socket type constants may differ from the target's. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            /* Guest asked for less than 4 bytes: write a single byte,
             * matching what a short optlen gets from the kernel. */
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Small in-range values can be returned as one byte,
                 * mirroring the kernel's IP-level behaviour. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2301 /* Convert target low/high pair representing file offset into the host
2302 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2303 * as the kernel doesn't handle them either.
2305 static void target_to_host_low_high(abi_ulong tlow,
2306 abi_ulong thigh,
2307 unsigned long *hlow,
2308 unsigned long *hhigh)
2310 uint64_t off = tlow |
2311 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2312 TARGET_LONG_BITS / 2;
2314 *hlow = off;
2315 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
/* Pin a guest iovec array into host memory and build the equivalent host
 * struct iovec array.
 *
 * Returns a g_malloc'd host vector (release with unlock_iovec()), or NULL
 * with errno set on failure.  A NULL return with errno == 0 means count
 * was zero.  Bad buffer pointers after the first entry do not fail the
 * call; they become zero-length entries so the host syscall performs a
 * partial transfer, matching kernel readv/writev semantics.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so it cannot exceed max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Undo the per-entry locks taken so far (entry i itself was never
     * successfully locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2406 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2407 abi_ulong count, int copy)
2409 struct target_iovec *target_vec;
2410 int i;
2412 target_vec = lock_user(VERIFY_READ, target_addr,
2413 count * sizeof(struct target_iovec), 1);
2414 if (target_vec) {
2415 for (i = 0; i < count; i++) {
2416 abi_ulong base = tswapal(target_vec[i].iov_base);
2417 abi_long len = tswapal(target_vec[i].iov_len);
2418 if (len < 0) {
2419 break;
2421 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2423 unlock_user(target_vec, target_addr, 0);
2426 g_free(vec);
2429 static inline int target_to_host_sock_type(int *type)
2431 int host_type = 0;
2432 int target_type = *type;
2434 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2435 case TARGET_SOCK_DGRAM:
2436 host_type = SOCK_DGRAM;
2437 break;
2438 case TARGET_SOCK_STREAM:
2439 host_type = SOCK_STREAM;
2440 break;
2441 default:
2442 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2443 break;
2445 if (target_type & TARGET_SOCK_CLOEXEC) {
2446 #if defined(SOCK_CLOEXEC)
2447 host_type |= SOCK_CLOEXEC;
2448 #else
2449 return -TARGET_EINVAL;
2450 #endif
2452 if (target_type & TARGET_SOCK_NONBLOCK) {
2453 #if defined(SOCK_NONBLOCK)
2454 host_type |= SOCK_NONBLOCK;
2455 #elif !defined(O_NONBLOCK)
2456 return -TARGET_EINVAL;
2457 #endif
2459 *type = host_type;
2460 return 0;
2463 /* Try to emulate socket type flags after socket creation. */
2464 static int sock_flags_fixup(int fd, int target_type)
2466 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2467 if (target_type & TARGET_SOCK_NONBLOCK) {
2468 int flags = fcntl(fd, F_GETFL);
2469 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2470 close(fd);
2471 return -TARGET_EINVAL;
2474 #endif
2475 return fd;
/* do_socket() Must return target values and target errnos.
 *
 * Creates a host socket for the guest.  Netlink sockets are restricted
 * to the protocols this emulation can translate; SOCK_PACKET and
 * translated netlink sockets get a TargetFdTrans registered so their
 * data/addresses are converted on later send/recv calls.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* Keep the original target type: sock_flags_fixup() needs the
     * untranslated flag bits. */
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols we know how to translate are allowed. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* Packet sockets take the protocol in network byte order. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: the whitelist above rejects anything else. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
2532 /* do_bind() Must return target values and target errnos. */
2533 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2534 socklen_t addrlen)
2536 void *addr;
2537 abi_long ret;
2539 if ((int)addrlen < 0) {
2540 return -TARGET_EINVAL;
2543 addr = alloca(addrlen+1);
2545 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2546 if (ret)
2547 return ret;
2549 return get_errno(bind(sockfd, addr, addrlen));
2552 /* do_connect() Must return target values and target errnos. */
2553 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2554 socklen_t addrlen)
2556 void *addr;
2557 abi_long ret;
2559 if ((int)addrlen < 0) {
2560 return -TARGET_EINVAL;
2563 addr = alloca(addrlen+1);
2565 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2566 if (ret)
2567 return ret;
2569 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation; msgp is an already-locked guest
 * msghdr.  Handles address conversion, iovec pinning, control-message
 * (cmsg) translation and any per-fd data translator registered via
 * fd_trans.  'send' selects direction: nonzero = sendmsg, 0 = recvmsg.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg payloads can be wider than the target's (e.g. 64-bit
     * host vs 32-bit target), so reserve twice the guest's length. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* Translate the outgoing payload in a scratch copy.
             * NOTE(review): only the first iovec entry is translated;
             * presumably translated fds are used with single-buffer
             * messages — confirm against the fd_trans users. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 is the deliberately-bad name set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success report the byte count from recvmsg. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2678 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2679 int flags, int send)
2681 abi_long ret;
2682 struct target_msghdr *msgp;
2684 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2685 msgp,
2686 target_msg,
2687 send ? 1 : 0)) {
2688 return -TARGET_EFAULT;
2690 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2691 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2692 return ret;
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest mmsghdr vector.  Matches kernel semantics: if any datagrams were
 * transferred before an error, the count is returned and the error is
 * dropped; only a failure on the very first datagram is reported.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel silently clamps the vector length the same way. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
2742 /* do_accept4() Must return target values and target errnos. */
2743 static abi_long do_accept4(int fd, abi_ulong target_addr,
2744 abi_ulong target_addrlen_addr, int flags)
2746 socklen_t addrlen;
2747 void *addr;
2748 abi_long ret;
2749 int host_flags;
2751 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2753 if (target_addr == 0) {
2754 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2757 /* linux returns EINVAL if addrlen pointer is invalid */
2758 if (get_user_u32(addrlen, target_addrlen_addr))
2759 return -TARGET_EINVAL;
2761 if ((int)addrlen < 0) {
2762 return -TARGET_EINVAL;
2765 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2766 return -TARGET_EINVAL;
2768 addr = alloca(addrlen);
2770 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2771 if (!is_error(ret)) {
2772 host_to_target_sockaddr(target_addr, addr, addrlen);
2773 if (put_user_u32(addrlen, target_addrlen_addr))
2774 ret = -TARGET_EFAULT;
2776 return ret;
2779 /* do_getpeername() Must return target values and target errnos. */
2780 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2781 abi_ulong target_addrlen_addr)
2783 socklen_t addrlen;
2784 void *addr;
2785 abi_long ret;
2787 if (get_user_u32(addrlen, target_addrlen_addr))
2788 return -TARGET_EFAULT;
2790 if ((int)addrlen < 0) {
2791 return -TARGET_EINVAL;
2794 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2795 return -TARGET_EFAULT;
2797 addr = alloca(addrlen);
2799 ret = get_errno(getpeername(fd, addr, &addrlen));
2800 if (!is_error(ret)) {
2801 host_to_target_sockaddr(target_addr, addr, addrlen);
2802 if (put_user_u32(addrlen, target_addrlen_addr))
2803 ret = -TARGET_EFAULT;
2805 return ret;
2808 /* do_getsockname() Must return target values and target errnos. */
2809 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2810 abi_ulong target_addrlen_addr)
2812 socklen_t addrlen;
2813 void *addr;
2814 abi_long ret;
2816 if (get_user_u32(addrlen, target_addrlen_addr))
2817 return -TARGET_EFAULT;
2819 if ((int)addrlen < 0) {
2820 return -TARGET_EINVAL;
2823 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2824 return -TARGET_EFAULT;
2826 addr = alloca(addrlen);
2828 ret = get_errno(getsockname(fd, addr, &addrlen));
2829 if (!is_error(ret)) {
2830 host_to_target_sockaddr(target_addr, addr, addrlen);
2831 if (put_user_u32(addrlen, target_addrlen_addr))
2832 ret = -TARGET_EFAULT;
2834 return ret;
2837 /* do_socketpair() Must return target values and target errnos. */
2838 static abi_long do_socketpair(int domain, int type, int protocol,
2839 abi_ulong target_tab_addr)
2841 int tab[2];
2842 abi_long ret;
2844 target_to_host_sock_type(&type);
2846 ret = get_errno(socketpair(domain, type, protocol, tab));
2847 if (!is_error(ret)) {
2848 if (put_user_s32(tab[0], target_tab_addr)
2849 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2850 ret = -TARGET_EFAULT;
2852 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Implements both send(2) (target_addr == 0) and sendto(2).  If the fd
 * has a registered data translator (fd_trans), the payload is copied to
 * a scratch buffer and translated before being handed to the host.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    /* Non-NULL iff host_msg was swapped for a translated g_malloc copy. */
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate in a private copy; the locked guest page must not
         * be modified by a send. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Drop the scratch copy and restore the locked guest pointer so
         * unlock_user() below releases the right buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Implements both recv(2) (target_addr == 0) and recvfrom(2).  Received
 * data is translated in place via the fd's registered translator, if
 * any, before the guest buffer is unlocked/copied back.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translate only the bytes actually received. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success path: copy the received data back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: release the buffer without copying back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexer for architectures that funnel all socket operations
 * through the single socketcall(2) syscall: the argument vector is read
 * from guest memory (nargs[] gives the per-operation count) and the
 * call is dispatched to the matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
/* Maximum number of guest SysV shared-memory attachments tracked at once. */
#define N_SHM_REGIONS 32

/* Book-keeping for guest shmat() mappings so shmdt()/shmctl() can find
 * the guest address range to release. */
static struct shm_region {
    abi_ulong start;   /* guest virtual address of the attachment */
    abi_ulong size;    /* length of the mapped segment in bytes */
    bool in_use;       /* slot currently tracks a live attachment */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct: used when the target architecture
 * does not provide its own layout.  On 32-bit ABIs the time fields are
 * padded to 64 bits by the __unused slots, matching the kernel's
 * asm-generic/sembuf.h. */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;       /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;       /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;       /* number of semaphores in the set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Copy the ipc_perm embedded in a guest semid64_ds at target_addr into
 * the host struct.  Field widths differ per target: mode and __seq are
 * 32-bit on Alpha/MIPS/PPC but 16-bit elsewhere, hence the per-arch
 * swap selection below. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm into the
 * guest semid64_ds at target_addr, applying the same per-target field
 * width rules for mode and __seq. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid64_ds at target_addr to a host semid_ds.
 * The embedded ipc_perm is handled by target_to_host_ipc_perm(), which
 * locks the same guest struct a second time (lock_user_struct nests
 * safely here). */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_semid_ds(): write a host semid_ds back into
 * the guest semid64_ds at target_addr (used for IPC_STAT results). */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout counterpart of the host's struct seminfo (returned by
 * semctl IPC_INFO/SEM_INFO); all fields are plain ints on both sides. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo into guest memory at target_addr,
 * byte-swapping each int field via __put_user. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl(2) argument union; glibc does not define it, so each
 * caller must (see semctl(2) NOTES). */
union semun {
    int val;                   /* SETVAL value */
    struct semid_ds *buf;      /* IPC_STAT / IPC_SET buffer */
    unsigned short *array;     /* GETALL / SETALL value array */
    struct seminfo *__buf;     /* IPC_INFO buffer */
};
/* Guest-side view of union semun: the pointer members are guest
 * addresses (abi_ulong), not host pointers. */
union target_semun {
    int val;                   /* SETVAL value */
    abi_ulong buf;             /* guest address of semid_ds */
    abi_ulong array;           /* guest address of unsigned short array */
    abi_ulong __buf;           /* guest address of seminfo */
};
/* Read a guest SETALL value array into a freshly allocated host array.
 *
 * On success *host_array owns a g_try_new'd buffer the caller must free
 * (host_to_target_semarray() does so).  The set size is queried from the
 * kernel with IPC_STAT rather than trusted from the guest. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        /* NOTE(review): get_errno(-1) yields the negated *host* errno,
         * not a target errno — presumably close enough for the common
         * values; confirm against get_errno()'s definition. */
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3243 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3244 unsigned short **host_array)
3246 int nsems;
3247 unsigned short *array;
3248 union semun semun;
3249 struct semid_ds semid_ds;
3250 int i, ret;
3252 semun.buf = &semid_ds;
3254 ret = semctl(semid, 0, IPC_STAT, semun);
3255 if (ret == -1)
3256 return get_errno(ret);
3258 nsems = semid_ds.sem_nsems;
3260 array = lock_user(VERIFY_WRITE, target_addr,
3261 nsems*sizeof(unsigned short), 0);
3262 if (!array)
3263 return -TARGET_EFAULT;
3265 for(i=0; i<nsems; i++) {
3266 __put_user((*host_array)[i], &array[i]);
3268 g_free(*host_array);
3269 unlock_user(array, target_addr, 1);
3271 return 0;
/* Emulate semctl(2).  target_arg is the guest's "union semun" passed by
 * value; each command converts its operand to host format, issues the
 * host semctl(), and converts the result back.  Returns a target errno. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* host_to_target_semarray() frees the array allocated here. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands take no argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest layout of struct sembuf, the semop(2) operation descriptor. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index within the set */
    short sem_op;            /* semaphore operation value */
    short sem_flg;           /* operation flags (e.g. IPC_NOWAIT, SEM_UNDO) */
};
3350 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3351 abi_ulong target_addr,
3352 unsigned nsops)
3354 struct target_sembuf *target_sembuf;
3355 int i;
3357 target_sembuf = lock_user(VERIFY_READ, target_addr,
3358 nsops*sizeof(struct target_sembuf), 1);
3359 if (!target_sembuf)
3360 return -TARGET_EFAULT;
3362 for(i=0; i<nsops; i++) {
3363 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3364 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3365 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3368 unlock_user(target_sembuf, target_addr, 0);
3370 return 0;
/* Emulate semop(2), implemented via semtimedop with a NULL timeout. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest layout of struct msqid_ds (msgctl(2)).  On 32-bit ABIs each
 * time field is followed by a padding word, mirroring the kernel's
 * 32-bit layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/* Convert a guest struct msqid_ds (IPC_SET payload) to host format.
 * NOTE(review): if target_to_host_ipc_perm() fails, target_md is left
 * locked -- confirm whether unlock_user_struct is needed on that path. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
/* Copy a host struct msqid_ds (IPC_STAT result) out to the guest.
 * NOTE(review): if host_to_target_ipc_perm() fails, target_md is left
 * locked -- confirm whether unlock_user_struct is needed on that path. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Guest layout of struct msginfo (msgctl IPC_INFO / MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3460 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3461 struct msginfo *host_msginfo)
3463 struct target_msginfo *target_msginfo;
3464 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3465 return -TARGET_EFAULT;
3466 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3467 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3468 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3469 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3470 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3471 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3472 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3473 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3474 unlock_user_struct(target_msginfo, target_addr, 1);
3475 return 0;
/* Emulate msgctl(2): convert the command's operand between guest and
 * host layouts around the host msgctl() call.  Returns a target errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* For these commands the kernel returns a struct msginfo
         * through the msqid_ds pointer argument. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest layout of struct msgbuf: the type word followed by the message
 * payload (declared with size 1, used as a flexible trailing array). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest message into a freshly allocated
 * host msgbuf (byteswapping mtype), then issue the host syscall. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype word preceding the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a temporary host buffer, then copy the
 * payload and the byteswapped mtype back to the guest msgbuf at msgp. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* On success ret is the number of payload bytes received; copy
         * them just past the guest's mtype word. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* NOTE(review): target_mb is always non-NULL here since the lock
     * above succeeded; the check appears to be defensive only. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Convert a guest struct shmid_ds (IPC_SET payload) to host format.
 * NOTE(review): on ipc_perm conversion failure target_sd stays locked --
 * confirm whether an unlock is required on that path. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy a host struct shmid_ds (IPC_STAT result) out to the guest.
 * NOTE(review): on ipc_perm conversion failure target_sd stays locked --
 * confirm whether an unlock is required on that path. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest layout of struct shminfo (shmctl IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
3633 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3634 struct shminfo *host_shminfo)
3636 struct target_shminfo *target_shminfo;
3637 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3638 return -TARGET_EFAULT;
3639 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3640 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3641 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3642 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3643 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3644 unlock_user_struct(target_shminfo, target_addr, 1);
3645 return 0;
/* Guest layout of struct shm_info (shmctl SHM_INFO result). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
3657 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3658 struct shm_info *host_shm_info)
3660 struct target_shm_info *target_shm_info;
3661 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3662 return -TARGET_EFAULT;
3663 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3664 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3665 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3666 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3667 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3668 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3669 unlock_user_struct(target_shm_info, target_addr, 1);
3670 return 0;
/* Emulate shmctl(2): convert the command's operand between guest and
 * host layouts around the host shmctl() call.  Returns a target errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel returns a struct shminfo via the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* The kernel returns a struct shm_info via the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default SHMLBA for targets that do not override it: one guest page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2).  Attaches the host segment at the guest-requested
 * address (or one found via mmap_find_vma), updates the guest page
 * flags, and records the mapping in shm_regions[] so do_shmdt() can
 * later clear it.  Returns the guest attach address or a target errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce SHMLBA alignment: round down with SHM_RND, else reject. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: pick a free guest range ourselves. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the region so do_shmdt() can clear the page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
3801 static inline abi_long do_shmdt(abi_ulong shmaddr)
3803 int i;
3804 abi_long rv;
3806 mmap_lock();
3808 for (i = 0; i < N_SHM_REGIONS; ++i) {
3809 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3810 shm_regions[i].in_use = false;
3811 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3812 break;
3815 rv = get_errno(shmdt(g2h(shmaddr)));
3817 mmap_unlock();
3819 return rv;
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Dispatcher for the multiplexed ipc(2) syscall used by some
 * architectures: the low 16 bits of 'call' select the operation, the
 * high 16 bits carry the version. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: ptr points at a kludge struct
                 * bundling the msgbuf pointer and the message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First pass over syscall_types.h: produce a STRUCT_name enumerator for
 * every kernel structure known to the thunk layer. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type description (struct_name_def[]) for each
 * regular structure; STRUCT_SPECIAL entries are converted by hand. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Handler signature for ioctls that need custom argument conversion;
 * buf_temp is a scratch buffer of MAX_STRUCT_SIZE bytes. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;             /* guest ioctl request number */
    unsigned int host_cmd;      /* corresponding host request number */
    const char *name;           /* name for logging */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* non-NULL for special-cased conversions */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Upper bound on structures converted through the shared buf_temp. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Convert the FS_IOC_FIEMAP ioctl, whose result size depends on the
 * number of extents the kernel fills in. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4055 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4056 int fd, int cmd, abi_long arg)
4058 const argtype *arg_type = ie->arg_type;
4059 int target_size;
4060 void *argptr;
4061 int ret;
4062 struct ifconf *host_ifconf;
4063 uint32_t outbufsz;
4064 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4065 int target_ifreq_size;
4066 int nb_ifreq;
4067 int free_buf = 0;
4068 int i;
4069 int target_ifc_len;
4070 abi_long target_ifc_buf;
4071 int host_ifc_len;
4072 char *host_ifc_buf;
4074 assert(arg_type[0] == TYPE_PTR);
4075 assert(ie->access == IOC_RW);
4077 arg_type++;
4078 target_size = thunk_type_size(arg_type, 0);
4080 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4081 if (!argptr)
4082 return -TARGET_EFAULT;
4083 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4084 unlock_user(argptr, arg, 0);
4086 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4087 target_ifc_len = host_ifconf->ifc_len;
4088 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4090 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4091 nb_ifreq = target_ifc_len / target_ifreq_size;
4092 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4094 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4095 if (outbufsz > MAX_STRUCT_SIZE) {
4096 /* We can't fit all the extents into the fixed size buffer.
4097 * Allocate one that is large enough and use it instead.
4099 host_ifconf = malloc(outbufsz);
4100 if (!host_ifconf) {
4101 return -TARGET_ENOMEM;
4103 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4104 free_buf = 1;
4106 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4108 host_ifconf->ifc_len = host_ifc_len;
4109 host_ifconf->ifc_buf = host_ifc_buf;
4111 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4112 if (!is_error(ret)) {
4113 /* convert host ifc_len to target ifc_len */
4115 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4116 target_ifc_len = nb_ifreq * target_ifreq_size;
4117 host_ifconf->ifc_len = target_ifc_len;
4119 /* restore target ifc_buf */
4121 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4123 /* copy struct ifconf to target user */
4125 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4126 if (!argptr)
4127 return -TARGET_EFAULT;
4128 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4129 unlock_user(argptr, arg, target_size);
4131 /* copy ifreq[] to target user */
4133 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4134 for (i = 0; i < nb_ifreq ; i++) {
4135 thunk_convert(argptr + i * target_ifreq_size,
4136 host_ifc_buf + i * sizeof(struct ifreq),
4137 ifreq_arg_type, THUNK_TARGET);
4139 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4142 if (free_buf) {
4143 free(host_ifconf);
4146 return ret;
/* Convert device-mapper DM_* ioctls.  The argument is a struct dm_ioctl
 * followed by a command-specific variable-length payload at data_start;
 * both directions of the payload are converted through a temporary
 * buffer sized from the guest's data_size. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the guest payload to host format, per command. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* The leading sector number is a 64-bit integer to byteswap. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is a chain of dm_target_spec structs, each followed
         * by a NUL-terminated parameter string and linked by 'next'. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): argptr is not checked for NULL here before the
         * switch below dereferences it -- confirm intended behavior. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the host result payload back to guest format. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* Payload: a 32-bit count (padded to 8 bytes) followed by
             * an array of 64-bit device numbers. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally copy the (possibly updated) dm_ioctl header back. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/* Convert the BLKPG ioctl: struct blkpg_ioctl_arg carries an op code and
 * a pointer to a struct blkpg_partition payload, which is fetched and
 * converted separately before the host call. */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
4432 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4433 int fd, int cmd, abi_long arg)
4435 const argtype *arg_type = ie->arg_type;
4436 const StructEntry *se;
4437 const argtype *field_types;
4438 const int *dst_offsets, *src_offsets;
4439 int target_size;
4440 void *argptr;
4441 abi_ulong *target_rt_dev_ptr;
4442 unsigned long *host_rt_dev_ptr;
4443 abi_long ret;
4444 int i;
4446 assert(ie->access == IOC_W);
4447 assert(*arg_type == TYPE_PTR);
4448 arg_type++;
4449 assert(*arg_type == TYPE_STRUCT);
4450 target_size = thunk_type_size(arg_type, 0);
4451 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4452 if (!argptr) {
4453 return -TARGET_EFAULT;
4455 arg_type++;
4456 assert(*arg_type == (int)STRUCT_rtentry);
4457 se = struct_entries + *arg_type++;
4458 assert(se->convert[0] == NULL);
4459 /* convert struct here to be able to catch rt_dev string */
4460 field_types = se->field_types;
4461 dst_offsets = se->field_offsets[THUNK_HOST];
4462 src_offsets = se->field_offsets[THUNK_TARGET];
4463 for (i = 0; i < se->nb_fields; i++) {
4464 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4465 assert(*field_types == TYPE_PTRVOID);
4466 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4467 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4468 if (*target_rt_dev_ptr != 0) {
4469 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4470 tswapal(*target_rt_dev_ptr));
4471 if (!*host_rt_dev_ptr) {
4472 unlock_user(argptr, arg, 0);
4473 return -TARGET_EFAULT;
4475 } else {
4476 *host_rt_dev_ptr = 0;
4478 field_types++;
4479 continue;
4481 field_types = thunk_convert(buf_temp + dst_offsets[i],
4482 argptr + src_offsets[i],
4483 field_types, THUNK_HOST);
4485 unlock_user(argptr, arg, 0);
4487 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4488 if (*host_rt_dev_ptr != 0) {
4489 unlock_user((void *)*host_rt_dev_ptr,
4490 *target_rt_dev_ptr, 0);
4492 return ret;
4495 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4496 int fd, int cmd, abi_long arg)
4498 int sig = target_to_host_signal(arg);
4499 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
#ifdef TIOCGPTPEER
/*
 * TIOCGPTPEER takes open(2)-style flags; translate the target O_* bits
 * into host flags via fcntl_flags_tbl before calling the host ioctl.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
/*
 * Table mapping target ioctl numbers to host ioctl numbers, populated
 * by expanding the IOCTL* macros over ioctls.h.  IOCTL makes a plain
 * entry, IOCTL_SPECIAL attaches a custom conversion callback (dofn),
 * and IOCTL_IGNORE creates an entry with host_cmd == 0 that do_ioctl()
 * rejects with -TARGET_ENOSYS.  A zero target_cmd terminates the table.
 */
4511 static IOCTLEntry ioctl_entries[] = {
4512 #define IOCTL(cmd, access, ...) \
4513 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4514 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4515 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4516 #define IOCTL_IGNORE(cmd) \
4517 { TARGET_ ## cmd, 0, #cmd },
4518 #include "ioctls.h"
4519 { 0, 0, },
/*
 * Central ioctl dispatcher: look up cmd in ioctl_entries, then either
 * delegate to the entry's custom do_ioctl callback or perform generic
 * argument conversion driven by the entry's argtype description and
 * access mode (IOC_R / IOC_W / IOC_RW).
 */
4522 /* ??? Implement proper locking for ioctls. */
4523 /* do_ioctl() Must return target values and target errnos. */
4524 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4526 const IOCTLEntry *ie;
4527 const argtype *arg_type;
4528 abi_long ret;
4529 uint8_t buf_temp[MAX_STRUCT_SIZE];
4530 int target_size;
4531 void *argptr;
/* linear search; the table is terminated by a zero target_cmd */
4533 ie = ioctl_entries;
4534 for(;;) {
4535 if (ie->target_cmd == 0) {
4536 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4537 return -TARGET_ENOSYS;
4539 if (ie->target_cmd == cmd)
4540 break;
4541 ie++;
4543 arg_type = ie->arg_type;
4544 if (ie->do_ioctl) {
4545 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4546 } else if (!ie->host_cmd) {
4547 /* Some architectures define BSD ioctls in their headers
4548 that are not implemented in Linux. */
4549 return -TARGET_ENOSYS;
4552 switch(arg_type[0]) {
4553 case TYPE_NULL:
4554 /* no argument */
4555 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4556 break;
4557 case TYPE_PTRVOID:
4558 case TYPE_INT:
/* scalar argument is passed through unchanged */
4559 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4560 break;
4561 case TYPE_PTR:
4562 arg_type++;
4563 target_size = thunk_type_size(arg_type, 0);
4564 switch(ie->access) {
/* kernel writes: run the ioctl, then copy results out on success */
4565 case IOC_R:
4566 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4567 if (!is_error(ret)) {
4568 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4569 if (!argptr)
4570 return -TARGET_EFAULT;
4571 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4572 unlock_user(argptr, arg, target_size);
4574 break;
/* kernel reads: convert guest struct in, no copy back */
4575 case IOC_W:
4576 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4577 if (!argptr)
4578 return -TARGET_EFAULT;
4579 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4580 unlock_user(argptr, arg, 0);
4581 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4582 break;
/* read-write: convert in, run, convert back out on success */
4583 default:
4584 case IOC_RW:
4585 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4586 if (!argptr)
4587 return -TARGET_EFAULT;
4588 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4589 unlock_user(argptr, arg, 0);
4590 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4591 if (!is_error(ret)) {
4592 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4593 if (!argptr)
4594 return -TARGET_EFAULT;
4595 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4596 unlock_user(argptr, arg, target_size);
4598 break;
4600 break;
4601 default:
4602 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4603 (long)cmd, arg_type[0]);
4604 ret = -TARGET_ENOSYS;
4605 break;
4607 return ret;
/*
 * termios c_iflag (input mode) bits: target <-> host translation table
 * for target_to_host_bitmask()/host_to_target_bitmask().  Each row is
 * { target_mask, target_bits, host_mask, host_bits }; zero terminates.
 */
4610 static const bitmask_transtbl iflag_tbl[] = {
4611 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4612 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4613 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4614 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4615 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4616 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4617 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4618 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4619 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4620 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4621 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4622 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4623 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4624 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4625 { 0, 0, 0, 0 }
/*
 * termios c_oflag (output mode) bits: target <-> host translation
 * table.  Multi-bit delay fields (NLDLY, CRDLY, TABDLY, ...) get one
 * row per value under the same mask; zero terminates.
 */
4628 static const bitmask_transtbl oflag_tbl[] = {
4629 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4630 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4631 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4632 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4633 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4634 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4635 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4636 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4637 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4638 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4639 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4640 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4641 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4642 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4643 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4644 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4645 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4646 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4647 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4648 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4649 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4650 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4651 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4652 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4653 { 0, 0, 0, 0 }
/*
 * termios c_cflag (control mode) bits: target <-> host translation
 * table.  Baud rates share the CBAUD mask (one row per rate) and the
 * character size values share CSIZE; zero terminates.
 */
4656 static const bitmask_transtbl cflag_tbl[] = {
4657 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4658 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4659 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4660 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4661 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4662 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4663 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4664 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4665 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4666 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4667 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4668 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4669 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4670 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4671 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4672 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4673 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4674 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4675 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4676 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4677 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4678 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4679 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4680 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4681 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4682 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4683 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4684 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4685 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4686 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4687 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4688 { 0, 0, 0, 0 }
/*
 * termios c_lflag (local mode) bits: target <-> host translation
 * table; zero terminates.
 */
4691 static const bitmask_transtbl lflag_tbl[] = {
4692 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4693 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4694 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4695 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4696 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4697 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4698 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4699 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4700 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4701 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4702 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4703 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4704 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4705 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4706 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4707 { 0, 0, 0, 0 }
/*
 * Convert a target struct termios (src) into a host struct termios
 * (dst): translate each flag word through the bitmask tables above and
 * remap the control-character array index by index.  Host c_cc slots
 * with no target equivalent are left zeroed by the memset.
 */
4710 static void target_to_host_termios (void *dst, const void *src)
4712 struct host_termios *host = dst;
4713 const struct target_termios *target = src;
4715 host->c_iflag =
4716 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4717 host->c_oflag =
4718 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4719 host->c_cflag =
4720 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4721 host->c_lflag =
4722 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4723 host->c_line = target->c_line;
4725 memset(host->c_cc, 0, sizeof(host->c_cc));
4726 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4727 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4728 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4729 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4730 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4731 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4732 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4733 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4734 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4735 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4736 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4737 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4738 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4739 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4740 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4741 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4742 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios(): convert a host struct termios
 * (src) into the target layout (dst), translating flag words through
 * the bitmask tables and remapping the control-character array.
 */
4745 static void host_to_target_termios (void *dst, const void *src)
4747 struct target_termios *target = dst;
4748 const struct host_termios *host = src;
4750 target->c_iflag =
4751 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4752 target->c_oflag =
4753 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4754 target->c_cflag =
4755 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4756 target->c_lflag =
4757 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4758 target->c_line = host->c_line;
4760 memset(target->c_cc, 0, sizeof(target->c_cc));
4761 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4762 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4763 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4764 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4765 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4766 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4767 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4768 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4769 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4770 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4771 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4772 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4773 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4774 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4775 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4776 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4777 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * StructEntry describing struct termios for the thunk machinery:
 * convert[] holds { host->target, target->host } callbacks, and the
 * size/align pairs are indexed by { target, host }.
 */
4780 static const StructEntry struct_termios_def = {
4781 .convert = { host_to_target_termios, target_to_host_termios },
4782 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4783 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * mmap(2) flag bits: target <-> host translation table used when
 * emulating guest mmap calls; zero terminates.
 */
4786 static bitmask_transtbl mmap_flags_tbl[] = {
4787 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4788 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4789 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4790 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
4791 MAP_ANONYMOUS, MAP_ANONYMOUS },
4792 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
4793 MAP_GROWSDOWN, MAP_GROWSDOWN },
4794 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
4795 MAP_DENYWRITE, MAP_DENYWRITE },
4796 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
4797 MAP_EXECUTABLE, MAP_EXECUTABLE },
4798 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4799 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
4800 MAP_NORESERVE, MAP_NORESERVE },
4801 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
4802 /* MAP_STACK had been ignored by the kernel for quite some time.
4803 Recognize it for the target insofar as we do not want to pass
4804 it through to the host. */
4805 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
4806 { 0, 0, 0, 0 }
4809 #if defined(TARGET_I386)
4811 /* NOTE: there is really one LDT for all the threads */
/* Host-side view of the emulated LDT; allocated lazily by write_ldt()
 * via target_mmap() and read back by read_ldt(). */
4812 static uint8_t *ldt_table;
4814 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4816 int size;
4817 void *p;
4819 if (!ldt_table)
4820 return 0;
4821 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4822 if (size > bytecount)
4823 size = bytecount;
4824 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4825 if (!p)
4826 return -TARGET_EFAULT;
4827 /* ??? Should this by byteswapped? */
4828 memcpy(p, ldt_table, size);
4829 unlock_user(p, ptr, size);
4830 return size;
/*
 * modify_ldt(func == 1 or 0x11): install one LDT entry from the guest's
 * struct user_desc (target_modify_ldt_ldt_s) at ptr.  Mirrors the Linux
 * kernel's write_ldt(): validates the request, lazily allocates the LDT
 * backing store, packs the two 32-bit descriptor words and stores them.
 * oldmode selects the legacy semantics (no 'useable' bit, different
 * clear rules).  Returns 0 or a target errno.
 */
4833 /* XXX: add locking support */
4834 static abi_long write_ldt(CPUX86State *env,
4835 abi_ulong ptr, unsigned long bytecount, int oldmode)
4837 struct target_modify_ldt_ldt_s ldt_info;
4838 struct target_modify_ldt_ldt_s *target_ldt_info;
4839 int seg_32bit, contents, read_exec_only, limit_in_pages;
4840 int seg_not_present, useable, lm;
4841 uint32_t *lp, entry_1, entry_2;
4843 if (bytecount != sizeof(ldt_info))
4844 return -TARGET_EINVAL;
4845 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4846 return -TARGET_EFAULT;
4847 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4848 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4849 ldt_info.limit = tswap32(target_ldt_info->limit);
4850 ldt_info.flags = tswap32(target_ldt_info->flags);
4851 unlock_user_struct(target_ldt_info, ptr, 0);
4853 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4854 return -TARGET_EINVAL;
/* unpack the user_desc flag bits (same layout as the kernel's) */
4855 seg_32bit = ldt_info.flags & 1;
4856 contents = (ldt_info.flags >> 1) & 3;
4857 read_exec_only = (ldt_info.flags >> 3) & 1;
4858 limit_in_pages = (ldt_info.flags >> 4) & 1;
4859 seg_not_present = (ldt_info.flags >> 5) & 1;
4860 useable = (ldt_info.flags >> 6) & 1;
4861 #ifdef TARGET_ABI32
4862 lm = 0;
4863 #else
4864 lm = (ldt_info.flags >> 7) & 1;
4865 #endif
/* contents == 3 would be a conforming code segment: not permitted */
4866 if (contents == 3) {
4867 if (oldmode)
4868 return -TARGET_EINVAL;
4869 if (seg_not_present == 0)
4870 return -TARGET_EINVAL;
4872 /* allocate the LDT */
4873 if (!ldt_table) {
4874 env->ldt.base = target_mmap(0,
4875 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4876 PROT_READ|PROT_WRITE,
4877 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4878 if (env->ldt.base == -1)
4879 return -TARGET_ENOMEM;
4880 memset(g2h(env->ldt.base), 0,
4881 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4882 env->ldt.limit = 0xffff;
4883 ldt_table = g2h(env->ldt.base);
4886 /* NOTE: same code as Linux kernel */
4887 /* Allow LDTs to be cleared by the user. */
4888 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4889 if (oldmode ||
4890 (contents == 0 &&
4891 read_exec_only == 1 &&
4892 seg_32bit == 0 &&
4893 limit_in_pages == 0 &&
4894 seg_not_present == 1 &&
4895 useable == 0 )) {
4896 entry_1 = 0;
4897 entry_2 = 0;
4898 goto install;
4902 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4903 (ldt_info.limit & 0x0ffff);
4904 entry_2 = (ldt_info.base_addr & 0xff000000) |
4905 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4906 (ldt_info.limit & 0xf0000) |
4907 ((read_exec_only ^ 1) << 9) |
4908 (contents << 10) |
4909 ((seg_not_present ^ 1) << 15) |
4910 (seg_32bit << 22) |
4911 (limit_in_pages << 23) |
4912 (lm << 21) |
4913 0x7000;
4914 if (!oldmode)
4915 entry_2 |= (useable << 20);
4917 /* Install the new entry ... */
4918 install:
4919 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4920 lp[0] = tswap32(entry_1);
4921 lp[1] = tswap32(entry_2);
4922 return 0;
4925 /* specific and weird i386 syscalls */
4926 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4927 unsigned long bytecount)
4929 abi_long ret;
4931 switch (func) {
4932 case 0:
4933 ret = read_ldt(ptr, bytecount);
4934 break;
4935 case 1:
4936 ret = write_ldt(env, ptr, bytecount, 1);
4937 break;
4938 case 0x11:
4939 ret = write_ldt(env, ptr, bytecount, 0);
4940 break;
4941 default:
4942 ret = -TARGET_ENOSYS;
4943 break;
4945 return ret;
4948 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) for 32-bit x86 guests: install a TLS descriptor
 * into the emulated GDT.  If the guest passes entry_number == -1 a free
 * TLS slot is picked and written back to the guest struct.  Packs the
 * descriptor words exactly like the kernel's set_thread_area().
 */
4949 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4951 uint64_t *gdt_table = g2h(env->gdt.base);
4952 struct target_modify_ldt_ldt_s ldt_info;
4953 struct target_modify_ldt_ldt_s *target_ldt_info;
4954 int seg_32bit, contents, read_exec_only, limit_in_pages;
4955 int seg_not_present, useable, lm;
4956 uint32_t *lp, entry_1, entry_2;
4957 int i;
4959 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4960 if (!target_ldt_info)
4961 return -TARGET_EFAULT;
4962 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4963 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4964 ldt_info.limit = tswap32(target_ldt_info->limit);
4965 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 asks us to allocate a free TLS slot and report it back */
4966 if (ldt_info.entry_number == -1) {
4967 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4968 if (gdt_table[i] == 0) {
4969 ldt_info.entry_number = i;
4970 target_ldt_info->entry_number = tswap32(i);
4971 break;
4975 unlock_user_struct(target_ldt_info, ptr, 1);
4977 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4978 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4979 return -TARGET_EINVAL;
/* unpack the user_desc flag bits */
4980 seg_32bit = ldt_info.flags & 1;
4981 contents = (ldt_info.flags >> 1) & 3;
4982 read_exec_only = (ldt_info.flags >> 3) & 1;
4983 limit_in_pages = (ldt_info.flags >> 4) & 1;
4984 seg_not_present = (ldt_info.flags >> 5) & 1;
4985 useable = (ldt_info.flags >> 6) & 1;
4986 #ifdef TARGET_ABI32
4987 lm = 0;
4988 #else
4989 lm = (ldt_info.flags >> 7) & 1;
4990 #endif
4992 if (contents == 3) {
4993 if (seg_not_present == 0)
4994 return -TARGET_EINVAL;
4997 /* NOTE: same code as Linux kernel */
4998 /* Allow LDTs to be cleared by the user. */
4999 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5000 if ((contents == 0 &&
5001 read_exec_only == 1 &&
5002 seg_32bit == 0 &&
5003 limit_in_pages == 0 &&
5004 seg_not_present == 1 &&
5005 useable == 0 )) {
5006 entry_1 = 0;
5007 entry_2 = 0;
5008 goto install;
5012 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5013 (ldt_info.limit & 0x0ffff);
5014 entry_2 = (ldt_info.base_addr & 0xff000000) |
5015 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5016 (ldt_info.limit & 0xf0000) |
5017 ((read_exec_only ^ 1) << 9) |
5018 (contents << 10) |
5019 ((seg_not_present ^ 1) << 15) |
5020 (seg_32bit << 22) |
5021 (limit_in_pages << 23) |
5022 (useable << 20) |
5023 (lm << 21) |
5024 0x7000;
5026 /* Install the new entry ... */
5027 install:
5028 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5029 lp[0] = tswap32(entry_1);
5030 lp[1] = tswap32(entry_2);
5031 return 0;
/*
 * get_thread_area(2) for 32-bit x86 guests: read the TLS descriptor at
 * the guest-supplied entry_number out of the emulated GDT, unpack the
 * two descriptor words back into user_desc fields, and write them to
 * the guest struct at ptr.
 */
5034 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5036 struct target_modify_ldt_ldt_s *target_ldt_info;
5037 uint64_t *gdt_table = g2h(env->gdt.base);
5038 uint32_t base_addr, limit, flags;
5039 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5040 int seg_not_present, useable, lm;
5041 uint32_t *lp, entry_1, entry_2;
5043 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5044 if (!target_ldt_info)
5045 return -TARGET_EFAULT;
5046 idx = tswap32(target_ldt_info->entry_number);
5047 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5048 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5049 unlock_user_struct(target_ldt_info, ptr, 1);
5050 return -TARGET_EINVAL;
5052 lp = (uint32_t *)(gdt_table + idx);
5053 entry_1 = tswap32(lp[0]);
5054 entry_2 = tswap32(lp[1]);
/* inverse of the packing performed by do_set_thread_area() */
5056 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5057 contents = (entry_2 >> 10) & 3;
5058 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5059 seg_32bit = (entry_2 >> 22) & 1;
5060 limit_in_pages = (entry_2 >> 23) & 1;
5061 useable = (entry_2 >> 20) & 1;
5062 #ifdef TARGET_ABI32
5063 lm = 0;
5064 #else
5065 lm = (entry_2 >> 21) & 1;
5066 #endif
5067 flags = (seg_32bit << 0) | (contents << 1) |
5068 (read_exec_only << 3) | (limit_in_pages << 4) |
5069 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5070 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5071 base_addr = (entry_1 >> 16) |
5072 (entry_2 & 0xff000000) |
5073 ((entry_2 & 0xff) << 16);
5074 target_ldt_info->base_addr = tswapal(base_addr);
5075 target_ldt_info->limit = tswap32(limit);
5076 target_ldt_info->flags = tswap32(flags);
5077 unlock_user_struct(target_ldt_info, ptr, 1);
5078 return 0;
5080 #endif /* TARGET_I386 && TARGET_ABI32 */
5082 #ifndef TARGET_ABI32
5083 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5085 abi_long ret = 0;
5086 abi_ulong val;
5087 int idx;
5089 switch(code) {
5090 case TARGET_ARCH_SET_GS:
5091 case TARGET_ARCH_SET_FS:
5092 if (code == TARGET_ARCH_SET_GS)
5093 idx = R_GS;
5094 else
5095 idx = R_FS;
5096 cpu_x86_load_seg(env, idx, 0);
5097 env->segs[idx].base = addr;
5098 break;
5099 case TARGET_ARCH_GET_GS:
5100 case TARGET_ARCH_GET_FS:
5101 if (code == TARGET_ARCH_GET_GS)
5102 idx = R_GS;
5103 else
5104 idx = R_FS;
5105 val = env->segs[idx].base;
5106 if (put_user(val, addr, abi_ulong))
5107 ret = -TARGET_EFAULT;
5108 break;
5109 default:
5110 ret = -TARGET_EINVAL;
5111 break;
5113 return ret;
5115 #endif
5117 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
5119 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so CPU/TLS setup appears atomic. */
5122 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Handshake structure passed from do_fork() to clone_func(): carries
 * the new CPU state in, and the child's tid back out, with mutex/cond
 * used to signal that the child has finished initializing.
 */
5123 typedef struct {
5124 CPUArchState *env;
5125 pthread_mutex_t mutex;
5126 pthread_cond_t cond;
5127 pthread_t thread;
5128 uint32_t tid;
5129 abi_ulong child_tidptr;
5130 abi_ulong parent_tidptr;
5131 sigset_t sigmask;
5132 } new_thread_info;
/*
 * Start routine for a CLONE_VM child thread created by do_fork():
 * registers the thread with RCU/TCG, publishes its tid to the
 * requested child/parent locations, restores the signal mask that
 * do_fork() blocked, signals the parent via the info handshake, then
 * waits on clone_lock until the parent has finished TLS setup before
 * entering the guest cpu_loop().  Never returns.
 */
5134 static void *clone_func(void *arg)
5136 new_thread_info *info = arg;
5137 CPUArchState *env;
5138 CPUState *cpu;
5139 TaskState *ts;
5141 rcu_register_thread();
5142 tcg_register_thread();
5143 env = info->env;
5144 cpu = ENV_GET_CPU(env);
5145 thread_cpu = cpu;
5146 ts = (TaskState *)cpu->opaque;
5147 info->tid = gettid();
5148 task_settid(ts);
5149 if (info->child_tidptr)
5150 put_user_u32(info->tid, info->child_tidptr);
5151 if (info->parent_tidptr)
5152 put_user_u32(info->tid, info->parent_tidptr);
5153 /* Enable signals. */
5154 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5155 /* Signal to the parent that we're ready. */
5156 pthread_mutex_lock(&info->mutex);
5157 pthread_cond_broadcast(&info->cond);
5158 pthread_mutex_unlock(&info->mutex);
5159 /* Wait until the parent has finished initializing the tls state. */
5160 pthread_mutex_lock(&clone_lock);
5161 pthread_mutex_unlock(&clone_lock);
5162 cpu_loop(env);
5163 /* never exits */
5164 return NULL;
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests are mapped to a
 * host pthread sharing this process's address space (only the exact
 * pthread-compatible flag combination is supported); anything else is
 * treated as a fork().  vfork() is emulated with a plain fork.
 */
5167 /* do_fork() Must return host values and target errnos (unlike most
5168 do_*() functions). */
5169 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5170 abi_ulong parent_tidptr, target_ulong newtls,
5171 abi_ulong child_tidptr)
5173 CPUState *cpu = ENV_GET_CPU(env);
5174 int ret;
5175 TaskState *ts;
5176 CPUState *new_cpu;
5177 CPUArchState *new_env;
5178 sigset_t sigmask;
5180 flags &= ~CLONE_IGNORED_FLAGS;
5182 /* Emulate vfork() with fork() */
5183 if (flags & CLONE_VFORK)
5184 flags &= ~(CLONE_VFORK | CLONE_VM);
5186 if (flags & CLONE_VM) {
5187 TaskState *parent_ts = (TaskState *)cpu->opaque;
5188 new_thread_info info;
5189 pthread_attr_t attr;
/* only the pthread-style flag combination can be honoured */
5191 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5192 (flags & CLONE_INVALID_THREAD_FLAGS)) {
5193 return -TARGET_EINVAL;
5196 ts = g_new0(TaskState, 1);
5197 init_task_state(ts);
5199 /* Grab a mutex so that thread setup appears atomic. */
5200 pthread_mutex_lock(&clone_lock);
5202 /* we create a new CPU instance. */
5203 new_env = cpu_copy(env);
5204 /* Init regs that differ from the parent. */
5205 cpu_clone_regs(new_env, newsp);
5206 new_cpu = ENV_GET_CPU(new_env);
5207 new_cpu->opaque = ts;
5208 ts->bprm = parent_ts->bprm;
5209 ts->info = parent_ts->info;
5210 ts->signal_mask = parent_ts->signal_mask;
5212 if (flags & CLONE_CHILD_CLEARTID) {
5213 ts->child_tidptr = child_tidptr;
5216 if (flags & CLONE_SETTLS) {
5217 cpu_set_tls (new_env, newtls);
5220 memset(&info, 0, sizeof(info));
5221 pthread_mutex_init(&info.mutex, NULL);
5222 pthread_mutex_lock(&info.mutex);
5223 pthread_cond_init(&info.cond, NULL);
5224 info.env = new_env;
5225 if (flags & CLONE_CHILD_SETTID) {
5226 info.child_tidptr = child_tidptr;
5228 if (flags & CLONE_PARENT_SETTID) {
5229 info.parent_tidptr = parent_tidptr;
/* NOTE(review): the pthread_attr_* return values are discarded (ret is
 * immediately overwritten); failures here are silently ignored. */
5232 ret = pthread_attr_init(&attr);
5233 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5234 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5235 /* It is not safe to deliver signals until the child has finished
5236 initializing, so temporarily block all signals. */
5237 sigfillset(&sigmask);
5238 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5240 /* If this is our first additional thread, we need to ensure we
5241 * generate code for parallel execution and flush old translations.
5243 if (!parallel_cpus) {
5244 parallel_cpus = true;
5245 tb_flush(cpu);
5248 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5249 /* TODO: Free new CPU state if thread creation failed. */
5251 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5252 pthread_attr_destroy(&attr);
5253 if (ret == 0) {
5254 /* Wait for the child to initialize. */
5255 pthread_cond_wait(&info.cond, &info.mutex);
5256 ret = info.tid;
5257 } else {
5258 ret = -1;
5260 pthread_mutex_unlock(&info.mutex);
5261 pthread_cond_destroy(&info.cond);
5262 pthread_mutex_destroy(&info.mutex);
5263 pthread_mutex_unlock(&clone_lock);
5264 } else {
5265 /* if no CLONE_VM, we consider it is a fork */
5266 if (flags & CLONE_INVALID_FORK_FLAGS) {
5267 return -TARGET_EINVAL;
5270 /* We can't support custom termination signals */
5271 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5272 return -TARGET_EINVAL;
5275 if (block_signals()) {
5276 return -TARGET_ERESTARTSYS;
5279 fork_start();
5280 ret = fork();
5281 if (ret == 0) {
5282 /* Child Process. */
5283 cpu_clone_regs(env, newsp);
5284 fork_end(1);
5285 /* There is a race condition here. The parent process could
5286 theoretically read the TID in the child process before the child
5287 tid is set. This would require using either ptrace
5288 (not implemented) or having *_tidptr to point at a shared memory
5289 mapping. We can't repeat the spinlock hack used above because
5290 the child process gets its own copy of the lock. */
5291 if (flags & CLONE_CHILD_SETTID)
5292 put_user_u32(gettid(), child_tidptr);
5293 if (flags & CLONE_PARENT_SETTID)
5294 put_user_u32(gettid(), parent_tidptr);
5295 ts = (TaskState *)cpu->opaque;
5296 if (flags & CLONE_SETTLS)
5297 cpu_set_tls (env, newtls);
5298 if (flags & CLONE_CHILD_CLEARTID)
5299 ts->child_tidptr = child_tidptr;
5300 } else {
5301 fork_end(0);
5304 return ret;
/*
 * Translate a target fcntl(2) command number to the host's.  32-bit
 * targets additionally map the *LK64 commands onto the host 64-bit
 * lock commands.  Returns -TARGET_EINVAL for commands we don't handle.
 */
5307 /* warning : doesn't handle linux specific flags... */
5308 static int target_to_host_fcntl_cmd(int cmd)
5310 int ret;
5312 switch(cmd) {
5313 case TARGET_F_DUPFD:
5314 case TARGET_F_GETFD:
5315 case TARGET_F_SETFD:
5316 case TARGET_F_GETFL:
5317 case TARGET_F_SETFL:
5318 ret = cmd;
5319 break;
5320 case TARGET_F_GETLK:
5321 ret = F_GETLK64;
5322 break;
5323 case TARGET_F_SETLK:
5324 ret = F_SETLK64;
5325 break;
5326 case TARGET_F_SETLKW:
5327 ret = F_SETLKW64;
5328 break;
5329 case TARGET_F_GETOWN:
5330 ret = F_GETOWN;
5331 break;
5332 case TARGET_F_SETOWN:
5333 ret = F_SETOWN;
5334 break;
5335 case TARGET_F_GETSIG:
5336 ret = F_GETSIG;
5337 break;
5338 case TARGET_F_SETSIG:
5339 ret = F_SETSIG;
5340 break;
5341 #if TARGET_ABI_BITS == 32
5342 case TARGET_F_GETLK64:
5343 ret = F_GETLK64;
5344 break;
5345 case TARGET_F_SETLK64:
5346 ret = F_SETLK64;
5347 break;
5348 case TARGET_F_SETLKW64:
5349 ret = F_SETLKW64;
5350 break;
5351 #endif
5352 case TARGET_F_SETLEASE:
5353 ret = F_SETLEASE;
5354 break;
5355 case TARGET_F_GETLEASE:
5356 ret = F_GETLEASE;
5357 break;
5358 #ifdef F_DUPFD_CLOEXEC
5359 case TARGET_F_DUPFD_CLOEXEC:
5360 ret = F_DUPFD_CLOEXEC;
5361 break;
5362 #endif
5363 case TARGET_F_NOTIFY:
5364 ret = F_NOTIFY;
5365 break;
5366 #ifdef F_GETOWN_EX
5367 case TARGET_F_GETOWN_EX:
5368 ret = F_GETOWN_EX;
5369 break;
5370 #endif
5371 #ifdef F_SETOWN_EX
5372 case TARGET_F_SETOWN_EX:
5373 ret = F_SETOWN_EX;
5374 break;
5375 #endif
5376 #ifdef F_SETPIPE_SZ
5377 case TARGET_F_SETPIPE_SZ:
5378 ret = F_SETPIPE_SZ;
5379 break;
5380 case TARGET_F_GETPIPE_SZ:
5381 ret = F_GETPIPE_SZ;
5382 break;
5383 #endif
5384 default:
5385 ret = -TARGET_EINVAL;
5386 break;
5389 #if defined(__powerpc64__)
5390 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5391 * is not supported by kernel. The glibc fcntl call actually adjusts
5392 * them to 5, 6 and 7 before making the syscall(). Since we make the
5393 * syscall directly, adjust to what is supported by the kernel.
5395 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5396 ret -= F_GETLK64 - 5;
5398 #endif
5400 return ret;
5403 #define FLOCK_TRANSTBL \
5404 switch (type) { \
5405 TRANSTBL_CONVERT(F_RDLCK); \
5406 TRANSTBL_CONVERT(F_WRLCK); \
5407 TRANSTBL_CONVERT(F_UNLCK); \
5408 TRANSTBL_CONVERT(F_EXLCK); \
5409 TRANSTBL_CONVERT(F_SHLCK); \
5412 static int target_to_host_flock(int type)
5414 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5415 FLOCK_TRANSTBL
5416 #undef TRANSTBL_CONVERT
5417 return -TARGET_EINVAL;
5420 static int host_to_target_flock(int type)
5422 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5423 FLOCK_TRANSTBL
5424 #undef TRANSTBL_CONVERT
5425 /* if we don't know how to convert the value coming
5426 * from the host we copy to the target field as-is
5428 return type;
5431 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5432 abi_ulong target_flock_addr)
5434 struct target_flock *target_fl;
5435 int l_type;
5437 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5438 return -TARGET_EFAULT;
5441 __get_user(l_type, &target_fl->l_type);
5442 l_type = target_to_host_flock(l_type);
5443 if (l_type < 0) {
5444 return l_type;
5446 fl->l_type = l_type;
5447 __get_user(fl->l_whence, &target_fl->l_whence);
5448 __get_user(fl->l_start, &target_fl->l_start);
5449 __get_user(fl->l_len, &target_fl->l_len);
5450 __get_user(fl->l_pid, &target_fl->l_pid);
5451 unlock_user_struct(target_fl, target_flock_addr, 0);
5452 return 0;
5455 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5456 const struct flock64 *fl)
5458 struct target_flock *target_fl;
5459 short l_type;
5461 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5462 return -TARGET_EFAULT;
5465 l_type = host_to_target_flock(fl->l_type);
5466 __put_user(l_type, &target_fl->l_type);
5467 __put_user(fl->l_whence, &target_fl->l_whence);
5468 __put_user(fl->l_start, &target_fl->l_start);
5469 __put_user(fl->l_len, &target_fl->l_len);
5470 __put_user(fl->l_pid, &target_fl->l_pid);
5471 unlock_user_struct(target_fl, target_flock_addr, 1);
5472 return 0;
5475 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5476 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5478 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5479 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5480 abi_ulong target_flock_addr)
5482 struct target_oabi_flock64 *target_fl;
5483 int l_type;
5485 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5486 return -TARGET_EFAULT;
5489 __get_user(l_type, &target_fl->l_type);
5490 l_type = target_to_host_flock(l_type);
5491 if (l_type < 0) {
5492 return l_type;
5494 fl->l_type = l_type;
5495 __get_user(fl->l_whence, &target_fl->l_whence);
5496 __get_user(fl->l_start, &target_fl->l_start);
5497 __get_user(fl->l_len, &target_fl->l_len);
5498 __get_user(fl->l_pid, &target_fl->l_pid);
5499 unlock_user_struct(target_fl, target_flock_addr, 0);
5500 return 0;
5503 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5504 const struct flock64 *fl)
5506 struct target_oabi_flock64 *target_fl;
5507 short l_type;
5509 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5510 return -TARGET_EFAULT;
5513 l_type = host_to_target_flock(fl->l_type);
5514 __put_user(l_type, &target_fl->l_type);
5515 __put_user(fl->l_whence, &target_fl->l_whence);
5516 __put_user(fl->l_start, &target_fl->l_start);
5517 __put_user(fl->l_len, &target_fl->l_len);
5518 __put_user(fl->l_pid, &target_fl->l_pid);
5519 unlock_user_struct(target_fl, target_flock_addr, 1);
5520 return 0;
5522 #endif
5524 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5525 abi_ulong target_flock_addr)
5527 struct target_flock64 *target_fl;
5528 int l_type;
5530 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5531 return -TARGET_EFAULT;
5534 __get_user(l_type, &target_fl->l_type);
5535 l_type = target_to_host_flock(l_type);
5536 if (l_type < 0) {
5537 return l_type;
5539 fl->l_type = l_type;
5540 __get_user(fl->l_whence, &target_fl->l_whence);
5541 __get_user(fl->l_start, &target_fl->l_start);
5542 __get_user(fl->l_len, &target_fl->l_len);
5543 __get_user(fl->l_pid, &target_fl->l_pid);
5544 unlock_user_struct(target_fl, target_flock_addr, 0);
5545 return 0;
5548 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5549 const struct flock64 *fl)
5551 struct target_flock64 *target_fl;
5552 short l_type;
5554 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5555 return -TARGET_EFAULT;
5558 l_type = host_to_target_flock(fl->l_type);
5559 __put_user(l_type, &target_fl->l_type);
5560 __put_user(fl->l_whence, &target_fl->l_whence);
5561 __put_user(fl->l_start, &target_fl->l_start);
5562 __put_user(fl->l_len, &target_fl->l_len);
5563 __put_user(fl->l_pid, &target_fl->l_pid);
5564 unlock_user_struct(target_fl, target_flock_addr, 1);
5565 return 0;
5568 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5570 struct flock64 fl64;
5571 #ifdef F_GETOWN_EX
5572 struct f_owner_ex fox;
5573 struct target_f_owner_ex *target_fox;
5574 #endif
5575 abi_long ret;
5576 int host_cmd = target_to_host_fcntl_cmd(cmd);
5578 if (host_cmd == -TARGET_EINVAL)
5579 return host_cmd;
5581 switch(cmd) {
5582 case TARGET_F_GETLK:
5583 ret = copy_from_user_flock(&fl64, arg);
5584 if (ret) {
5585 return ret;
5587 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5588 if (ret == 0) {
5589 ret = copy_to_user_flock(arg, &fl64);
5591 break;
5593 case TARGET_F_SETLK:
5594 case TARGET_F_SETLKW:
5595 ret = copy_from_user_flock(&fl64, arg);
5596 if (ret) {
5597 return ret;
5599 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5600 break;
5602 case TARGET_F_GETLK64:
5603 ret = copy_from_user_flock64(&fl64, arg);
5604 if (ret) {
5605 return ret;
5607 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5608 if (ret == 0) {
5609 ret = copy_to_user_flock64(arg, &fl64);
5611 break;
5612 case TARGET_F_SETLK64:
5613 case TARGET_F_SETLKW64:
5614 ret = copy_from_user_flock64(&fl64, arg);
5615 if (ret) {
5616 return ret;
5618 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5619 break;
5621 case TARGET_F_GETFL:
5622 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5623 if (ret >= 0) {
5624 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5626 break;
5628 case TARGET_F_SETFL:
5629 ret = get_errno(safe_fcntl(fd, host_cmd,
5630 target_to_host_bitmask(arg,
5631 fcntl_flags_tbl)));
5632 break;
5634 #ifdef F_GETOWN_EX
5635 case TARGET_F_GETOWN_EX:
5636 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5637 if (ret >= 0) {
5638 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5639 return -TARGET_EFAULT;
5640 target_fox->type = tswap32(fox.type);
5641 target_fox->pid = tswap32(fox.pid);
5642 unlock_user_struct(target_fox, arg, 1);
5644 break;
5645 #endif
5647 #ifdef F_SETOWN_EX
5648 case TARGET_F_SETOWN_EX:
5649 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5650 return -TARGET_EFAULT;
5651 fox.type = tswap32(target_fox->type);
5652 fox.pid = tswap32(target_fox->pid);
5653 unlock_user_struct(target_fox, arg, 0);
5654 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5655 break;
5656 #endif
5658 case TARGET_F_SETOWN:
5659 case TARGET_F_GETOWN:
5660 case TARGET_F_SETSIG:
5661 case TARGET_F_GETSIG:
5662 case TARGET_F_SETLEASE:
5663 case TARGET_F_GETLEASE:
5664 case TARGET_F_SETPIPE_SZ:
5665 case TARGET_F_GETPIPE_SZ:
5666 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5667 break;
5669 default:
5670 ret = get_errno(safe_fcntl(fd, cmd, arg));
5671 break;
5673 return ret;
#ifdef USE_UID16

/* Clamp a 32-bit uid into the 16-bit range; out-of-range ids map to the
 * conventional "overflow" id 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit id, preserving -1 as the "no change" sentinel. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid targets: all conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
5742 /* We must do direct syscalls for setting UID/GID, because we want to
5743 * implement the Linux system call semantics of "change only for this thread",
5744 * not the libc/POSIX semantics of "change for all threads in process".
5745 * (See http://ewontfix.com/17/ for more details.)
5746 * We use the 32-bit version of the syscalls if present; if it is not
5747 * then either the host architecture supports 32-bit UIDs natively with
5748 * the standard syscall, or the 16-bit UID is the best we can do.
5750 #ifdef __NR_setuid32
5751 #define __NR_sys_setuid __NR_setuid32
5752 #else
5753 #define __NR_sys_setuid __NR_setuid
5754 #endif
5755 #ifdef __NR_setgid32
5756 #define __NR_sys_setgid __NR_setgid32
5757 #else
5758 #define __NR_sys_setgid __NR_setgid
5759 #endif
5760 #ifdef __NR_setresuid32
5761 #define __NR_sys_setresuid __NR_setresuid32
5762 #else
5763 #define __NR_sys_setresuid __NR_setresuid
5764 #endif
5765 #ifdef __NR_setresgid32
5766 #define __NR_sys_setresgid __NR_setresgid32
5767 #else
5768 #define __NR_sys_setresgid __NR_setresgid
5769 #endif
5771 _syscall1(int, sys_setuid, uid_t, uid)
5772 _syscall1(int, sys_setgid, gid_t, gid)
5773 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
5774 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
5776 void syscall_init(void)
5778 IOCTLEntry *ie;
5779 const argtype *arg_type;
5780 int size;
5781 int i;
5783 thunk_init(STRUCT_MAX);
5785 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5786 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5787 #include "syscall_types.h"
5788 #undef STRUCT
5789 #undef STRUCT_SPECIAL
5791 /* Build target_to_host_errno_table[] table from
5792 * host_to_target_errno_table[]. */
5793 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5794 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5797 /* we patch the ioctl size if necessary. We rely on the fact that
5798 no ioctl has all the bits at '1' in the size field */
5799 ie = ioctl_entries;
5800 while (ie->target_cmd != 0) {
5801 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5802 TARGET_IOC_SIZEMASK) {
5803 arg_type = ie->arg_type;
5804 if (arg_type[0] != TYPE_PTR) {
5805 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5806 ie->target_cmd);
5807 exit(1);
5809 arg_type++;
5810 size = thunk_type_size(arg_type, 0);
5811 ie->target_cmd = (ie->target_cmd &
5812 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5813 (size << TARGET_IOC_SIZESHIFT);
5816 /* automatic consistency check if same arch */
5817 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5818 (defined(__x86_64__) && defined(TARGET_X86_64))
5819 if (unlikely(ie->target_cmd != ie->host_cmd)) {
5820 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5821 ie->name, ie->target_cmd, ie->host_cmd);
5823 #endif
5824 ie++;
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit value passed in two guest registers; which word
 * is the high half depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value already fits in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs that require 64-bit register pairs to be aligned,
 * the pair is shifted up one slot, so skip the padding argument. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
5872 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5873 abi_ulong target_addr)
5875 struct target_timespec *target_ts;
5877 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5878 return -TARGET_EFAULT;
5879 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
5880 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5881 unlock_user_struct(target_ts, target_addr, 0);
5882 return 0;
5885 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5886 struct timespec *host_ts)
5888 struct target_timespec *target_ts;
5890 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5891 return -TARGET_EFAULT;
5892 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
5893 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5894 unlock_user_struct(target_ts, target_addr, 1);
5895 return 0;
5898 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5899 abi_ulong target_addr)
5901 struct target_itimerspec *target_itspec;
5903 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5904 return -TARGET_EFAULT;
5907 host_itspec->it_interval.tv_sec =
5908 tswapal(target_itspec->it_interval.tv_sec);
5909 host_itspec->it_interval.tv_nsec =
5910 tswapal(target_itspec->it_interval.tv_nsec);
5911 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5912 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5914 unlock_user_struct(target_itspec, target_addr, 1);
5915 return 0;
5918 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5919 struct itimerspec *host_its)
5921 struct target_itimerspec *target_itspec;
5923 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5924 return -TARGET_EFAULT;
5927 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5928 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5930 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5931 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5933 unlock_user_struct(target_itspec, target_addr, 0);
5934 return 0;
5937 static inline abi_long target_to_host_timex(struct timex *host_tx,
5938 abi_long target_addr)
5940 struct target_timex *target_tx;
5942 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
5943 return -TARGET_EFAULT;
5946 __get_user(host_tx->modes, &target_tx->modes);
5947 __get_user(host_tx->offset, &target_tx->offset);
5948 __get_user(host_tx->freq, &target_tx->freq);
5949 __get_user(host_tx->maxerror, &target_tx->maxerror);
5950 __get_user(host_tx->esterror, &target_tx->esterror);
5951 __get_user(host_tx->status, &target_tx->status);
5952 __get_user(host_tx->constant, &target_tx->constant);
5953 __get_user(host_tx->precision, &target_tx->precision);
5954 __get_user(host_tx->tolerance, &target_tx->tolerance);
5955 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
5956 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
5957 __get_user(host_tx->tick, &target_tx->tick);
5958 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
5959 __get_user(host_tx->jitter, &target_tx->jitter);
5960 __get_user(host_tx->shift, &target_tx->shift);
5961 __get_user(host_tx->stabil, &target_tx->stabil);
5962 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
5963 __get_user(host_tx->calcnt, &target_tx->calcnt);
5964 __get_user(host_tx->errcnt, &target_tx->errcnt);
5965 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
5966 __get_user(host_tx->tai, &target_tx->tai);
5968 unlock_user_struct(target_tx, target_addr, 0);
5969 return 0;
5972 static inline abi_long host_to_target_timex(abi_long target_addr,
5973 struct timex *host_tx)
5975 struct target_timex *target_tx;
5977 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
5978 return -TARGET_EFAULT;
5981 __put_user(host_tx->modes, &target_tx->modes);
5982 __put_user(host_tx->offset, &target_tx->offset);
5983 __put_user(host_tx->freq, &target_tx->freq);
5984 __put_user(host_tx->maxerror, &target_tx->maxerror);
5985 __put_user(host_tx->esterror, &target_tx->esterror);
5986 __put_user(host_tx->status, &target_tx->status);
5987 __put_user(host_tx->constant, &target_tx->constant);
5988 __put_user(host_tx->precision, &target_tx->precision);
5989 __put_user(host_tx->tolerance, &target_tx->tolerance);
5990 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
5991 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
5992 __put_user(host_tx->tick, &target_tx->tick);
5993 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
5994 __put_user(host_tx->jitter, &target_tx->jitter);
5995 __put_user(host_tx->shift, &target_tx->shift);
5996 __put_user(host_tx->stabil, &target_tx->stabil);
5997 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
5998 __put_user(host_tx->calcnt, &target_tx->calcnt);
5999 __put_user(host_tx->errcnt, &target_tx->errcnt);
6000 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6001 __put_user(host_tx->tai, &target_tx->tai);
6003 unlock_user_struct(target_tx, target_addr, 1);
6004 return 0;
6008 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6009 abi_ulong target_addr)
6011 struct target_sigevent *target_sevp;
6013 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6014 return -TARGET_EFAULT;
6017 /* This union is awkward on 64 bit systems because it has a 32 bit
6018 * integer and a pointer in it; we follow the conversion approach
6019 * used for handling sigval types in signal.c so the guest should get
6020 * the correct value back even if we did a 64 bit byteswap and it's
6021 * using the 32 bit integer.
6023 host_sevp->sigev_value.sival_ptr =
6024 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6025 host_sevp->sigev_signo =
6026 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6027 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6028 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6030 unlock_user_struct(target_sevp, target_addr, 1);
6031 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/* Write a host 'struct stat' out as the guest's stat64 layout.
 * ARM EABI guests have their own layout; everything else uses the
 * generic target_stat64 (or target_stat when there is no 64-bit form).
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
6116 /* ??? Using host futex calls even when target atomic operations
6117 are not really atomic probably breaks things. However implementing
6118 futexes locally would make futexes shared between multiple processes
6119 tricky. However they're probably useless because guest atomic
6120 operations won't work either. */
6121 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6122 target_ulong uaddr2, int val3)
6124 struct timespec ts, *pts;
6125 int base_op;
6127 /* ??? We assume FUTEX_* constants are the same on both host
6128 and target. */
6129 #ifdef FUTEX_CMD_MASK
6130 base_op = op & FUTEX_CMD_MASK;
6131 #else
6132 base_op = op;
6133 #endif
6134 switch (base_op) {
6135 case FUTEX_WAIT:
6136 case FUTEX_WAIT_BITSET:
6137 if (timeout) {
6138 pts = &ts;
6139 target_to_host_timespec(pts, timeout);
6140 } else {
6141 pts = NULL;
6143 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6144 pts, NULL, val3));
6145 case FUTEX_WAKE:
6146 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6147 case FUTEX_FD:
6148 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6149 case FUTEX_REQUEUE:
6150 case FUTEX_CMP_REQUEUE:
6151 case FUTEX_WAKE_OP:
6152 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6153 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6154 But the prototype takes a `struct timespec *'; insert casts
6155 to satisfy the compiler. We do not need to tswap TIMEOUT
6156 since it's not compared to guest memory. */
6157 pts = (struct timespec *)(uintptr_t) timeout;
6158 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6159 g2h(uaddr2),
6160 (base_op == FUTEX_CMP_REQUEUE
6161 ? tswap32(val3)
6162 : val3)));
6163 default:
6164 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): marshal the variable-size file_handle
 * between guest and host and write back the mount id.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* The guest tells us how much room it reserved for the handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the opaque guest handle to a host
 * buffer, fix up the byte-swapped fields, and forward the call.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/* Emulate signalfd4(2): translate the guest sigset and flags, then
 * register an fd translator so reads are converted back to the guest's
 * signalfd_siginfo layout.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal number. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stopping signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
6300 static int open_self_cmdline(void *cpu_env, int fd)
6302 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6303 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6304 int i;
6306 for (i = 0; i < bprm->argc; i++) {
6307 size_t len = strlen(bprm->argv[i]) + 1;
6309 if (write(fd, bprm->argv[i], len) != len) {
6310 return -1;
6314 return 0;
6317 static int open_self_maps(void *cpu_env, int fd)
6319 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6320 TaskState *ts = cpu->opaque;
6321 FILE *fp;
6322 char *line = NULL;
6323 size_t len = 0;
6324 ssize_t read;
6326 fp = fopen("/proc/self/maps", "r");
6327 if (fp == NULL) {
6328 return -1;
6331 while ((read = getline(&line, &len, fp)) != -1) {
6332 int fields, dev_maj, dev_min, inode;
6333 uint64_t min, max, offset;
6334 char flag_r, flag_w, flag_x, flag_p;
6335 char path[512] = "";
6336 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6337 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6338 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6340 if ((fields < 10) || (fields > 11)) {
6341 continue;
6343 if (h2g_valid(min)) {
6344 int flags = page_get_flags(h2g(min));
6345 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6346 if (page_check_range(h2g(min), max - min, flags) == -1) {
6347 continue;
6349 if (h2g(min) == ts->info->stack_limit) {
6350 pstrcpy(path, sizeof(path), " [stack]");
6352 dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6353 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6354 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6355 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6356 path[0] ? " " : "", path);
6360 free(line);
6361 fclose(fp);
6363 return 0;
6366 static int open_self_stat(void *cpu_env, int fd)
6368 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6369 TaskState *ts = cpu->opaque;
6370 abi_ulong start_stack = ts->info->start_stack;
6371 int i;
6373 for (i = 0; i < 44; i++) {
6374 char buf[128];
6375 int len;
6376 uint64_t val = 0;
6378 if (i == 0) {
6379 /* pid */
6380 val = getpid();
6381 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6382 } else if (i == 1) {
6383 /* app name */
6384 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6385 } else if (i == 27) {
6386 /* stack bottom */
6387 val = start_stack;
6388 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6389 } else {
6390 /* for the rest, there is MasterCard */
6391 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6394 len = strlen(buf);
6395 if (write(fd, buf, len) != len) {
6396 return -1;
6400 return 0;
6403 static int open_self_auxv(void *cpu_env, int fd)
6405 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6406 TaskState *ts = cpu->opaque;
6407 abi_ulong auxv = ts->info->saved_auxv;
6408 abi_ulong len = ts->info->auxv_len;
6409 char *ptr;
6412 * Auxiliary vector is stored in target process stack.
6413 * read in whole auxv vector and copy it to file
6415 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6416 if (ptr != NULL) {
6417 while (len > 0) {
6418 ssize_t r;
6419 r = write(fd, ptr, len);
6420 if (r <= 0) {
6421 break;
6423 len -= r;
6424 ptr += r;
6426 lseek(fd, 0, SEEK_SET);
6427 unlock_user(ptr, auxv, len);
6430 return 0;
/* Return 1 if 'filename' names /proc/<self>/<entry> for the current
 * process — either via "/proc/self/" or "/proc/<our pid>/" — else 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (!strncmp(p, "self/", strlen("self/"))) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* Accept a numeric directory only if it is our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(p, myself, strlen(myself)) != 0) {
            return 0;
        }
        p += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator for the fake-open table (absolute paths). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Back /proc/net/route for cross-endian guests: copy the host file,
 * byte-swapping the address/mask columns. Returns 0 on success, -1 if
 * the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */
    read = getline(&line, &len, fp);
    if (read == -1) {
        /* Fix: on an empty file 'line' may still be NULL; the original
         * passed it straight to dprintf("%s", ...), which is undefined
         * behavior. Nothing to emit in that case. */
        free(line);
        fclose(fp);
        return 0;
    }
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
6501 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6503 struct fake_open {
6504 const char *filename;
6505 int (*fill)(void *cpu_env, int fd);
6506 int (*cmp)(const char *s1, const char *s2);
6508 const struct fake_open *fake_open;
6509 static const struct fake_open fakes[] = {
6510 { "maps", open_self_maps, is_proc_myself },
6511 { "stat", open_self_stat, is_proc_myself },
6512 { "auxv", open_self_auxv, is_proc_myself },
6513 { "cmdline", open_self_cmdline, is_proc_myself },
6514 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6515 { "/proc/net/route", open_net_route, is_proc },
6516 #endif
6517 { NULL, NULL, NULL }
6520 if (is_proc_myself(pathname, "exe")) {
6521 int execfd = qemu_getauxval(AT_EXECFD);
6522 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6525 for (fake_open = fakes; fake_open->filename; fake_open++) {
6526 if (fake_open->cmp(pathname, fake_open->filename)) {
6527 break;
6531 if (fake_open->filename) {
6532 const char *tmpdir;
6533 char filename[PATH_MAX];
6534 int fd, r;
6536 /* create temporary file to map stat to */
6537 tmpdir = getenv("TMPDIR");
6538 if (!tmpdir)
6539 tmpdir = "/tmp";
6540 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6541 fd = mkstemp(filename);
6542 if (fd < 0) {
6543 return fd;
6545 unlink(filename);
6547 if ((r = fake_open->fill(cpu_env, fd))) {
6548 int e = errno;
6549 close(fd);
6550 errno = e;
6551 return r;
6553 lseek(fd, 0, SEEK_SET);
6555 return fd;
6558 return safe_openat(dirfd, path(pathname), flags, mode);
6561 #define TIMER_MAGIC 0x0caf0000
6562 #define TIMER_MAGIC_MASK 0xffff0000
6564 /* Convert QEMU provided timer ID back to internal 16bit index format */
6565 static target_timer_t get_timer_id(abi_long arg)
6567 target_timer_t timerid = arg;
6569 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6570 return -TARGET_EINVAL;
6573 timerid &= 0xffff;
6575 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6576 return -TARGET_EINVAL;
6579 return timerid;
6582 static int target_to_host_cpu_mask(unsigned long *host_mask,
6583 size_t host_size,
6584 abi_ulong target_addr,
6585 size_t target_size)
6587 unsigned target_bits = sizeof(abi_ulong) * 8;
6588 unsigned host_bits = sizeof(*host_mask) * 8;
6589 abi_ulong *target_mask;
6590 unsigned i, j;
6592 assert(host_size >= target_size);
6594 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6595 if (!target_mask) {
6596 return -TARGET_EFAULT;
6598 memset(host_mask, 0, host_size);
6600 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6601 unsigned bit = i * target_bits;
6602 abi_ulong val;
6604 __get_user(val, &target_mask[i]);
6605 for (j = 0; j < target_bits; j++, bit++) {
6606 if (val & (1UL << j)) {
6607 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6612 unlock_user(target_mask, target_addr, 0);
6613 return 0;
6616 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6617 size_t host_size,
6618 abi_ulong target_addr,
6619 size_t target_size)
6621 unsigned target_bits = sizeof(abi_ulong) * 8;
6622 unsigned host_bits = sizeof(*host_mask) * 8;
6623 abi_ulong *target_mask;
6624 unsigned i, j;
6626 assert(host_size >= target_size);
6628 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6629 if (!target_mask) {
6630 return -TARGET_EFAULT;
6633 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6634 unsigned bit = i * target_bits;
6635 abi_ulong val = 0;
6637 for (j = 0; j < target_bits; j++, bit++) {
6638 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6639 val |= 1UL << j;
6642 __put_user(val, &target_mask[i]);
6645 unlock_user(target_mask, target_addr, target_size);
6646 return 0;
6649 /* This is an internal helper for do_syscall so that it is easier
6650 * to have a single return point, so that actions, such as logging
6651 * of syscall results, can be performed.
6652 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6654 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6655 abi_long arg2, abi_long arg3, abi_long arg4,
6656 abi_long arg5, abi_long arg6, abi_long arg7,
6657 abi_long arg8)
6659 CPUState *cpu = ENV_GET_CPU(cpu_env);
6660 abi_long ret;
6661 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6662 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6663 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6664 struct stat st;
6665 #endif
6666 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6667 || defined(TARGET_NR_fstatfs)
6668 struct statfs stfs;
6669 #endif
6670 void *p;
6672 switch(num) {
6673 case TARGET_NR_exit:
6674 /* In old applications this may be used to implement _exit(2).
6675 However in threaded applictions it is used for thread termination,
6676 and _exit_group is used for application termination.
6677 Do thread termination if we have more then one thread. */
6679 if (block_signals()) {
6680 return -TARGET_ERESTARTSYS;
6683 cpu_list_lock();
6685 if (CPU_NEXT(first_cpu)) {
6686 TaskState *ts;
6688 /* Remove the CPU from the list. */
6689 QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6691 cpu_list_unlock();
6693 ts = cpu->opaque;
6694 if (ts->child_tidptr) {
6695 put_user_u32(0, ts->child_tidptr);
6696 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6697 NULL, NULL, 0);
6699 thread_cpu = NULL;
6700 object_unref(OBJECT(cpu));
6701 g_free(ts);
6702 rcu_unregister_thread();
6703 pthread_exit(NULL);
6706 cpu_list_unlock();
6707 preexit_cleanup(cpu_env, arg1);
6708 _exit(arg1);
6709 return 0; /* avoid warning */
6710 case TARGET_NR_read:
6711 if (arg3 == 0) {
6712 return 0;
6713 } else {
6714 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6715 return -TARGET_EFAULT;
6716 ret = get_errno(safe_read(arg1, p, arg3));
6717 if (ret >= 0 &&
6718 fd_trans_host_to_target_data(arg1)) {
6719 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6721 unlock_user(p, arg2, ret);
6723 return ret;
6724 case TARGET_NR_write:
6725 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6726 return -TARGET_EFAULT;
6727 if (fd_trans_target_to_host_data(arg1)) {
6728 void *copy = g_malloc(arg3);
6729 memcpy(copy, p, arg3);
6730 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
6731 if (ret >= 0) {
6732 ret = get_errno(safe_write(arg1, copy, ret));
6734 g_free(copy);
6735 } else {
6736 ret = get_errno(safe_write(arg1, p, arg3));
6738 unlock_user(p, arg2, 0);
6739 return ret;
6741 #ifdef TARGET_NR_open
6742 case TARGET_NR_open:
6743 if (!(p = lock_user_string(arg1)))
6744 return -TARGET_EFAULT;
6745 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6746 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6747 arg3));
6748 fd_trans_unregister(ret);
6749 unlock_user(p, arg1, 0);
6750 return ret;
6751 #endif
6752 case TARGET_NR_openat:
6753 if (!(p = lock_user_string(arg2)))
6754 return -TARGET_EFAULT;
6755 ret = get_errno(do_openat(cpu_env, arg1, p,
6756 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6757 arg4));
6758 fd_trans_unregister(ret);
6759 unlock_user(p, arg2, 0);
6760 return ret;
6761 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6762 case TARGET_NR_name_to_handle_at:
6763 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6764 return ret;
6765 #endif
6766 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6767 case TARGET_NR_open_by_handle_at:
6768 ret = do_open_by_handle_at(arg1, arg2, arg3);
6769 fd_trans_unregister(ret);
6770 return ret;
6771 #endif
6772 case TARGET_NR_close:
6773 fd_trans_unregister(arg1);
6774 return get_errno(close(arg1));
6776 case TARGET_NR_brk:
6777 return do_brk(arg1);
6778 #ifdef TARGET_NR_fork
6779 case TARGET_NR_fork:
6780 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
6781 #endif
6782 #ifdef TARGET_NR_waitpid
6783 case TARGET_NR_waitpid:
6785 int status;
6786 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6787 if (!is_error(ret) && arg2 && ret
6788 && put_user_s32(host_to_target_waitstatus(status), arg2))
6789 return -TARGET_EFAULT;
6791 return ret;
6792 #endif
6793 #ifdef TARGET_NR_waitid
6794 case TARGET_NR_waitid:
6796 siginfo_t info;
6797 info.si_pid = 0;
6798 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6799 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6800 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6801 return -TARGET_EFAULT;
6802 host_to_target_siginfo(p, &info);
6803 unlock_user(p, arg3, sizeof(target_siginfo_t));
6806 return ret;
6807 #endif
6808 #ifdef TARGET_NR_creat /* not on alpha */
6809 case TARGET_NR_creat:
6810 if (!(p = lock_user_string(arg1)))
6811 return -TARGET_EFAULT;
6812 ret = get_errno(creat(p, arg2));
6813 fd_trans_unregister(ret);
6814 unlock_user(p, arg1, 0);
6815 return ret;
6816 #endif
6817 #ifdef TARGET_NR_link
6818 case TARGET_NR_link:
6820 void * p2;
6821 p = lock_user_string(arg1);
6822 p2 = lock_user_string(arg2);
6823 if (!p || !p2)
6824 ret = -TARGET_EFAULT;
6825 else
6826 ret = get_errno(link(p, p2));
6827 unlock_user(p2, arg2, 0);
6828 unlock_user(p, arg1, 0);
6830 return ret;
6831 #endif
6832 #if defined(TARGET_NR_linkat)
6833 case TARGET_NR_linkat:
6835 void * p2 = NULL;
6836 if (!arg2 || !arg4)
6837 return -TARGET_EFAULT;
6838 p = lock_user_string(arg2);
6839 p2 = lock_user_string(arg4);
6840 if (!p || !p2)
6841 ret = -TARGET_EFAULT;
6842 else
6843 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6844 unlock_user(p, arg2, 0);
6845 unlock_user(p2, arg4, 0);
6847 return ret;
6848 #endif
6849 #ifdef TARGET_NR_unlink
6850 case TARGET_NR_unlink:
6851 if (!(p = lock_user_string(arg1)))
6852 return -TARGET_EFAULT;
6853 ret = get_errno(unlink(p));
6854 unlock_user(p, arg1, 0);
6855 return ret;
6856 #endif
6857 #if defined(TARGET_NR_unlinkat)
6858 case TARGET_NR_unlinkat:
6859 if (!(p = lock_user_string(arg2)))
6860 return -TARGET_EFAULT;
6861 ret = get_errno(unlinkat(arg1, p, arg3));
6862 unlock_user(p, arg2, 0);
6863 return ret;
6864 #endif
6865 case TARGET_NR_execve:
6867 char **argp, **envp;
6868 int argc, envc;
6869 abi_ulong gp;
6870 abi_ulong guest_argp;
6871 abi_ulong guest_envp;
6872 abi_ulong addr;
6873 char **q;
6874 int total_size = 0;
6876 argc = 0;
6877 guest_argp = arg2;
6878 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6879 if (get_user_ual(addr, gp))
6880 return -TARGET_EFAULT;
6881 if (!addr)
6882 break;
6883 argc++;
6885 envc = 0;
6886 guest_envp = arg3;
6887 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6888 if (get_user_ual(addr, gp))
6889 return -TARGET_EFAULT;
6890 if (!addr)
6891 break;
6892 envc++;
6895 argp = g_new0(char *, argc + 1);
6896 envp = g_new0(char *, envc + 1);
6898 for (gp = guest_argp, q = argp; gp;
6899 gp += sizeof(abi_ulong), q++) {
6900 if (get_user_ual(addr, gp))
6901 goto execve_efault;
6902 if (!addr)
6903 break;
6904 if (!(*q = lock_user_string(addr)))
6905 goto execve_efault;
6906 total_size += strlen(*q) + 1;
6908 *q = NULL;
6910 for (gp = guest_envp, q = envp; gp;
6911 gp += sizeof(abi_ulong), q++) {
6912 if (get_user_ual(addr, gp))
6913 goto execve_efault;
6914 if (!addr)
6915 break;
6916 if (!(*q = lock_user_string(addr)))
6917 goto execve_efault;
6918 total_size += strlen(*q) + 1;
6920 *q = NULL;
6922 if (!(p = lock_user_string(arg1)))
6923 goto execve_efault;
6924 /* Although execve() is not an interruptible syscall it is
6925 * a special case where we must use the safe_syscall wrapper:
6926 * if we allow a signal to happen before we make the host
6927 * syscall then we will 'lose' it, because at the point of
6928 * execve the process leaves QEMU's control. So we use the
6929 * safe syscall wrapper to ensure that we either take the
6930 * signal as a guest signal, or else it does not happen
6931 * before the execve completes and makes it the other
6932 * program's problem.
6934 ret = get_errno(safe_execve(p, argp, envp));
6935 unlock_user(p, arg1, 0);
6937 goto execve_end;
6939 execve_efault:
6940 ret = -TARGET_EFAULT;
6942 execve_end:
6943 for (gp = guest_argp, q = argp; *q;
6944 gp += sizeof(abi_ulong), q++) {
6945 if (get_user_ual(addr, gp)
6946 || !addr)
6947 break;
6948 unlock_user(*q, addr, 0);
6950 for (gp = guest_envp, q = envp; *q;
6951 gp += sizeof(abi_ulong), q++) {
6952 if (get_user_ual(addr, gp)
6953 || !addr)
6954 break;
6955 unlock_user(*q, addr, 0);
6958 g_free(argp);
6959 g_free(envp);
6961 return ret;
6962 case TARGET_NR_chdir:
6963 if (!(p = lock_user_string(arg1)))
6964 return -TARGET_EFAULT;
6965 ret = get_errno(chdir(p));
6966 unlock_user(p, arg1, 0);
6967 return ret;
6968 #ifdef TARGET_NR_time
6969 case TARGET_NR_time:
6971 time_t host_time;
6972 ret = get_errno(time(&host_time));
6973 if (!is_error(ret)
6974 && arg1
6975 && put_user_sal(host_time, arg1))
6976 return -TARGET_EFAULT;
6978 return ret;
6979 #endif
6980 #ifdef TARGET_NR_mknod
6981 case TARGET_NR_mknod:
6982 if (!(p = lock_user_string(arg1)))
6983 return -TARGET_EFAULT;
6984 ret = get_errno(mknod(p, arg2, arg3));
6985 unlock_user(p, arg1, 0);
6986 return ret;
6987 #endif
6988 #if defined(TARGET_NR_mknodat)
6989 case TARGET_NR_mknodat:
6990 if (!(p = lock_user_string(arg2)))
6991 return -TARGET_EFAULT;
6992 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6993 unlock_user(p, arg2, 0);
6994 return ret;
6995 #endif
6996 #ifdef TARGET_NR_chmod
6997 case TARGET_NR_chmod:
6998 if (!(p = lock_user_string(arg1)))
6999 return -TARGET_EFAULT;
7000 ret = get_errno(chmod(p, arg2));
7001 unlock_user(p, arg1, 0);
7002 return ret;
7003 #endif
7004 #ifdef TARGET_NR_lseek
7005 case TARGET_NR_lseek:
7006 return get_errno(lseek(arg1, arg2, arg3));
7007 #endif
7008 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7009 /* Alpha specific */
7010 case TARGET_NR_getxpid:
7011 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7012 return get_errno(getpid());
7013 #endif
7014 #ifdef TARGET_NR_getpid
7015 case TARGET_NR_getpid:
7016 return get_errno(getpid());
7017 #endif
7018 case TARGET_NR_mount:
7020 /* need to look at the data field */
7021 void *p2, *p3;
7023 if (arg1) {
7024 p = lock_user_string(arg1);
7025 if (!p) {
7026 return -TARGET_EFAULT;
7028 } else {
7029 p = NULL;
7032 p2 = lock_user_string(arg2);
7033 if (!p2) {
7034 if (arg1) {
7035 unlock_user(p, arg1, 0);
7037 return -TARGET_EFAULT;
7040 if (arg3) {
7041 p3 = lock_user_string(arg3);
7042 if (!p3) {
7043 if (arg1) {
7044 unlock_user(p, arg1, 0);
7046 unlock_user(p2, arg2, 0);
7047 return -TARGET_EFAULT;
7049 } else {
7050 p3 = NULL;
7053 /* FIXME - arg5 should be locked, but it isn't clear how to
7054 * do that since it's not guaranteed to be a NULL-terminated
7055 * string.
7057 if (!arg5) {
7058 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7059 } else {
7060 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7062 ret = get_errno(ret);
7064 if (arg1) {
7065 unlock_user(p, arg1, 0);
7067 unlock_user(p2, arg2, 0);
7068 if (arg3) {
7069 unlock_user(p3, arg3, 0);
7072 return ret;
7073 #ifdef TARGET_NR_umount
7074 case TARGET_NR_umount:
7075 if (!(p = lock_user_string(arg1)))
7076 return -TARGET_EFAULT;
7077 ret = get_errno(umount(p));
7078 unlock_user(p, arg1, 0);
7079 return ret;
7080 #endif
7081 #ifdef TARGET_NR_stime /* not on alpha */
7082 case TARGET_NR_stime:
7084 time_t host_time;
7085 if (get_user_sal(host_time, arg1))
7086 return -TARGET_EFAULT;
7087 return get_errno(stime(&host_time));
7089 #endif
7090 #ifdef TARGET_NR_alarm /* not on alpha */
7091 case TARGET_NR_alarm:
7092 return alarm(arg1);
7093 #endif
7094 #ifdef TARGET_NR_pause /* not on alpha */
7095 case TARGET_NR_pause:
7096 if (!block_signals()) {
7097 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7099 return -TARGET_EINTR;
7100 #endif
7101 #ifdef TARGET_NR_utime
7102 case TARGET_NR_utime:
7104 struct utimbuf tbuf, *host_tbuf;
7105 struct target_utimbuf *target_tbuf;
7106 if (arg2) {
7107 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7108 return -TARGET_EFAULT;
7109 tbuf.actime = tswapal(target_tbuf->actime);
7110 tbuf.modtime = tswapal(target_tbuf->modtime);
7111 unlock_user_struct(target_tbuf, arg2, 0);
7112 host_tbuf = &tbuf;
7113 } else {
7114 host_tbuf = NULL;
7116 if (!(p = lock_user_string(arg1)))
7117 return -TARGET_EFAULT;
7118 ret = get_errno(utime(p, host_tbuf));
7119 unlock_user(p, arg1, 0);
7121 return ret;
7122 #endif
7123 #ifdef TARGET_NR_utimes
7124 case TARGET_NR_utimes:
7126 struct timeval *tvp, tv[2];
7127 if (arg2) {
7128 if (copy_from_user_timeval(&tv[0], arg2)
7129 || copy_from_user_timeval(&tv[1],
7130 arg2 + sizeof(struct target_timeval)))
7131 return -TARGET_EFAULT;
7132 tvp = tv;
7133 } else {
7134 tvp = NULL;
7136 if (!(p = lock_user_string(arg1)))
7137 return -TARGET_EFAULT;
7138 ret = get_errno(utimes(p, tvp));
7139 unlock_user(p, arg1, 0);
7141 return ret;
7142 #endif
7143 #if defined(TARGET_NR_futimesat)
7144 case TARGET_NR_futimesat:
7146 struct timeval *tvp, tv[2];
7147 if (arg3) {
7148 if (copy_from_user_timeval(&tv[0], arg3)
7149 || copy_from_user_timeval(&tv[1],
7150 arg3 + sizeof(struct target_timeval)))
7151 return -TARGET_EFAULT;
7152 tvp = tv;
7153 } else {
7154 tvp = NULL;
7156 if (!(p = lock_user_string(arg2))) {
7157 return -TARGET_EFAULT;
7159 ret = get_errno(futimesat(arg1, path(p), tvp));
7160 unlock_user(p, arg2, 0);
7162 return ret;
7163 #endif
7164 #ifdef TARGET_NR_access
7165 case TARGET_NR_access:
7166 if (!(p = lock_user_string(arg1))) {
7167 return -TARGET_EFAULT;
7169 ret = get_errno(access(path(p), arg2));
7170 unlock_user(p, arg1, 0);
7171 return ret;
7172 #endif
7173 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7174 case TARGET_NR_faccessat:
7175 if (!(p = lock_user_string(arg2))) {
7176 return -TARGET_EFAULT;
7178 ret = get_errno(faccessat(arg1, p, arg3, 0));
7179 unlock_user(p, arg2, 0);
7180 return ret;
7181 #endif
7182 #ifdef TARGET_NR_nice /* not on alpha */
7183 case TARGET_NR_nice:
7184 return get_errno(nice(arg1));
7185 #endif
7186 case TARGET_NR_sync:
7187 sync();
7188 return 0;
7189 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7190 case TARGET_NR_syncfs:
7191 return get_errno(syncfs(arg1));
7192 #endif
7193 case TARGET_NR_kill:
7194 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7195 #ifdef TARGET_NR_rename
7196 case TARGET_NR_rename:
7198 void *p2;
7199 p = lock_user_string(arg1);
7200 p2 = lock_user_string(arg2);
7201 if (!p || !p2)
7202 ret = -TARGET_EFAULT;
7203 else
7204 ret = get_errno(rename(p, p2));
7205 unlock_user(p2, arg2, 0);
7206 unlock_user(p, arg1, 0);
7208 return ret;
7209 #endif
7210 #if defined(TARGET_NR_renameat)
7211 case TARGET_NR_renameat:
7213 void *p2;
7214 p = lock_user_string(arg2);
7215 p2 = lock_user_string(arg4);
7216 if (!p || !p2)
7217 ret = -TARGET_EFAULT;
7218 else
7219 ret = get_errno(renameat(arg1, p, arg3, p2));
7220 unlock_user(p2, arg4, 0);
7221 unlock_user(p, arg2, 0);
7223 return ret;
7224 #endif
7225 #if defined(TARGET_NR_renameat2)
7226 case TARGET_NR_renameat2:
7228 void *p2;
7229 p = lock_user_string(arg2);
7230 p2 = lock_user_string(arg4);
7231 if (!p || !p2) {
7232 ret = -TARGET_EFAULT;
7233 } else {
7234 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7236 unlock_user(p2, arg4, 0);
7237 unlock_user(p, arg2, 0);
7239 return ret;
7240 #endif
7241 #ifdef TARGET_NR_mkdir
7242 case TARGET_NR_mkdir:
7243 if (!(p = lock_user_string(arg1)))
7244 return -TARGET_EFAULT;
7245 ret = get_errno(mkdir(p, arg2));
7246 unlock_user(p, arg1, 0);
7247 return ret;
7248 #endif
7249 #if defined(TARGET_NR_mkdirat)
7250 case TARGET_NR_mkdirat:
7251 if (!(p = lock_user_string(arg2)))
7252 return -TARGET_EFAULT;
7253 ret = get_errno(mkdirat(arg1, p, arg3));
7254 unlock_user(p, arg2, 0);
7255 return ret;
7256 #endif
7257 #ifdef TARGET_NR_rmdir
7258 case TARGET_NR_rmdir:
7259 if (!(p = lock_user_string(arg1)))
7260 return -TARGET_EFAULT;
7261 ret = get_errno(rmdir(p));
7262 unlock_user(p, arg1, 0);
7263 return ret;
7264 #endif
7265 case TARGET_NR_dup:
7266 ret = get_errno(dup(arg1));
7267 if (ret >= 0) {
7268 fd_trans_dup(arg1, ret);
7270 return ret;
7271 #ifdef TARGET_NR_pipe
7272 case TARGET_NR_pipe:
7273 return do_pipe(cpu_env, arg1, 0, 0);
7274 #endif
7275 #ifdef TARGET_NR_pipe2
7276 case TARGET_NR_pipe2:
7277 return do_pipe(cpu_env, arg1,
7278 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7279 #endif
7280 case TARGET_NR_times:
7282 struct target_tms *tmsp;
7283 struct tms tms;
7284 ret = get_errno(times(&tms));
7285 if (arg1) {
7286 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7287 if (!tmsp)
7288 return -TARGET_EFAULT;
7289 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7290 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7291 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7292 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7294 if (!is_error(ret))
7295 ret = host_to_target_clock_t(ret);
7297 return ret;
7298 case TARGET_NR_acct:
7299 if (arg1 == 0) {
7300 ret = get_errno(acct(NULL));
7301 } else {
7302 if (!(p = lock_user_string(arg1))) {
7303 return -TARGET_EFAULT;
7305 ret = get_errno(acct(path(p)));
7306 unlock_user(p, arg1, 0);
7308 return ret;
7309 #ifdef TARGET_NR_umount2
7310 case TARGET_NR_umount2:
7311 if (!(p = lock_user_string(arg1)))
7312 return -TARGET_EFAULT;
7313 ret = get_errno(umount2(p, arg2));
7314 unlock_user(p, arg1, 0);
7315 return ret;
7316 #endif
7317 case TARGET_NR_ioctl:
7318 return do_ioctl(arg1, arg2, arg3);
7319 #ifdef TARGET_NR_fcntl
7320 case TARGET_NR_fcntl:
7321 return do_fcntl(arg1, arg2, arg3);
7322 #endif
7323 case TARGET_NR_setpgid:
7324 return get_errno(setpgid(arg1, arg2));
7325 case TARGET_NR_umask:
7326 return get_errno(umask(arg1));
7327 case TARGET_NR_chroot:
7328 if (!(p = lock_user_string(arg1)))
7329 return -TARGET_EFAULT;
7330 ret = get_errno(chroot(p));
7331 unlock_user(p, arg1, 0);
7332 return ret;
7333 #ifdef TARGET_NR_dup2
7334 case TARGET_NR_dup2:
7335 ret = get_errno(dup2(arg1, arg2));
7336 if (ret >= 0) {
7337 fd_trans_dup(arg1, arg2);
7339 return ret;
7340 #endif
7341 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7342 case TARGET_NR_dup3:
7344 int host_flags;
7346 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7347 return -EINVAL;
7349 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7350 ret = get_errno(dup3(arg1, arg2, host_flags));
7351 if (ret >= 0) {
7352 fd_trans_dup(arg1, arg2);
7354 return ret;
7356 #endif
7357 #ifdef TARGET_NR_getppid /* not on alpha */
7358 case TARGET_NR_getppid:
7359 return get_errno(getppid());
7360 #endif
7361 #ifdef TARGET_NR_getpgrp
7362 case TARGET_NR_getpgrp:
7363 return get_errno(getpgrp());
7364 #endif
7365 case TARGET_NR_setsid:
7366 return get_errno(setsid());
7367 #ifdef TARGET_NR_sigaction
7368 case TARGET_NR_sigaction:
7370 #if defined(TARGET_ALPHA)
7371 struct target_sigaction act, oact, *pact = 0;
7372 struct target_old_sigaction *old_act;
7373 if (arg2) {
7374 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7375 return -TARGET_EFAULT;
7376 act._sa_handler = old_act->_sa_handler;
7377 target_siginitset(&act.sa_mask, old_act->sa_mask);
7378 act.sa_flags = old_act->sa_flags;
7379 act.sa_restorer = 0;
7380 unlock_user_struct(old_act, arg2, 0);
7381 pact = &act;
7383 ret = get_errno(do_sigaction(arg1, pact, &oact));
7384 if (!is_error(ret) && arg3) {
7385 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7386 return -TARGET_EFAULT;
7387 old_act->_sa_handler = oact._sa_handler;
7388 old_act->sa_mask = oact.sa_mask.sig[0];
7389 old_act->sa_flags = oact.sa_flags;
7390 unlock_user_struct(old_act, arg3, 1);
7392 #elif defined(TARGET_MIPS)
7393 struct target_sigaction act, oact, *pact, *old_act;
7395 if (arg2) {
7396 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7397 return -TARGET_EFAULT;
7398 act._sa_handler = old_act->_sa_handler;
7399 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7400 act.sa_flags = old_act->sa_flags;
7401 unlock_user_struct(old_act, arg2, 0);
7402 pact = &act;
7403 } else {
7404 pact = NULL;
7407 ret = get_errno(do_sigaction(arg1, pact, &oact));
7409 if (!is_error(ret) && arg3) {
7410 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7411 return -TARGET_EFAULT;
7412 old_act->_sa_handler = oact._sa_handler;
7413 old_act->sa_flags = oact.sa_flags;
7414 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7415 old_act->sa_mask.sig[1] = 0;
7416 old_act->sa_mask.sig[2] = 0;
7417 old_act->sa_mask.sig[3] = 0;
7418 unlock_user_struct(old_act, arg3, 1);
7420 #else
7421 struct target_old_sigaction *old_act;
7422 struct target_sigaction act, oact, *pact;
7423 if (arg2) {
7424 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7425 return -TARGET_EFAULT;
7426 act._sa_handler = old_act->_sa_handler;
7427 target_siginitset(&act.sa_mask, old_act->sa_mask);
7428 act.sa_flags = old_act->sa_flags;
7429 act.sa_restorer = old_act->sa_restorer;
7430 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7431 act.ka_restorer = 0;
7432 #endif
7433 unlock_user_struct(old_act, arg2, 0);
7434 pact = &act;
7435 } else {
7436 pact = NULL;
7438 ret = get_errno(do_sigaction(arg1, pact, &oact));
7439 if (!is_error(ret) && arg3) {
7440 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7441 return -TARGET_EFAULT;
7442 old_act->_sa_handler = oact._sa_handler;
7443 old_act->sa_mask = oact.sa_mask.sig[0];
7444 old_act->sa_flags = oact.sa_flags;
7445 old_act->sa_restorer = oact.sa_restorer;
7446 unlock_user_struct(old_act, arg3, 1);
7448 #endif
7450 return ret;
7451 #endif
7452 case TARGET_NR_rt_sigaction:
7454 #if defined(TARGET_ALPHA)
7455 /* For Alpha and SPARC this is a 5 argument syscall, with
7456 * a 'restorer' parameter which must be copied into the
7457 * sa_restorer field of the sigaction struct.
7458 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7459 * and arg5 is the sigsetsize.
7460 * Alpha also has a separate rt_sigaction struct that it uses
7461 * here; SPARC uses the usual sigaction struct.
7463 struct target_rt_sigaction *rt_act;
7464 struct target_sigaction act, oact, *pact = 0;
7466 if (arg4 != sizeof(target_sigset_t)) {
7467 return -TARGET_EINVAL;
7469 if (arg2) {
7470 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7471 return -TARGET_EFAULT;
7472 act._sa_handler = rt_act->_sa_handler;
7473 act.sa_mask = rt_act->sa_mask;
7474 act.sa_flags = rt_act->sa_flags;
7475 act.sa_restorer = arg5;
7476 unlock_user_struct(rt_act, arg2, 0);
7477 pact = &act;
7479 ret = get_errno(do_sigaction(arg1, pact, &oact));
7480 if (!is_error(ret) && arg3) {
7481 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7482 return -TARGET_EFAULT;
7483 rt_act->_sa_handler = oact._sa_handler;
7484 rt_act->sa_mask = oact.sa_mask;
7485 rt_act->sa_flags = oact.sa_flags;
7486 unlock_user_struct(rt_act, arg3, 1);
7488 #else
7489 #ifdef TARGET_SPARC
7490 target_ulong restorer = arg4;
7491 target_ulong sigsetsize = arg5;
7492 #else
7493 target_ulong sigsetsize = arg4;
7494 #endif
7495 struct target_sigaction *act;
7496 struct target_sigaction *oact;
7498 if (sigsetsize != sizeof(target_sigset_t)) {
7499 return -TARGET_EINVAL;
7501 if (arg2) {
7502 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7503 return -TARGET_EFAULT;
7505 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7506 act->ka_restorer = restorer;
7507 #endif
7508 } else {
7509 act = NULL;
7511 if (arg3) {
7512 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7513 ret = -TARGET_EFAULT;
7514 goto rt_sigaction_fail;
7516 } else
7517 oact = NULL;
7518 ret = get_errno(do_sigaction(arg1, act, oact));
7519 rt_sigaction_fail:
7520 if (act)
7521 unlock_user_struct(act, arg2, 0);
7522 if (oact)
7523 unlock_user_struct(oact, arg3, 1);
7524 #endif
7526 return ret;
7527 #ifdef TARGET_NR_sgetmask /* not on alpha */
7528 case TARGET_NR_sgetmask:
7530 sigset_t cur_set;
7531 abi_ulong target_set;
7532 ret = do_sigprocmask(0, NULL, &cur_set);
7533 if (!ret) {
7534 host_to_target_old_sigset(&target_set, &cur_set);
7535 ret = target_set;
7538 return ret;
7539 #endif
7540 #ifdef TARGET_NR_ssetmask /* not on alpha */
7541 case TARGET_NR_ssetmask:
7543 sigset_t set, oset;
7544 abi_ulong target_set = arg1;
7545 target_to_host_old_sigset(&set, &target_set);
7546 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7547 if (!ret) {
7548 host_to_target_old_sigset(&target_set, &oset);
7549 ret = target_set;
7552 return ret;
7553 #endif
7554 #ifdef TARGET_NR_sigprocmask
7555 case TARGET_NR_sigprocmask:
7557 #if defined(TARGET_ALPHA)
7558 sigset_t set, oldset;
7559 abi_ulong mask;
7560 int how;
7562 switch (arg1) {
7563 case TARGET_SIG_BLOCK:
7564 how = SIG_BLOCK;
7565 break;
7566 case TARGET_SIG_UNBLOCK:
7567 how = SIG_UNBLOCK;
7568 break;
7569 case TARGET_SIG_SETMASK:
7570 how = SIG_SETMASK;
7571 break;
7572 default:
7573 return -TARGET_EINVAL;
7575 mask = arg2;
7576 target_to_host_old_sigset(&set, &mask);
7578 ret = do_sigprocmask(how, &set, &oldset);
7579 if (!is_error(ret)) {
7580 host_to_target_old_sigset(&mask, &oldset);
7581 ret = mask;
7582 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7584 #else
7585 sigset_t set, oldset, *set_ptr;
7586 int how;
7588 if (arg2) {
7589 switch (arg1) {
7590 case TARGET_SIG_BLOCK:
7591 how = SIG_BLOCK;
7592 break;
7593 case TARGET_SIG_UNBLOCK:
7594 how = SIG_UNBLOCK;
7595 break;
7596 case TARGET_SIG_SETMASK:
7597 how = SIG_SETMASK;
7598 break;
7599 default:
7600 return -TARGET_EINVAL;
7602 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7603 return -TARGET_EFAULT;
7604 target_to_host_old_sigset(&set, p);
7605 unlock_user(p, arg2, 0);
7606 set_ptr = &set;
7607 } else {
7608 how = 0;
7609 set_ptr = NULL;
7611 ret = do_sigprocmask(how, set_ptr, &oldset);
7612 if (!is_error(ret) && arg3) {
7613 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7614 return -TARGET_EFAULT;
7615 host_to_target_old_sigset(p, &oldset);
7616 unlock_user(p, arg3, sizeof(target_sigset_t));
7618 #endif
7620 return ret;
7621 #endif
7622 case TARGET_NR_rt_sigprocmask:
7624 int how = arg1;
7625 sigset_t set, oldset, *set_ptr;
7627 if (arg4 != sizeof(target_sigset_t)) {
7628 return -TARGET_EINVAL;
7631 if (arg2) {
7632 switch(how) {
7633 case TARGET_SIG_BLOCK:
7634 how = SIG_BLOCK;
7635 break;
7636 case TARGET_SIG_UNBLOCK:
7637 how = SIG_UNBLOCK;
7638 break;
7639 case TARGET_SIG_SETMASK:
7640 how = SIG_SETMASK;
7641 break;
7642 default:
7643 return -TARGET_EINVAL;
7645 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7646 return -TARGET_EFAULT;
7647 target_to_host_sigset(&set, p);
7648 unlock_user(p, arg2, 0);
7649 set_ptr = &set;
7650 } else {
7651 how = 0;
7652 set_ptr = NULL;
7654 ret = do_sigprocmask(how, set_ptr, &oldset);
7655 if (!is_error(ret) && arg3) {
7656 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7657 return -TARGET_EFAULT;
7658 host_to_target_sigset(p, &oldset);
7659 unlock_user(p, arg3, sizeof(target_sigset_t));
7662 return ret;
7663 #ifdef TARGET_NR_sigpending
7664 case TARGET_NR_sigpending:
7666 sigset_t set;
7667 ret = get_errno(sigpending(&set));
7668 if (!is_error(ret)) {
7669 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7670 return -TARGET_EFAULT;
7671 host_to_target_old_sigset(p, &set);
7672 unlock_user(p, arg1, sizeof(target_sigset_t));
7675 return ret;
7676 #endif
7677 case TARGET_NR_rt_sigpending:
7679 sigset_t set;
7681 /* Yes, this check is >, not != like most. We follow the kernel's
7682 * logic and it does it like this because it implements
7683 * NR_sigpending through the same code path, and in that case
7684 * the old_sigset_t is smaller in size.
7686 if (arg2 > sizeof(target_sigset_t)) {
7687 return -TARGET_EINVAL;
7690 ret = get_errno(sigpending(&set));
7691 if (!is_error(ret)) {
7692 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7693 return -TARGET_EFAULT;
7694 host_to_target_sigset(p, &set);
7695 unlock_user(p, arg1, sizeof(target_sigset_t));
7698 return ret;
7699 #ifdef TARGET_NR_sigsuspend
7700 case TARGET_NR_sigsuspend:
7702 TaskState *ts = cpu->opaque;
7703 #if defined(TARGET_ALPHA)
7704 abi_ulong mask = arg1;
7705 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7706 #else
7707 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7708 return -TARGET_EFAULT;
7709 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7710 unlock_user(p, arg1, 0);
7711 #endif
7712 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7713 SIGSET_T_SIZE));
7714 if (ret != -TARGET_ERESTARTSYS) {
7715 ts->in_sigsuspend = 1;
7718 return ret;
7719 #endif
7720 case TARGET_NR_rt_sigsuspend:
7722 TaskState *ts = cpu->opaque;
7724 if (arg2 != sizeof(target_sigset_t)) {
7725 return -TARGET_EINVAL;
7727 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7728 return -TARGET_EFAULT;
7729 target_to_host_sigset(&ts->sigsuspend_mask, p);
7730 unlock_user(p, arg1, 0);
7731 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7732 SIGSET_T_SIZE));
7733 if (ret != -TARGET_ERESTARTSYS) {
7734 ts->in_sigsuspend = 1;
7737 return ret;
7738 case TARGET_NR_rt_sigtimedwait:
7740 sigset_t set;
7741 struct timespec uts, *puts;
7742 siginfo_t uinfo;
7744 if (arg4 != sizeof(target_sigset_t)) {
7745 return -TARGET_EINVAL;
7748 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7749 return -TARGET_EFAULT;
7750 target_to_host_sigset(&set, p);
7751 unlock_user(p, arg1, 0);
7752 if (arg3) {
7753 puts = &uts;
7754 target_to_host_timespec(puts, arg3);
7755 } else {
7756 puts = NULL;
7758 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7759 SIGSET_T_SIZE));
7760 if (!is_error(ret)) {
7761 if (arg2) {
7762 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7764 if (!p) {
7765 return -TARGET_EFAULT;
7767 host_to_target_siginfo(p, &uinfo);
7768 unlock_user(p, arg2, sizeof(target_siginfo_t));
7770 ret = host_to_target_signal(ret);
7773 return ret;
7774 case TARGET_NR_rt_sigqueueinfo:
7776 siginfo_t uinfo;
7778 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
7779 if (!p) {
7780 return -TARGET_EFAULT;
7782 target_to_host_siginfo(&uinfo, p);
7783 unlock_user(p, arg3, 0);
7784 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7786 return ret;
7787 case TARGET_NR_rt_tgsigqueueinfo:
7789 siginfo_t uinfo;
7791 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
7792 if (!p) {
7793 return -TARGET_EFAULT;
7795 target_to_host_siginfo(&uinfo, p);
7796 unlock_user(p, arg4, 0);
7797 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
7799 return ret;
7800 #ifdef TARGET_NR_sigreturn
7801 case TARGET_NR_sigreturn:
7802 if (block_signals()) {
7803 return -TARGET_ERESTARTSYS;
7805 return do_sigreturn(cpu_env);
7806 #endif
7807 case TARGET_NR_rt_sigreturn:
7808 if (block_signals()) {
7809 return -TARGET_ERESTARTSYS;
7811 return do_rt_sigreturn(cpu_env);
7812 case TARGET_NR_sethostname:
7813 if (!(p = lock_user_string(arg1)))
7814 return -TARGET_EFAULT;
7815 ret = get_errno(sethostname(p, arg2));
7816 unlock_user(p, arg1, 0);
7817 return ret;
7818 #ifdef TARGET_NR_setrlimit
7819 case TARGET_NR_setrlimit:
7821 int resource = target_to_host_resource(arg1);
7822 struct target_rlimit *target_rlim;
7823 struct rlimit rlim;
7824 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7825 return -TARGET_EFAULT;
7826 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7827 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7828 unlock_user_struct(target_rlim, arg2, 0);
7829 return get_errno(setrlimit(resource, &rlim));
7831 #endif
7832 #ifdef TARGET_NR_getrlimit
7833 case TARGET_NR_getrlimit:
7835 int resource = target_to_host_resource(arg1);
7836 struct target_rlimit *target_rlim;
7837 struct rlimit rlim;
7839 ret = get_errno(getrlimit(resource, &rlim));
7840 if (!is_error(ret)) {
7841 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7842 return -TARGET_EFAULT;
7843 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7844 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7845 unlock_user_struct(target_rlim, arg2, 1);
7848 return ret;
7849 #endif
7850 case TARGET_NR_getrusage:
7852 struct rusage rusage;
7853 ret = get_errno(getrusage(arg1, &rusage));
7854 if (!is_error(ret)) {
7855 ret = host_to_target_rusage(arg2, &rusage);
7858 return ret;
7859 case TARGET_NR_gettimeofday:
7861 struct timeval tv;
7862 ret = get_errno(gettimeofday(&tv, NULL));
7863 if (!is_error(ret)) {
7864 if (copy_to_user_timeval(arg1, &tv))
7865 return -TARGET_EFAULT;
7868 return ret;
7869 case TARGET_NR_settimeofday:
7871 struct timeval tv, *ptv = NULL;
7872 struct timezone tz, *ptz = NULL;
7874 if (arg1) {
7875 if (copy_from_user_timeval(&tv, arg1)) {
7876 return -TARGET_EFAULT;
7878 ptv = &tv;
7881 if (arg2) {
7882 if (copy_from_user_timezone(&tz, arg2)) {
7883 return -TARGET_EFAULT;
7885 ptz = &tz;
7888 return get_errno(settimeofday(ptv, ptz));
7890 #if defined(TARGET_NR_select)
7891 case TARGET_NR_select:
7892 #if defined(TARGET_WANT_NI_OLD_SELECT)
7893 /* some architectures used to have old_select here
7894 * but now ENOSYS it.
7896 ret = -TARGET_ENOSYS;
7897 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
7898 ret = do_old_select(arg1);
7899 #else
7900 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7901 #endif
7902 return ret;
7903 #endif
7904 #ifdef TARGET_NR_pselect6
7905 case TARGET_NR_pselect6:
7907 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7908 fd_set rfds, wfds, efds;
7909 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7910 struct timespec ts, *ts_ptr;
7913 * The 6th arg is actually two args smashed together,
7914 * so we cannot use the C library.
7916 sigset_t set;
7917 struct {
7918 sigset_t *set;
7919 size_t size;
7920 } sig, *sig_ptr;
7922 abi_ulong arg_sigset, arg_sigsize, *arg7;
7923 target_sigset_t *target_sigset;
7925 n = arg1;
7926 rfd_addr = arg2;
7927 wfd_addr = arg3;
7928 efd_addr = arg4;
7929 ts_addr = arg5;
7931 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7932 if (ret) {
7933 return ret;
7935 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7936 if (ret) {
7937 return ret;
7939 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7940 if (ret) {
7941 return ret;
7945 * This takes a timespec, and not a timeval, so we cannot
7946 * use the do_select() helper ...
7948 if (ts_addr) {
7949 if (target_to_host_timespec(&ts, ts_addr)) {
7950 return -TARGET_EFAULT;
7952 ts_ptr = &ts;
7953 } else {
7954 ts_ptr = NULL;
7957 /* Extract the two packed args for the sigset */
7958 if (arg6) {
7959 sig_ptr = &sig;
7960 sig.size = SIGSET_T_SIZE;
7962 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7963 if (!arg7) {
7964 return -TARGET_EFAULT;
7966 arg_sigset = tswapal(arg7[0]);
7967 arg_sigsize = tswapal(arg7[1]);
7968 unlock_user(arg7, arg6, 0);
7970 if (arg_sigset) {
7971 sig.set = &set;
7972 if (arg_sigsize != sizeof(*target_sigset)) {
7973 /* Like the kernel, we enforce correct size sigsets */
7974 return -TARGET_EINVAL;
7976 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7977 sizeof(*target_sigset), 1);
7978 if (!target_sigset) {
7979 return -TARGET_EFAULT;
7981 target_to_host_sigset(&set, target_sigset);
7982 unlock_user(target_sigset, arg_sigset, 0);
7983 } else {
7984 sig.set = NULL;
7986 } else {
7987 sig_ptr = NULL;
7990 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7991 ts_ptr, sig_ptr));
7993 if (!is_error(ret)) {
7994 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7995 return -TARGET_EFAULT;
7996 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7997 return -TARGET_EFAULT;
7998 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7999 return -TARGET_EFAULT;
8001 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8002 return -TARGET_EFAULT;
8005 return ret;
8006 #endif
8007 #ifdef TARGET_NR_symlink
8008 case TARGET_NR_symlink:
8010 void *p2;
8011 p = lock_user_string(arg1);
8012 p2 = lock_user_string(arg2);
8013 if (!p || !p2)
8014 ret = -TARGET_EFAULT;
8015 else
8016 ret = get_errno(symlink(p, p2));
8017 unlock_user(p2, arg2, 0);
8018 unlock_user(p, arg1, 0);
8020 return ret;
8021 #endif
8022 #if defined(TARGET_NR_symlinkat)
8023 case TARGET_NR_symlinkat:
8025 void *p2;
8026 p = lock_user_string(arg1);
8027 p2 = lock_user_string(arg3);
8028 if (!p || !p2)
8029 ret = -TARGET_EFAULT;
8030 else
8031 ret = get_errno(symlinkat(p, arg2, p2));
8032 unlock_user(p2, arg3, 0);
8033 unlock_user(p, arg1, 0);
8035 return ret;
8036 #endif
8037 #ifdef TARGET_NR_readlink
8038 case TARGET_NR_readlink:
8040 void *p2;
8041 p = lock_user_string(arg1);
8042 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8043 if (!p || !p2) {
8044 ret = -TARGET_EFAULT;
8045 } else if (!arg3) {
8046 /* Short circuit this for the magic exe check. */
8047 ret = -TARGET_EINVAL;
8048 } else if (is_proc_myself((const char *)p, "exe")) {
8049 char real[PATH_MAX], *temp;
8050 temp = realpath(exec_path, real);
8051 /* Return value is # of bytes that we wrote to the buffer. */
8052 if (temp == NULL) {
8053 ret = get_errno(-1);
8054 } else {
8055 /* Don't worry about sign mismatch as earlier mapping
8056 * logic would have thrown a bad address error. */
8057 ret = MIN(strlen(real), arg3);
8058 /* We cannot NUL terminate the string. */
8059 memcpy(p2, real, ret);
8061 } else {
8062 ret = get_errno(readlink(path(p), p2, arg3));
8064 unlock_user(p2, arg2, ret);
8065 unlock_user(p, arg1, 0);
8067 return ret;
8068 #endif
8069 #if defined(TARGET_NR_readlinkat)
8070 case TARGET_NR_readlinkat:
8072 void *p2;
8073 p = lock_user_string(arg2);
8074 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8075 if (!p || !p2) {
8076 ret = -TARGET_EFAULT;
8077 } else if (is_proc_myself((const char *)p, "exe")) {
8078 char real[PATH_MAX], *temp;
8079 temp = realpath(exec_path, real);
8080 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8081 snprintf((char *)p2, arg4, "%s", real);
8082 } else {
8083 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8085 unlock_user(p2, arg3, ret);
8086 unlock_user(p, arg2, 0);
8088 return ret;
8089 #endif
8090 #ifdef TARGET_NR_swapon
8091 case TARGET_NR_swapon:
8092 if (!(p = lock_user_string(arg1)))
8093 return -TARGET_EFAULT;
8094 ret = get_errno(swapon(p, arg2));
8095 unlock_user(p, arg1, 0);
8096 return ret;
8097 #endif
8098 case TARGET_NR_reboot:
8099 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8100 /* arg4 must be ignored in all other cases */
8101 p = lock_user_string(arg4);
8102 if (!p) {
8103 return -TARGET_EFAULT;
8105 ret = get_errno(reboot(arg1, arg2, arg3, p));
8106 unlock_user(p, arg4, 0);
8107 } else {
8108 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8110 return ret;
8111 #ifdef TARGET_NR_mmap
8112 case TARGET_NR_mmap:
8113 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8114 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8115 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8116 || defined(TARGET_S390X)
8118 abi_ulong *v;
8119 abi_ulong v1, v2, v3, v4, v5, v6;
8120 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8121 return -TARGET_EFAULT;
8122 v1 = tswapal(v[0]);
8123 v2 = tswapal(v[1]);
8124 v3 = tswapal(v[2]);
8125 v4 = tswapal(v[3]);
8126 v5 = tswapal(v[4]);
8127 v6 = tswapal(v[5]);
8128 unlock_user(v, arg1, 0);
8129 ret = get_errno(target_mmap(v1, v2, v3,
8130 target_to_host_bitmask(v4, mmap_flags_tbl),
8131 v5, v6));
8133 #else
8134 ret = get_errno(target_mmap(arg1, arg2, arg3,
8135 target_to_host_bitmask(arg4, mmap_flags_tbl),
8136 arg5,
8137 arg6));
8138 #endif
8139 return ret;
8140 #endif
8141 #ifdef TARGET_NR_mmap2
8142 case TARGET_NR_mmap2:
8143 #ifndef MMAP_SHIFT
8144 #define MMAP_SHIFT 12
8145 #endif
8146 ret = target_mmap(arg1, arg2, arg3,
8147 target_to_host_bitmask(arg4, mmap_flags_tbl),
8148 arg5, arg6 << MMAP_SHIFT);
8149 return get_errno(ret);
8150 #endif
8151 case TARGET_NR_munmap:
8152 return get_errno(target_munmap(arg1, arg2));
8153 case TARGET_NR_mprotect:
8155 TaskState *ts = cpu->opaque;
8156 /* Special hack to detect libc making the stack executable. */
8157 if ((arg3 & PROT_GROWSDOWN)
8158 && arg1 >= ts->info->stack_limit
8159 && arg1 <= ts->info->start_stack) {
8160 arg3 &= ~PROT_GROWSDOWN;
8161 arg2 = arg2 + arg1 - ts->info->stack_limit;
8162 arg1 = ts->info->stack_limit;
8165 return get_errno(target_mprotect(arg1, arg2, arg3));
8166 #ifdef TARGET_NR_mremap
8167 case TARGET_NR_mremap:
8168 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8169 #endif
8170 /* ??? msync/mlock/munlock are broken for softmmu. */
8171 #ifdef TARGET_NR_msync
8172 case TARGET_NR_msync:
8173 return get_errno(msync(g2h(arg1), arg2, arg3));
8174 #endif
8175 #ifdef TARGET_NR_mlock
8176 case TARGET_NR_mlock:
8177 return get_errno(mlock(g2h(arg1), arg2));
8178 #endif
8179 #ifdef TARGET_NR_munlock
8180 case TARGET_NR_munlock:
8181 return get_errno(munlock(g2h(arg1), arg2));
8182 #endif
8183 #ifdef TARGET_NR_mlockall
8184 case TARGET_NR_mlockall:
8185 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8186 #endif
8187 #ifdef TARGET_NR_munlockall
8188 case TARGET_NR_munlockall:
8189 return get_errno(munlockall());
8190 #endif
8191 #ifdef TARGET_NR_truncate
8192 case TARGET_NR_truncate:
8193 if (!(p = lock_user_string(arg1)))
8194 return -TARGET_EFAULT;
8195 ret = get_errno(truncate(p, arg2));
8196 unlock_user(p, arg1, 0);
8197 return ret;
8198 #endif
8199 #ifdef TARGET_NR_ftruncate
8200 case TARGET_NR_ftruncate:
8201 return get_errno(ftruncate(arg1, arg2));
8202 #endif
8203 case TARGET_NR_fchmod:
8204 return get_errno(fchmod(arg1, arg2));
8205 #if defined(TARGET_NR_fchmodat)
8206 case TARGET_NR_fchmodat:
8207 if (!(p = lock_user_string(arg2)))
8208 return -TARGET_EFAULT;
8209 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8210 unlock_user(p, arg2, 0);
8211 return ret;
8212 #endif
8213 case TARGET_NR_getpriority:
8214 /* Note that negative values are valid for getpriority, so we must
8215 differentiate based on errno settings. */
8216 errno = 0;
8217 ret = getpriority(arg1, arg2);
8218 if (ret == -1 && errno != 0) {
8219 return -host_to_target_errno(errno);
8221 #ifdef TARGET_ALPHA
8222 /* Return value is the unbiased priority. Signal no error. */
8223 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8224 #else
8225 /* Return value is a biased priority to avoid negative numbers. */
8226 ret = 20 - ret;
8227 #endif
8228 return ret;
8229 case TARGET_NR_setpriority:
8230 return get_errno(setpriority(arg1, arg2, arg3));
8231 #ifdef TARGET_NR_statfs
8232 case TARGET_NR_statfs:
8233 if (!(p = lock_user_string(arg1))) {
8234 return -TARGET_EFAULT;
8236 ret = get_errno(statfs(path(p), &stfs));
8237 unlock_user(p, arg1, 0);
8238 convert_statfs:
8239 if (!is_error(ret)) {
8240 struct target_statfs *target_stfs;
8242 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8243 return -TARGET_EFAULT;
8244 __put_user(stfs.f_type, &target_stfs->f_type);
8245 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8246 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8247 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8248 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8249 __put_user(stfs.f_files, &target_stfs->f_files);
8250 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8251 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8252 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8253 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8254 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8255 #ifdef _STATFS_F_FLAGS
8256 __put_user(stfs.f_flags, &target_stfs->f_flags);
8257 #else
8258 __put_user(0, &target_stfs->f_flags);
8259 #endif
8260 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8261 unlock_user_struct(target_stfs, arg2, 1);
8263 return ret;
8264 #endif
8265 #ifdef TARGET_NR_fstatfs
8266 case TARGET_NR_fstatfs:
8267 ret = get_errno(fstatfs(arg1, &stfs));
8268 goto convert_statfs;
8269 #endif
8270 #ifdef TARGET_NR_statfs64
8271 case TARGET_NR_statfs64:
8272 if (!(p = lock_user_string(arg1))) {
8273 return -TARGET_EFAULT;
8275 ret = get_errno(statfs(path(p), &stfs));
8276 unlock_user(p, arg1, 0);
8277 convert_statfs64:
8278 if (!is_error(ret)) {
8279 struct target_statfs64 *target_stfs;
8281 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8282 return -TARGET_EFAULT;
8283 __put_user(stfs.f_type, &target_stfs->f_type);
8284 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8285 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8286 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8287 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8288 __put_user(stfs.f_files, &target_stfs->f_files);
8289 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8290 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8291 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8292 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8293 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8294 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8295 unlock_user_struct(target_stfs, arg3, 1);
8297 return ret;
8298 case TARGET_NR_fstatfs64:
8299 ret = get_errno(fstatfs(arg1, &stfs));
8300 goto convert_statfs64;
8301 #endif
8302 #ifdef TARGET_NR_socketcall
8303 case TARGET_NR_socketcall:
8304 return do_socketcall(arg1, arg2);
8305 #endif
8306 #ifdef TARGET_NR_accept
8307 case TARGET_NR_accept:
8308 return do_accept4(arg1, arg2, arg3, 0);
8309 #endif
8310 #ifdef TARGET_NR_accept4
8311 case TARGET_NR_accept4:
8312 return do_accept4(arg1, arg2, arg3, arg4);
8313 #endif
8314 #ifdef TARGET_NR_bind
8315 case TARGET_NR_bind:
8316 return do_bind(arg1, arg2, arg3);
8317 #endif
8318 #ifdef TARGET_NR_connect
8319 case TARGET_NR_connect:
8320 return do_connect(arg1, arg2, arg3);
8321 #endif
8322 #ifdef TARGET_NR_getpeername
8323 case TARGET_NR_getpeername:
8324 return do_getpeername(arg1, arg2, arg3);
8325 #endif
8326 #ifdef TARGET_NR_getsockname
8327 case TARGET_NR_getsockname:
8328 return do_getsockname(arg1, arg2, arg3);
8329 #endif
8330 #ifdef TARGET_NR_getsockopt
8331 case TARGET_NR_getsockopt:
8332 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8333 #endif
8334 #ifdef TARGET_NR_listen
8335 case TARGET_NR_listen:
8336 return get_errno(listen(arg1, arg2));
8337 #endif
8338 #ifdef TARGET_NR_recv
8339 case TARGET_NR_recv:
8340 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8341 #endif
8342 #ifdef TARGET_NR_recvfrom
8343 case TARGET_NR_recvfrom:
8344 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8345 #endif
8346 #ifdef TARGET_NR_recvmsg
8347 case TARGET_NR_recvmsg:
8348 return do_sendrecvmsg(arg1, arg2, arg3, 0);
8349 #endif
8350 #ifdef TARGET_NR_send
8351 case TARGET_NR_send:
8352 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8353 #endif
8354 #ifdef TARGET_NR_sendmsg
8355 case TARGET_NR_sendmsg:
8356 return do_sendrecvmsg(arg1, arg2, arg3, 1);
8357 #endif
8358 #ifdef TARGET_NR_sendmmsg
8359 case TARGET_NR_sendmmsg:
8360 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8361 case TARGET_NR_recvmmsg:
8362 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8363 #endif
8364 #ifdef TARGET_NR_sendto
8365 case TARGET_NR_sendto:
8366 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8367 #endif
8368 #ifdef TARGET_NR_shutdown
8369 case TARGET_NR_shutdown:
8370 return get_errno(shutdown(arg1, arg2));
8371 #endif
8372 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8373 case TARGET_NR_getrandom:
8374 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8375 if (!p) {
8376 return -TARGET_EFAULT;
8378 ret = get_errno(getrandom(p, arg2, arg3));
8379 unlock_user(p, arg1, ret);
8380 return ret;
8381 #endif
8382 #ifdef TARGET_NR_socket
8383 case TARGET_NR_socket:
8384 return do_socket(arg1, arg2, arg3);
8385 #endif
8386 #ifdef TARGET_NR_socketpair
8387 case TARGET_NR_socketpair:
8388 return do_socketpair(arg1, arg2, arg3, arg4);
8389 #endif
8390 #ifdef TARGET_NR_setsockopt
8391 case TARGET_NR_setsockopt:
8392 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8393 #endif
8394 #if defined(TARGET_NR_syslog)
8395 case TARGET_NR_syslog:
8397 int len = arg2;
8399 switch (arg1) {
8400 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
8401 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
8402 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
8403 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
8404 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
8405 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8406 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
8407 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
8408 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8409 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
8410 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
8411 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
8413 if (len < 0) {
8414 return -TARGET_EINVAL;
8416 if (len == 0) {
8417 return 0;
8419 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8420 if (!p) {
8421 return -TARGET_EFAULT;
8423 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8424 unlock_user(p, arg2, arg3);
8426 return ret;
8427 default:
8428 return -TARGET_EINVAL;
8431 break;
8432 #endif
8433 case TARGET_NR_setitimer:
8435 struct itimerval value, ovalue, *pvalue;
8437 if (arg2) {
8438 pvalue = &value;
8439 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8440 || copy_from_user_timeval(&pvalue->it_value,
8441 arg2 + sizeof(struct target_timeval)))
8442 return -TARGET_EFAULT;
8443 } else {
8444 pvalue = NULL;
8446 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8447 if (!is_error(ret) && arg3) {
8448 if (copy_to_user_timeval(arg3,
8449 &ovalue.it_interval)
8450 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8451 &ovalue.it_value))
8452 return -TARGET_EFAULT;
8455 return ret;
8456 case TARGET_NR_getitimer:
8458 struct itimerval value;
8460 ret = get_errno(getitimer(arg1, &value));
8461 if (!is_error(ret) && arg2) {
8462 if (copy_to_user_timeval(arg2,
8463 &value.it_interval)
8464 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8465 &value.it_value))
8466 return -TARGET_EFAULT;
8469 return ret;
8470 #ifdef TARGET_NR_stat
8471 case TARGET_NR_stat:
8472 if (!(p = lock_user_string(arg1))) {
8473 return -TARGET_EFAULT;
8475 ret = get_errno(stat(path(p), &st));
8476 unlock_user(p, arg1, 0);
8477 goto do_stat;
8478 #endif
8479 #ifdef TARGET_NR_lstat
8480 case TARGET_NR_lstat:
8481 if (!(p = lock_user_string(arg1))) {
8482 return -TARGET_EFAULT;
8484 ret = get_errno(lstat(path(p), &st));
8485 unlock_user(p, arg1, 0);
8486 goto do_stat;
8487 #endif
8488 #ifdef TARGET_NR_fstat
8489 case TARGET_NR_fstat:
8491 ret = get_errno(fstat(arg1, &st));
8492 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8493 do_stat:
8494 #endif
8495 if (!is_error(ret)) {
8496 struct target_stat *target_st;
8498 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8499 return -TARGET_EFAULT;
8500 memset(target_st, 0, sizeof(*target_st));
8501 __put_user(st.st_dev, &target_st->st_dev);
8502 __put_user(st.st_ino, &target_st->st_ino);
8503 __put_user(st.st_mode, &target_st->st_mode);
8504 __put_user(st.st_uid, &target_st->st_uid);
8505 __put_user(st.st_gid, &target_st->st_gid);
8506 __put_user(st.st_nlink, &target_st->st_nlink);
8507 __put_user(st.st_rdev, &target_st->st_rdev);
8508 __put_user(st.st_size, &target_st->st_size);
8509 __put_user(st.st_blksize, &target_st->st_blksize);
8510 __put_user(st.st_blocks, &target_st->st_blocks);
8511 __put_user(st.st_atime, &target_st->target_st_atime);
8512 __put_user(st.st_mtime, &target_st->target_st_mtime);
8513 __put_user(st.st_ctime, &target_st->target_st_ctime);
8514 unlock_user_struct(target_st, arg2, 1);
8517 return ret;
8518 #endif
8519 case TARGET_NR_vhangup:
8520 return get_errno(vhangup());
8521 #ifdef TARGET_NR_syscall
8522 case TARGET_NR_syscall:
8523 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8524 arg6, arg7, arg8, 0);
8525 #endif
8526 case TARGET_NR_wait4:
8528 int status;
8529 abi_long status_ptr = arg2;
8530 struct rusage rusage, *rusage_ptr;
8531 abi_ulong target_rusage = arg4;
8532 abi_long rusage_err;
8533 if (target_rusage)
8534 rusage_ptr = &rusage;
8535 else
8536 rusage_ptr = NULL;
8537 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8538 if (!is_error(ret)) {
8539 if (status_ptr && ret) {
8540 status = host_to_target_waitstatus(status);
8541 if (put_user_s32(status, status_ptr))
8542 return -TARGET_EFAULT;
8544 if (target_rusage) {
8545 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8546 if (rusage_err) {
8547 ret = rusage_err;
8552 return ret;
8553 #ifdef TARGET_NR_swapoff
8554 case TARGET_NR_swapoff:
8555 if (!(p = lock_user_string(arg1)))
8556 return -TARGET_EFAULT;
8557 ret = get_errno(swapoff(p));
8558 unlock_user(p, arg1, 0);
8559 return ret;
8560 #endif
8561 case TARGET_NR_sysinfo:
8563 struct target_sysinfo *target_value;
8564 struct sysinfo value;
8565 ret = get_errno(sysinfo(&value));
8566 if (!is_error(ret) && arg1)
8568 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8569 return -TARGET_EFAULT;
8570 __put_user(value.uptime, &target_value->uptime);
8571 __put_user(value.loads[0], &target_value->loads[0]);
8572 __put_user(value.loads[1], &target_value->loads[1]);
8573 __put_user(value.loads[2], &target_value->loads[2]);
8574 __put_user(value.totalram, &target_value->totalram);
8575 __put_user(value.freeram, &target_value->freeram);
8576 __put_user(value.sharedram, &target_value->sharedram);
8577 __put_user(value.bufferram, &target_value->bufferram);
8578 __put_user(value.totalswap, &target_value->totalswap);
8579 __put_user(value.freeswap, &target_value->freeswap);
8580 __put_user(value.procs, &target_value->procs);
8581 __put_user(value.totalhigh, &target_value->totalhigh);
8582 __put_user(value.freehigh, &target_value->freehigh);
8583 __put_user(value.mem_unit, &target_value->mem_unit);
8584 unlock_user_struct(target_value, arg1, 1);
8587 return ret;
8588 #ifdef TARGET_NR_ipc
8589 case TARGET_NR_ipc:
8590 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8591 #endif
8592 #ifdef TARGET_NR_semget
8593 case TARGET_NR_semget:
8594 return get_errno(semget(arg1, arg2, arg3));
8595 #endif
8596 #ifdef TARGET_NR_semop
8597 case TARGET_NR_semop:
8598 return do_semop(arg1, arg2, arg3);
8599 #endif
8600 #ifdef TARGET_NR_semctl
8601 case TARGET_NR_semctl:
8602 return do_semctl(arg1, arg2, arg3, arg4);
8603 #endif
8604 #ifdef TARGET_NR_msgctl
8605 case TARGET_NR_msgctl:
8606 return do_msgctl(arg1, arg2, arg3);
8607 #endif
8608 #ifdef TARGET_NR_msgget
8609 case TARGET_NR_msgget:
8610 return get_errno(msgget(arg1, arg2));
8611 #endif
8612 #ifdef TARGET_NR_msgrcv
8613 case TARGET_NR_msgrcv:
8614 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8615 #endif
8616 #ifdef TARGET_NR_msgsnd
8617 case TARGET_NR_msgsnd:
8618 return do_msgsnd(arg1, arg2, arg3, arg4);
8619 #endif
8620 #ifdef TARGET_NR_shmget
8621 case TARGET_NR_shmget:
8622 return get_errno(shmget(arg1, arg2, arg3));
8623 #endif
8624 #ifdef TARGET_NR_shmctl
8625 case TARGET_NR_shmctl:
8626 return do_shmctl(arg1, arg2, arg3);
8627 #endif
8628 #ifdef TARGET_NR_shmat
8629 case TARGET_NR_shmat:
8630 return do_shmat(cpu_env, arg1, arg2, arg3);
8631 #endif
8632 #ifdef TARGET_NR_shmdt
8633 case TARGET_NR_shmdt:
8634 return do_shmdt(arg1);
8635 #endif
8636 case TARGET_NR_fsync:
8637 return get_errno(fsync(arg1));
8638 case TARGET_NR_clone:
8639 /* Linux manages to have three different orderings for its
8640 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8641 * match the kernel's CONFIG_CLONE_* settings.
8642 * Microblaze is further special in that it uses a sixth
8643 * implicit argument to clone for the TLS pointer.
8645 #if defined(TARGET_MICROBLAZE)
8646 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8647 #elif defined(TARGET_CLONE_BACKWARDS)
8648 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8649 #elif defined(TARGET_CLONE_BACKWARDS2)
8650 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8651 #else
8652 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8653 #endif
8654 return ret;
8655 #ifdef __NR_exit_group
8656 /* new thread calls */
8657 case TARGET_NR_exit_group:
8658 preexit_cleanup(cpu_env, arg1);
8659 return get_errno(exit_group(arg1));
8660 #endif
8661 case TARGET_NR_setdomainname:
8662 if (!(p = lock_user_string(arg1)))
8663 return -TARGET_EFAULT;
8664 ret = get_errno(setdomainname(p, arg2));
8665 unlock_user(p, arg1, 0);
8666 return ret;
8667 case TARGET_NR_uname:
8668 /* no need to transcode because we use the linux syscall */
8670 struct new_utsname * buf;
8672 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8673 return -TARGET_EFAULT;
8674 ret = get_errno(sys_uname(buf));
8675 if (!is_error(ret)) {
8676 /* Overwrite the native machine name with whatever is being
8677 emulated. */
8678 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8679 sizeof(buf->machine));
8680 /* Allow the user to override the reported release. */
8681 if (qemu_uname_release && *qemu_uname_release) {
8682 g_strlcpy(buf->release, qemu_uname_release,
8683 sizeof(buf->release));
8686 unlock_user_struct(buf, arg1, 1);
8688 return ret;
8689 #ifdef TARGET_I386
8690 case TARGET_NR_modify_ldt:
8691 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
8692 #if !defined(TARGET_X86_64)
8693 case TARGET_NR_vm86:
8694 return do_vm86(cpu_env, arg1, arg2);
8695 #endif
8696 #endif
8697 case TARGET_NR_adjtimex:
8699 struct timex host_buf;
8701 if (target_to_host_timex(&host_buf, arg1) != 0) {
8702 return -TARGET_EFAULT;
8704 ret = get_errno(adjtimex(&host_buf));
8705 if (!is_error(ret)) {
8706 if (host_to_target_timex(arg1, &host_buf) != 0) {
8707 return -TARGET_EFAULT;
8711 return ret;
8712 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
8713 case TARGET_NR_clock_adjtime:
8715 struct timex htx, *phtx = &htx;
8717 if (target_to_host_timex(phtx, arg2) != 0) {
8718 return -TARGET_EFAULT;
8720 ret = get_errno(clock_adjtime(arg1, phtx));
8721 if (!is_error(ret) && phtx) {
8722 if (host_to_target_timex(arg2, phtx) != 0) {
8723 return -TARGET_EFAULT;
8727 return ret;
8728 #endif
8729 case TARGET_NR_getpgid:
8730 return get_errno(getpgid(arg1));
8731 case TARGET_NR_fchdir:
8732 return get_errno(fchdir(arg1));
8733 case TARGET_NR_personality:
8734 return get_errno(personality(arg1));
8735 #ifdef TARGET_NR__llseek /* Not on alpha */
8736 case TARGET_NR__llseek:
8738 int64_t res;
8739 #if !defined(__NR_llseek)
8740 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
8741 if (res == -1) {
8742 ret = get_errno(res);
8743 } else {
8744 ret = 0;
8746 #else
8747 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8748 #endif
8749 if ((ret == 0) && put_user_s64(res, arg4)) {
8750 return -TARGET_EFAULT;
8753 return ret;
8754 #endif
8755 #ifdef TARGET_NR_getdents
8756 case TARGET_NR_getdents:
8757 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8758 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8760 struct target_dirent *target_dirp;
8761 struct linux_dirent *dirp;
8762 abi_long count = arg3;
8764 dirp = g_try_malloc(count);
8765 if (!dirp) {
8766 return -TARGET_ENOMEM;
8769 ret = get_errno(sys_getdents(arg1, dirp, count));
8770 if (!is_error(ret)) {
8771 struct linux_dirent *de;
8772 struct target_dirent *tde;
8773 int len = ret;
8774 int reclen, treclen;
8775 int count1, tnamelen;
8777 count1 = 0;
8778 de = dirp;
8779 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8780 return -TARGET_EFAULT;
8781 tde = target_dirp;
8782 while (len > 0) {
8783 reclen = de->d_reclen;
8784 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8785 assert(tnamelen >= 0);
8786 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8787 assert(count1 + treclen <= count);
8788 tde->d_reclen = tswap16(treclen);
8789 tde->d_ino = tswapal(de->d_ino);
8790 tde->d_off = tswapal(de->d_off);
8791 memcpy(tde->d_name, de->d_name, tnamelen);
8792 de = (struct linux_dirent *)((char *)de + reclen);
8793 len -= reclen;
8794 tde = (struct target_dirent *)((char *)tde + treclen);
8795 count1 += treclen;
8797 ret = count1;
8798 unlock_user(target_dirp, arg2, ret);
8800 g_free(dirp);
8802 #else
8804 struct linux_dirent *dirp;
8805 abi_long count = arg3;
8807 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8808 return -TARGET_EFAULT;
8809 ret = get_errno(sys_getdents(arg1, dirp, count));
8810 if (!is_error(ret)) {
8811 struct linux_dirent *de;
8812 int len = ret;
8813 int reclen;
8814 de = dirp;
8815 while (len > 0) {
8816 reclen = de->d_reclen;
8817 if (reclen > len)
8818 break;
8819 de->d_reclen = tswap16(reclen);
8820 tswapls(&de->d_ino);
8821 tswapls(&de->d_off);
8822 de = (struct linux_dirent *)((char *)de + reclen);
8823 len -= reclen;
8826 unlock_user(dirp, arg2, ret);
8828 #endif
8829 #else
8830 /* Implement getdents in terms of getdents64 */
8832 struct linux_dirent64 *dirp;
8833 abi_long count = arg3;
8835 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8836 if (!dirp) {
8837 return -TARGET_EFAULT;
8839 ret = get_errno(sys_getdents64(arg1, dirp, count));
8840 if (!is_error(ret)) {
8841 /* Convert the dirent64 structs to target dirent. We do this
8842 * in-place, since we can guarantee that a target_dirent is no
8843 * larger than a dirent64; however this means we have to be
8844 * careful to read everything before writing in the new format.
8846 struct linux_dirent64 *de;
8847 struct target_dirent *tde;
8848 int len = ret;
8849 int tlen = 0;
8851 de = dirp;
8852 tde = (struct target_dirent *)dirp;
8853 while (len > 0) {
8854 int namelen, treclen;
8855 int reclen = de->d_reclen;
8856 uint64_t ino = de->d_ino;
8857 int64_t off = de->d_off;
8858 uint8_t type = de->d_type;
8860 namelen = strlen(de->d_name);
8861 treclen = offsetof(struct target_dirent, d_name)
8862 + namelen + 2;
8863 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8865 memmove(tde->d_name, de->d_name, namelen + 1);
8866 tde->d_ino = tswapal(ino);
8867 tde->d_off = tswapal(off);
8868 tde->d_reclen = tswap16(treclen);
8869 /* The target_dirent type is in what was formerly a padding
8870 * byte at the end of the structure:
8872 *(((char *)tde) + treclen - 1) = type;
8874 de = (struct linux_dirent64 *)((char *)de + reclen);
8875 tde = (struct target_dirent *)((char *)tde + treclen);
8876 len -= reclen;
8877 tlen += treclen;
8879 ret = tlen;
8881 unlock_user(dirp, arg2, ret);
8883 #endif
8884 return ret;
8885 #endif /* TARGET_NR_getdents */
8886 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8887 case TARGET_NR_getdents64:
8889 struct linux_dirent64 *dirp;
8890 abi_long count = arg3;
8891 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8892 return -TARGET_EFAULT;
8893 ret = get_errno(sys_getdents64(arg1, dirp, count));
8894 if (!is_error(ret)) {
8895 struct linux_dirent64 *de;
8896 int len = ret;
8897 int reclen;
8898 de = dirp;
8899 while (len > 0) {
8900 reclen = de->d_reclen;
8901 if (reclen > len)
8902 break;
8903 de->d_reclen = tswap16(reclen);
8904 tswap64s((uint64_t *)&de->d_ino);
8905 tswap64s((uint64_t *)&de->d_off);
8906 de = (struct linux_dirent64 *)((char *)de + reclen);
8907 len -= reclen;
8910 unlock_user(dirp, arg2, ret);
8912 return ret;
8913 #endif /* TARGET_NR_getdents64 */
8914 #if defined(TARGET_NR__newselect)
8915 case TARGET_NR__newselect:
8916 return do_select(arg1, arg2, arg3, arg4, arg5);
8917 #endif
8918 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8919 # ifdef TARGET_NR_poll
8920 case TARGET_NR_poll:
8921 # endif
8922 # ifdef TARGET_NR_ppoll
8923 case TARGET_NR_ppoll:
8924 # endif
8926 struct target_pollfd *target_pfd;
8927 unsigned int nfds = arg2;
8928 struct pollfd *pfd;
8929 unsigned int i;
8931 pfd = NULL;
8932 target_pfd = NULL;
8933 if (nfds) {
8934 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
8935 return -TARGET_EINVAL;
8938 target_pfd = lock_user(VERIFY_WRITE, arg1,
8939 sizeof(struct target_pollfd) * nfds, 1);
8940 if (!target_pfd) {
8941 return -TARGET_EFAULT;
8944 pfd = alloca(sizeof(struct pollfd) * nfds);
8945 for (i = 0; i < nfds; i++) {
8946 pfd[i].fd = tswap32(target_pfd[i].fd);
8947 pfd[i].events = tswap16(target_pfd[i].events);
8951 switch (num) {
8952 # ifdef TARGET_NR_ppoll
8953 case TARGET_NR_ppoll:
8955 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8956 target_sigset_t *target_set;
8957 sigset_t _set, *set = &_set;
8959 if (arg3) {
8960 if (target_to_host_timespec(timeout_ts, arg3)) {
8961 unlock_user(target_pfd, arg1, 0);
8962 return -TARGET_EFAULT;
8964 } else {
8965 timeout_ts = NULL;
8968 if (arg4) {
8969 if (arg5 != sizeof(target_sigset_t)) {
8970 unlock_user(target_pfd, arg1, 0);
8971 return -TARGET_EINVAL;
8974 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8975 if (!target_set) {
8976 unlock_user(target_pfd, arg1, 0);
8977 return -TARGET_EFAULT;
8979 target_to_host_sigset(set, target_set);
8980 } else {
8981 set = NULL;
8984 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
8985 set, SIGSET_T_SIZE));
8987 if (!is_error(ret) && arg3) {
8988 host_to_target_timespec(arg3, timeout_ts);
8990 if (arg4) {
8991 unlock_user(target_set, arg4, 0);
8993 break;
8995 # endif
8996 # ifdef TARGET_NR_poll
8997 case TARGET_NR_poll:
8999 struct timespec ts, *pts;
9001 if (arg3 >= 0) {
9002 /* Convert ms to secs, ns */
9003 ts.tv_sec = arg3 / 1000;
9004 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9005 pts = &ts;
9006 } else {
9007 /* -ve poll() timeout means "infinite" */
9008 pts = NULL;
9010 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9011 break;
9013 # endif
9014 default:
9015 g_assert_not_reached();
9018 if (!is_error(ret)) {
9019 for(i = 0; i < nfds; i++) {
9020 target_pfd[i].revents = tswap16(pfd[i].revents);
9023 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9025 return ret;
9026 #endif
9027 case TARGET_NR_flock:
9028 /* NOTE: the flock constant seems to be the same for every
9029 Linux platform */
9030 return get_errno(safe_flock(arg1, arg2));
9031 case TARGET_NR_readv:
9033 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9034 if (vec != NULL) {
9035 ret = get_errno(safe_readv(arg1, vec, arg3));
9036 unlock_iovec(vec, arg2, arg3, 1);
9037 } else {
9038 ret = -host_to_target_errno(errno);
9041 return ret;
9042 case TARGET_NR_writev:
9044 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9045 if (vec != NULL) {
9046 ret = get_errno(safe_writev(arg1, vec, arg3));
9047 unlock_iovec(vec, arg2, arg3, 0);
9048 } else {
9049 ret = -host_to_target_errno(errno);
9052 return ret;
9053 #if defined(TARGET_NR_preadv)
9054 case TARGET_NR_preadv:
9056 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9057 if (vec != NULL) {
9058 unsigned long low, high;
9060 target_to_host_low_high(arg4, arg5, &low, &high);
9061 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9062 unlock_iovec(vec, arg2, arg3, 1);
9063 } else {
9064 ret = -host_to_target_errno(errno);
9067 return ret;
9068 #endif
9069 #if defined(TARGET_NR_pwritev)
9070 case TARGET_NR_pwritev:
9072 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9073 if (vec != NULL) {
9074 unsigned long low, high;
9076 target_to_host_low_high(arg4, arg5, &low, &high);
9077 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9078 unlock_iovec(vec, arg2, arg3, 0);
9079 } else {
9080 ret = -host_to_target_errno(errno);
9083 return ret;
9084 #endif
9085 case TARGET_NR_getsid:
9086 return get_errno(getsid(arg1));
9087 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9088 case TARGET_NR_fdatasync:
9089 return get_errno(fdatasync(arg1));
9090 #endif
9091 #ifdef TARGET_NR__sysctl
9092 case TARGET_NR__sysctl:
9093 /* We don't implement this, but ENOTDIR is always a safe
9094 return value. */
9095 return -TARGET_ENOTDIR;
9096 #endif
9097 case TARGET_NR_sched_getaffinity:
9099 unsigned int mask_size;
9100 unsigned long *mask;
9103 * sched_getaffinity needs multiples of ulong, so need to take
9104 * care of mismatches between target ulong and host ulong sizes.
9106 if (arg2 & (sizeof(abi_ulong) - 1)) {
9107 return -TARGET_EINVAL;
9109 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9111 mask = alloca(mask_size);
9112 memset(mask, 0, mask_size);
9113 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9115 if (!is_error(ret)) {
9116 if (ret > arg2) {
9117 /* More data returned than the caller's buffer will fit.
9118 * This only happens if sizeof(abi_long) < sizeof(long)
9119 * and the caller passed us a buffer holding an odd number
9120 * of abi_longs. If the host kernel is actually using the
9121 * extra 4 bytes then fail EINVAL; otherwise we can just
9122 * ignore them and only copy the interesting part.
9124 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9125 if (numcpus > arg2 * 8) {
9126 return -TARGET_EINVAL;
9128 ret = arg2;
9131 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9132 return -TARGET_EFAULT;
9136 return ret;
9137 case TARGET_NR_sched_setaffinity:
9139 unsigned int mask_size;
9140 unsigned long *mask;
9143 * sched_setaffinity needs multiples of ulong, so need to take
9144 * care of mismatches between target ulong and host ulong sizes.
9146 if (arg2 & (sizeof(abi_ulong) - 1)) {
9147 return -TARGET_EINVAL;
9149 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9150 mask = alloca(mask_size);
9152 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9153 if (ret) {
9154 return ret;
9157 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9159 case TARGET_NR_getcpu:
9161 unsigned cpu, node;
9162 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9163 arg2 ? &node : NULL,
9164 NULL));
9165 if (is_error(ret)) {
9166 return ret;
9168 if (arg1 && put_user_u32(cpu, arg1)) {
9169 return -TARGET_EFAULT;
9171 if (arg2 && put_user_u32(node, arg2)) {
9172 return -TARGET_EFAULT;
9175 return ret;
9176 case TARGET_NR_sched_setparam:
9178 struct sched_param *target_schp;
9179 struct sched_param schp;
9181 if (arg2 == 0) {
9182 return -TARGET_EINVAL;
9184 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9185 return -TARGET_EFAULT;
9186 schp.sched_priority = tswap32(target_schp->sched_priority);
9187 unlock_user_struct(target_schp, arg2, 0);
9188 return get_errno(sched_setparam(arg1, &schp));
9190 case TARGET_NR_sched_getparam:
9192 struct sched_param *target_schp;
9193 struct sched_param schp;
9195 if (arg2 == 0) {
9196 return -TARGET_EINVAL;
9198 ret = get_errno(sched_getparam(arg1, &schp));
9199 if (!is_error(ret)) {
9200 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9201 return -TARGET_EFAULT;
9202 target_schp->sched_priority = tswap32(schp.sched_priority);
9203 unlock_user_struct(target_schp, arg2, 1);
9206 return ret;
9207 case TARGET_NR_sched_setscheduler:
9209 struct sched_param *target_schp;
9210 struct sched_param schp;
9211 if (arg3 == 0) {
9212 return -TARGET_EINVAL;
9214 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9215 return -TARGET_EFAULT;
9216 schp.sched_priority = tswap32(target_schp->sched_priority);
9217 unlock_user_struct(target_schp, arg3, 0);
9218 return get_errno(sched_setscheduler(arg1, arg2, &schp));
9220 case TARGET_NR_sched_getscheduler:
9221 return get_errno(sched_getscheduler(arg1));
9222 case TARGET_NR_sched_yield:
9223 return get_errno(sched_yield());
9224 case TARGET_NR_sched_get_priority_max:
9225 return get_errno(sched_get_priority_max(arg1));
9226 case TARGET_NR_sched_get_priority_min:
9227 return get_errno(sched_get_priority_min(arg1));
9228 case TARGET_NR_sched_rr_get_interval:
9230 struct timespec ts;
9231 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9232 if (!is_error(ret)) {
9233 ret = host_to_target_timespec(arg2, &ts);
9236 return ret;
9237 case TARGET_NR_nanosleep:
9239 struct timespec req, rem;
9240 target_to_host_timespec(&req, arg1);
9241 ret = get_errno(safe_nanosleep(&req, &rem));
9242 if (is_error(ret) && arg2) {
9243 host_to_target_timespec(arg2, &rem);
9246 return ret;
9247 case TARGET_NR_prctl:
9248 switch (arg1) {
9249 case PR_GET_PDEATHSIG:
9251 int deathsig;
9252 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9253 if (!is_error(ret) && arg2
9254 && put_user_ual(deathsig, arg2)) {
9255 return -TARGET_EFAULT;
9257 return ret;
9259 #ifdef PR_GET_NAME
9260 case PR_GET_NAME:
9262 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9263 if (!name) {
9264 return -TARGET_EFAULT;
9266 ret = get_errno(prctl(arg1, (unsigned long)name,
9267 arg3, arg4, arg5));
9268 unlock_user(name, arg2, 16);
9269 return ret;
9271 case PR_SET_NAME:
9273 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9274 if (!name) {
9275 return -TARGET_EFAULT;
9277 ret = get_errno(prctl(arg1, (unsigned long)name,
9278 arg3, arg4, arg5));
9279 unlock_user(name, arg2, 0);
9280 return ret;
9282 #endif
9283 #ifdef TARGET_AARCH64
9284 case TARGET_PR_SVE_SET_VL:
9286 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9287 * PR_SVE_VL_INHERIT. Note the kernel definition
9288 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9289 * even though the current architectural maximum is VQ=16.
9291 ret = -TARGET_EINVAL;
9292 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
9293 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9294 CPUARMState *env = cpu_env;
9295 ARMCPU *cpu = arm_env_get_cpu(env);
9296 uint32_t vq, old_vq;
9298 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9299 vq = MAX(arg2 / 16, 1);
9300 vq = MIN(vq, cpu->sve_max_vq);
9302 if (vq < old_vq) {
9303 aarch64_sve_narrow_vq(env, vq);
9305 env->vfp.zcr_el[1] = vq - 1;
9306 ret = vq * 16;
9308 return ret;
9309 case TARGET_PR_SVE_GET_VL:
9310 ret = -TARGET_EINVAL;
9311 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
9312 CPUARMState *env = cpu_env;
9313 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
9315 return ret;
9316 #endif /* AARCH64 */
9317 case PR_GET_SECCOMP:
9318 case PR_SET_SECCOMP:
9319 /* Disable seccomp to prevent the target disabling syscalls we
9320 * need. */
9321 return -TARGET_EINVAL;
9322 default:
9323 /* Most prctl options have no pointer arguments */
9324 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9326 break;
9327 #ifdef TARGET_NR_arch_prctl
9328 case TARGET_NR_arch_prctl:
9329 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9330 return do_arch_prctl(cpu_env, arg1, arg2);
9331 #else
9332 #error unreachable
9333 #endif
9334 #endif
9335 #ifdef TARGET_NR_pread64
9336 case TARGET_NR_pread64:
9337 if (regpairs_aligned(cpu_env, num)) {
9338 arg4 = arg5;
9339 arg5 = arg6;
9341 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9342 return -TARGET_EFAULT;
9343 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9344 unlock_user(p, arg2, ret);
9345 return ret;
9346 case TARGET_NR_pwrite64:
9347 if (regpairs_aligned(cpu_env, num)) {
9348 arg4 = arg5;
9349 arg5 = arg6;
9351 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9352 return -TARGET_EFAULT;
9353 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9354 unlock_user(p, arg2, 0);
9355 return ret;
9356 #endif
9357 case TARGET_NR_getcwd:
9358 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9359 return -TARGET_EFAULT;
9360 ret = get_errno(sys_getcwd1(p, arg2));
9361 unlock_user(p, arg1, ret);
9362 return ret;
9363 case TARGET_NR_capget:
9364 case TARGET_NR_capset:
9366 struct target_user_cap_header *target_header;
9367 struct target_user_cap_data *target_data = NULL;
9368 struct __user_cap_header_struct header;
9369 struct __user_cap_data_struct data[2];
9370 struct __user_cap_data_struct *dataptr = NULL;
9371 int i, target_datalen;
9372 int data_items = 1;
9374 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9375 return -TARGET_EFAULT;
9377 header.version = tswap32(target_header->version);
9378 header.pid = tswap32(target_header->pid);
9380 if (header.version != _LINUX_CAPABILITY_VERSION) {
9381 /* Version 2 and up takes pointer to two user_data structs */
9382 data_items = 2;
9385 target_datalen = sizeof(*target_data) * data_items;
9387 if (arg2) {
9388 if (num == TARGET_NR_capget) {
9389 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9390 } else {
9391 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9393 if (!target_data) {
9394 unlock_user_struct(target_header, arg1, 0);
9395 return -TARGET_EFAULT;
9398 if (num == TARGET_NR_capset) {
9399 for (i = 0; i < data_items; i++) {
9400 data[i].effective = tswap32(target_data[i].effective);
9401 data[i].permitted = tswap32(target_data[i].permitted);
9402 data[i].inheritable = tswap32(target_data[i].inheritable);
9406 dataptr = data;
9409 if (num == TARGET_NR_capget) {
9410 ret = get_errno(capget(&header, dataptr));
9411 } else {
9412 ret = get_errno(capset(&header, dataptr));
9415 /* The kernel always updates version for both capget and capset */
9416 target_header->version = tswap32(header.version);
9417 unlock_user_struct(target_header, arg1, 1);
9419 if (arg2) {
9420 if (num == TARGET_NR_capget) {
9421 for (i = 0; i < data_items; i++) {
9422 target_data[i].effective = tswap32(data[i].effective);
9423 target_data[i].permitted = tswap32(data[i].permitted);
9424 target_data[i].inheritable = tswap32(data[i].inheritable);
9426 unlock_user(target_data, arg2, target_datalen);
9427 } else {
9428 unlock_user(target_data, arg2, 0);
9431 return ret;
9433 case TARGET_NR_sigaltstack:
9434 return do_sigaltstack(arg1, arg2,
9435 get_sp_from_cpustate((CPUArchState *)cpu_env));
9437 #ifdef CONFIG_SENDFILE
9438 #ifdef TARGET_NR_sendfile
9439 case TARGET_NR_sendfile:
9441 off_t *offp = NULL;
9442 off_t off;
9443 if (arg3) {
9444 ret = get_user_sal(off, arg3);
9445 if (is_error(ret)) {
9446 return ret;
9448 offp = &off;
9450 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9451 if (!is_error(ret) && arg3) {
9452 abi_long ret2 = put_user_sal(off, arg3);
9453 if (is_error(ret2)) {
9454 ret = ret2;
9457 return ret;
9459 #endif
9460 #ifdef TARGET_NR_sendfile64
9461 case TARGET_NR_sendfile64:
9463 off_t *offp = NULL;
9464 off_t off;
9465 if (arg3) {
9466 ret = get_user_s64(off, arg3);
9467 if (is_error(ret)) {
9468 return ret;
9470 offp = &off;
9472 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9473 if (!is_error(ret) && arg3) {
9474 abi_long ret2 = put_user_s64(off, arg3);
9475 if (is_error(ret2)) {
9476 ret = ret2;
9479 return ret;
9481 #endif
9482 #endif
9483 #ifdef TARGET_NR_vfork
9484 case TARGET_NR_vfork:
9485 return get_errno(do_fork(cpu_env,
9486 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9487 0, 0, 0, 0));
9488 #endif
9489 #ifdef TARGET_NR_ugetrlimit
9490 case TARGET_NR_ugetrlimit:
9492 struct rlimit rlim;
9493 int resource = target_to_host_resource(arg1);
9494 ret = get_errno(getrlimit(resource, &rlim));
9495 if (!is_error(ret)) {
9496 struct target_rlimit *target_rlim;
9497 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9498 return -TARGET_EFAULT;
9499 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9500 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9501 unlock_user_struct(target_rlim, arg2, 1);
9503 return ret;
9505 #endif
9506 #ifdef TARGET_NR_truncate64
9507 case TARGET_NR_truncate64:
9508 if (!(p = lock_user_string(arg1)))
9509 return -TARGET_EFAULT;
9510 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9511 unlock_user(p, arg1, 0);
9512 return ret;
9513 #endif
9514 #ifdef TARGET_NR_ftruncate64
9515 case TARGET_NR_ftruncate64:
9516 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9517 #endif
9518 #ifdef TARGET_NR_stat64
9519 case TARGET_NR_stat64:
9520 if (!(p = lock_user_string(arg1))) {
9521 return -TARGET_EFAULT;
9523 ret = get_errno(stat(path(p), &st));
9524 unlock_user(p, arg1, 0);
9525 if (!is_error(ret))
9526 ret = host_to_target_stat64(cpu_env, arg2, &st);
9527 return ret;
9528 #endif
9529 #ifdef TARGET_NR_lstat64
9530 case TARGET_NR_lstat64:
9531 if (!(p = lock_user_string(arg1))) {
9532 return -TARGET_EFAULT;
9534 ret = get_errno(lstat(path(p), &st));
9535 unlock_user(p, arg1, 0);
9536 if (!is_error(ret))
9537 ret = host_to_target_stat64(cpu_env, arg2, &st);
9538 return ret;
9539 #endif
9540 #ifdef TARGET_NR_fstat64
9541 case TARGET_NR_fstat64:
9542 ret = get_errno(fstat(arg1, &st));
9543 if (!is_error(ret))
9544 ret = host_to_target_stat64(cpu_env, arg2, &st);
9545 return ret;
9546 #endif
9547 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9548 #ifdef TARGET_NR_fstatat64
9549 case TARGET_NR_fstatat64:
9550 #endif
9551 #ifdef TARGET_NR_newfstatat
9552 case TARGET_NR_newfstatat:
9553 #endif
9554 if (!(p = lock_user_string(arg2))) {
9555 return -TARGET_EFAULT;
9557 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9558 unlock_user(p, arg2, 0);
9559 if (!is_error(ret))
9560 ret = host_to_target_stat64(cpu_env, arg3, &st);
9561 return ret;
9562 #endif
9563 #ifdef TARGET_NR_lchown
9564 case TARGET_NR_lchown:
9565 if (!(p = lock_user_string(arg1)))
9566 return -TARGET_EFAULT;
9567 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9568 unlock_user(p, arg1, 0);
9569 return ret;
9570 #endif
9571 #ifdef TARGET_NR_getuid
9572 case TARGET_NR_getuid:
9573 return get_errno(high2lowuid(getuid()));
9574 #endif
9575 #ifdef TARGET_NR_getgid
9576 case TARGET_NR_getgid:
9577 return get_errno(high2lowgid(getgid()));
9578 #endif
9579 #ifdef TARGET_NR_geteuid
9580 case TARGET_NR_geteuid:
9581 return get_errno(high2lowuid(geteuid()));
9582 #endif
9583 #ifdef TARGET_NR_getegid
9584 case TARGET_NR_getegid:
9585 return get_errno(high2lowgid(getegid()));
9586 #endif
9587 case TARGET_NR_setreuid:
9588 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9589 case TARGET_NR_setregid:
9590 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9591 case TARGET_NR_getgroups:
9593 int gidsetsize = arg1;
9594 target_id *target_grouplist;
9595 gid_t *grouplist;
9596 int i;
9598 grouplist = alloca(gidsetsize * sizeof(gid_t));
9599 ret = get_errno(getgroups(gidsetsize, grouplist));
9600 if (gidsetsize == 0)
9601 return ret;
9602 if (!is_error(ret)) {
9603 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9604 if (!target_grouplist)
9605 return -TARGET_EFAULT;
9606 for(i = 0;i < ret; i++)
9607 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9608 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9611 return ret;
9612 case TARGET_NR_setgroups:
9614 int gidsetsize = arg1;
9615 target_id *target_grouplist;
9616 gid_t *grouplist = NULL;
9617 int i;
9618 if (gidsetsize) {
9619 grouplist = alloca(gidsetsize * sizeof(gid_t));
9620 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9621 if (!target_grouplist) {
9622 return -TARGET_EFAULT;
9624 for (i = 0; i < gidsetsize; i++) {
9625 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9627 unlock_user(target_grouplist, arg2, 0);
9629 return get_errno(setgroups(gidsetsize, grouplist));
9631 case TARGET_NR_fchown:
9632 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9633 #if defined(TARGET_NR_fchownat)
9634 case TARGET_NR_fchownat:
9635 if (!(p = lock_user_string(arg2)))
9636 return -TARGET_EFAULT;
9637 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9638 low2highgid(arg4), arg5));
9639 unlock_user(p, arg2, 0);
9640 return ret;
9641 #endif
9642 #ifdef TARGET_NR_setresuid
9643 case TARGET_NR_setresuid:
9644 return get_errno(sys_setresuid(low2highuid(arg1),
9645 low2highuid(arg2),
9646 low2highuid(arg3)));
9647 #endif
9648 #ifdef TARGET_NR_getresuid
9649 case TARGET_NR_getresuid:
9651 uid_t ruid, euid, suid;
9652 ret = get_errno(getresuid(&ruid, &euid, &suid));
9653 if (!is_error(ret)) {
9654 if (put_user_id(high2lowuid(ruid), arg1)
9655 || put_user_id(high2lowuid(euid), arg2)
9656 || put_user_id(high2lowuid(suid), arg3))
9657 return -TARGET_EFAULT;
9660 return ret;
9661 #endif
9662 #ifdef TARGET_NR_getresgid
9663 case TARGET_NR_setresgid:
9664 return get_errno(sys_setresgid(low2highgid(arg1),
9665 low2highgid(arg2),
9666 low2highgid(arg3)));
9667 #endif
9668 #ifdef TARGET_NR_getresgid
9669 case TARGET_NR_getresgid:
9671 gid_t rgid, egid, sgid;
9672 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9673 if (!is_error(ret)) {
9674 if (put_user_id(high2lowgid(rgid), arg1)
9675 || put_user_id(high2lowgid(egid), arg2)
9676 || put_user_id(high2lowgid(sgid), arg3))
9677 return -TARGET_EFAULT;
9680 return ret;
9681 #endif
9682 #ifdef TARGET_NR_chown
9683 case TARGET_NR_chown:
9684 if (!(p = lock_user_string(arg1)))
9685 return -TARGET_EFAULT;
9686 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9687 unlock_user(p, arg1, 0);
9688 return ret;
9689 #endif
9690 case TARGET_NR_setuid:
9691 return get_errno(sys_setuid(low2highuid(arg1)));
9692 case TARGET_NR_setgid:
9693 return get_errno(sys_setgid(low2highgid(arg1)));
9694 case TARGET_NR_setfsuid:
9695 return get_errno(setfsuid(arg1));
9696 case TARGET_NR_setfsgid:
9697 return get_errno(setfsgid(arg1));
9699 #ifdef TARGET_NR_lchown32
9700 case TARGET_NR_lchown32:
9701 if (!(p = lock_user_string(arg1)))
9702 return -TARGET_EFAULT;
9703 ret = get_errno(lchown(p, arg2, arg3));
9704 unlock_user(p, arg1, 0);
9705 return ret;
9706 #endif
9707 #ifdef TARGET_NR_getuid32
9708 case TARGET_NR_getuid32:
9709 return get_errno(getuid());
9710 #endif
9712 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9713 /* Alpha specific */
9714 case TARGET_NR_getxuid:
9716 uid_t euid;
9717 euid=geteuid();
9718 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9720 return get_errno(getuid());
9721 #endif
9722 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9723 /* Alpha specific */
9724 case TARGET_NR_getxgid:
9726 uid_t egid;
9727 egid=getegid();
9728 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9730 return get_errno(getgid());
9731 #endif
9732 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9733 /* Alpha specific */
9734 case TARGET_NR_osf_getsysinfo:
9735 ret = -TARGET_EOPNOTSUPP;
9736 switch (arg1) {
9737 case TARGET_GSI_IEEE_FP_CONTROL:
9739 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9741 /* Copied from linux ieee_fpcr_to_swcr. */
9742 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9743 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9744 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9745 | SWCR_TRAP_ENABLE_DZE
9746 | SWCR_TRAP_ENABLE_OVF);
9747 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9748 | SWCR_TRAP_ENABLE_INE);
9749 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9750 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9752 if (put_user_u64 (swcr, arg2))
9753 return -TARGET_EFAULT;
9754 ret = 0;
9756 break;
9758 /* case GSI_IEEE_STATE_AT_SIGNAL:
9759 -- Not implemented in linux kernel.
9760 case GSI_UACPROC:
9761 -- Retrieves current unaligned access state; not much used.
9762 case GSI_PROC_TYPE:
9763 -- Retrieves implver information; surely not used.
9764 case GSI_GET_HWRPB:
9765 -- Grabs a copy of the HWRPB; surely not used.
9768 return ret;
9769 #endif
9770 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9771 /* Alpha specific */
9772 case TARGET_NR_osf_setsysinfo:
9773 ret = -TARGET_EOPNOTSUPP;
9774 switch (arg1) {
9775 case TARGET_SSI_IEEE_FP_CONTROL:
9777 uint64_t swcr, fpcr, orig_fpcr;
9779 if (get_user_u64 (swcr, arg2)) {
9780 return -TARGET_EFAULT;
9782 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9783 fpcr = orig_fpcr & FPCR_DYN_MASK;
9785 /* Copied from linux ieee_swcr_to_fpcr. */
9786 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9787 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9788 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9789 | SWCR_TRAP_ENABLE_DZE
9790 | SWCR_TRAP_ENABLE_OVF)) << 48;
9791 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9792 | SWCR_TRAP_ENABLE_INE)) << 57;
9793 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9794 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9796 cpu_alpha_store_fpcr(cpu_env, fpcr);
9797 ret = 0;
9799 break;
9801 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9803 uint64_t exc, fpcr, orig_fpcr;
9804 int si_code;
9806 if (get_user_u64(exc, arg2)) {
9807 return -TARGET_EFAULT;
9810 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9812 /* We only add to the exception status here. */
9813 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9815 cpu_alpha_store_fpcr(cpu_env, fpcr);
9816 ret = 0;
9818 /* Old exceptions are not signaled. */
9819 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9821 /* If any exceptions set by this call,
9822 and are unmasked, send a signal. */
9823 si_code = 0;
9824 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9825 si_code = TARGET_FPE_FLTRES;
9827 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9828 si_code = TARGET_FPE_FLTUND;
9830 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9831 si_code = TARGET_FPE_FLTOVF;
9833 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9834 si_code = TARGET_FPE_FLTDIV;
9836 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9837 si_code = TARGET_FPE_FLTINV;
9839 if (si_code != 0) {
9840 target_siginfo_t info;
9841 info.si_signo = SIGFPE;
9842 info.si_errno = 0;
9843 info.si_code = si_code;
9844 info._sifields._sigfault._addr
9845 = ((CPUArchState *)cpu_env)->pc;
9846 queue_signal((CPUArchState *)cpu_env, info.si_signo,
9847 QEMU_SI_FAULT, &info);
9850 break;
9852 /* case SSI_NVPAIRS:
9853 -- Used with SSIN_UACPROC to enable unaligned accesses.
9854 case SSI_IEEE_STATE_AT_SIGNAL:
9855 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9856 -- Not implemented in linux kernel
9859 return ret;
9860 #endif
9861 #ifdef TARGET_NR_osf_sigprocmask
9862 /* Alpha specific. */
9863 case TARGET_NR_osf_sigprocmask:
9865 abi_ulong mask;
9866 int how;
9867 sigset_t set, oldset;
9869 switch(arg1) {
9870 case TARGET_SIG_BLOCK:
9871 how = SIG_BLOCK;
9872 break;
9873 case TARGET_SIG_UNBLOCK:
9874 how = SIG_UNBLOCK;
9875 break;
9876 case TARGET_SIG_SETMASK:
9877 how = SIG_SETMASK;
9878 break;
9879 default:
9880 return -TARGET_EINVAL;
9882 mask = arg2;
9883 target_to_host_old_sigset(&set, &mask);
9884 ret = do_sigprocmask(how, &set, &oldset);
9885 if (!ret) {
9886 host_to_target_old_sigset(&mask, &oldset);
9887 ret = mask;
9890 return ret;
9891 #endif
9893 #ifdef TARGET_NR_getgid32
9894 case TARGET_NR_getgid32:
9895 return get_errno(getgid());
9896 #endif
9897 #ifdef TARGET_NR_geteuid32
9898 case TARGET_NR_geteuid32:
9899 return get_errno(geteuid());
9900 #endif
9901 #ifdef TARGET_NR_getegid32
9902 case TARGET_NR_getegid32:
9903 return get_errno(getegid());
9904 #endif
9905 #ifdef TARGET_NR_setreuid32
9906 case TARGET_NR_setreuid32:
9907 return get_errno(setreuid(arg1, arg2));
9908 #endif
9909 #ifdef TARGET_NR_setregid32
9910 case TARGET_NR_setregid32:
9911 return get_errno(setregid(arg1, arg2));
9912 #endif
9913 #ifdef TARGET_NR_getgroups32
9914 case TARGET_NR_getgroups32:
9916 int gidsetsize = arg1;
9917 uint32_t *target_grouplist;
9918 gid_t *grouplist;
9919 int i;
9921 grouplist = alloca(gidsetsize * sizeof(gid_t));
9922 ret = get_errno(getgroups(gidsetsize, grouplist));
9923 if (gidsetsize == 0)
9924 return ret;
9925 if (!is_error(ret)) {
9926 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9927 if (!target_grouplist) {
9928 return -TARGET_EFAULT;
9930 for(i = 0;i < ret; i++)
9931 target_grouplist[i] = tswap32(grouplist[i]);
9932 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9935 return ret;
9936 #endif
9937 #ifdef TARGET_NR_setgroups32
9938 case TARGET_NR_setgroups32:
9940 int gidsetsize = arg1;
9941 uint32_t *target_grouplist;
9942 gid_t *grouplist;
9943 int i;
9945 grouplist = alloca(gidsetsize * sizeof(gid_t));
9946 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9947 if (!target_grouplist) {
9948 return -TARGET_EFAULT;
9950 for(i = 0;i < gidsetsize; i++)
9951 grouplist[i] = tswap32(target_grouplist[i]);
9952 unlock_user(target_grouplist, arg2, 0);
9953 return get_errno(setgroups(gidsetsize, grouplist));
9955 #endif
9956 #ifdef TARGET_NR_fchown32
9957 case TARGET_NR_fchown32:
9958 return get_errno(fchown(arg1, arg2, arg3));
9959 #endif
9960 #ifdef TARGET_NR_setresuid32
9961 case TARGET_NR_setresuid32:
9962 return get_errno(sys_setresuid(arg1, arg2, arg3));
9963 #endif
9964 #ifdef TARGET_NR_getresuid32
9965 case TARGET_NR_getresuid32:
9967 uid_t ruid, euid, suid;
9968 ret = get_errno(getresuid(&ruid, &euid, &suid));
9969 if (!is_error(ret)) {
9970 if (put_user_u32(ruid, arg1)
9971 || put_user_u32(euid, arg2)
9972 || put_user_u32(suid, arg3))
9973 return -TARGET_EFAULT;
9976 return ret;
9977 #endif
9978 #ifdef TARGET_NR_setresgid32
9979 case TARGET_NR_setresgid32:
9980 return get_errno(sys_setresgid(arg1, arg2, arg3));
9981 #endif
9982 #ifdef TARGET_NR_getresgid32
9983 case TARGET_NR_getresgid32:
9985 gid_t rgid, egid, sgid;
9986 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9987 if (!is_error(ret)) {
9988 if (put_user_u32(rgid, arg1)
9989 || put_user_u32(egid, arg2)
9990 || put_user_u32(sgid, arg3))
9991 return -TARGET_EFAULT;
9994 return ret;
9995 #endif
9996 #ifdef TARGET_NR_chown32
9997 case TARGET_NR_chown32:
9998 if (!(p = lock_user_string(arg1)))
9999 return -TARGET_EFAULT;
10000 ret = get_errno(chown(p, arg2, arg3));
10001 unlock_user(p, arg1, 0);
10002 return ret;
10003 #endif
10004 #ifdef TARGET_NR_setuid32
10005 case TARGET_NR_setuid32:
10006 return get_errno(sys_setuid(arg1));
10007 #endif
10008 #ifdef TARGET_NR_setgid32
10009 case TARGET_NR_setgid32:
10010 return get_errno(sys_setgid(arg1));
10011 #endif
10012 #ifdef TARGET_NR_setfsuid32
10013 case TARGET_NR_setfsuid32:
10014 return get_errno(setfsuid(arg1));
10015 #endif
10016 #ifdef TARGET_NR_setfsgid32
10017 case TARGET_NR_setfsgid32:
10018 return get_errno(setfsgid(arg1));
10019 #endif
10020 #ifdef TARGET_NR_mincore
10021 case TARGET_NR_mincore:
10023 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10024 if (!a) {
10025 return -TARGET_ENOMEM;
10027 p = lock_user_string(arg3);
10028 if (!p) {
10029 ret = -TARGET_EFAULT;
10030 } else {
10031 ret = get_errno(mincore(a, arg2, p));
10032 unlock_user(p, arg3, ret);
10034 unlock_user(a, arg1, 0);
10036 return ret;
10037 #endif
10038 #ifdef TARGET_NR_arm_fadvise64_64
10039 case TARGET_NR_arm_fadvise64_64:
10040 /* arm_fadvise64_64 looks like fadvise64_64 but
10041 * with different argument order: fd, advice, offset, len
10042 * rather than the usual fd, offset, len, advice.
10043 * Note that offset and len are both 64-bit so appear as
10044 * pairs of 32-bit registers.
10046 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10047 target_offset64(arg5, arg6), arg2);
10048 return -host_to_target_errno(ret);
10049 #endif
10051 #if TARGET_ABI_BITS == 32
10053 #ifdef TARGET_NR_fadvise64_64
10054 case TARGET_NR_fadvise64_64:
10055 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10056 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10057 ret = arg2;
10058 arg2 = arg3;
10059 arg3 = arg4;
10060 arg4 = arg5;
10061 arg5 = arg6;
10062 arg6 = ret;
10063 #else
10064 /* 6 args: fd, offset (high, low), len (high, low), advice */
10065 if (regpairs_aligned(cpu_env, num)) {
10066 /* offset is in (3,4), len in (5,6) and advice in 7 */
10067 arg2 = arg3;
10068 arg3 = arg4;
10069 arg4 = arg5;
10070 arg5 = arg6;
10071 arg6 = arg7;
10073 #endif
10074 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10075 target_offset64(arg4, arg5), arg6);
10076 return -host_to_target_errno(ret);
10077 #endif
10079 #ifdef TARGET_NR_fadvise64
10080 case TARGET_NR_fadvise64:
10081 /* 5 args: fd, offset (high, low), len, advice */
10082 if (regpairs_aligned(cpu_env, num)) {
10083 /* offset is in (3,4), len in 5 and advice in 6 */
10084 arg2 = arg3;
10085 arg3 = arg4;
10086 arg4 = arg5;
10087 arg5 = arg6;
10089 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10090 return -host_to_target_errno(ret);
10091 #endif
10093 #else /* not a 32-bit ABI */
10094 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10095 #ifdef TARGET_NR_fadvise64_64
10096 case TARGET_NR_fadvise64_64:
10097 #endif
10098 #ifdef TARGET_NR_fadvise64
10099 case TARGET_NR_fadvise64:
10100 #endif
10101 #ifdef TARGET_S390X
10102 switch (arg4) {
10103 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10104 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10105 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10106 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10107 default: break;
10109 #endif
10110 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10111 #endif
10112 #endif /* end of 64-bit ABI fadvise handling */
10114 #ifdef TARGET_NR_madvise
10115 case TARGET_NR_madvise:
10116 /* A straight passthrough may not be safe because qemu sometimes
10117 turns private file-backed mappings into anonymous mappings.
10118 This will break MADV_DONTNEED.
10119 This is a hint, so ignoring and returning success is ok. */
10120 return 0;
10121 #endif
10122 #if TARGET_ABI_BITS == 32
10123 case TARGET_NR_fcntl64:
10125 int cmd;
10126 struct flock64 fl;
10127 from_flock64_fn *copyfrom = copy_from_user_flock64;
10128 to_flock64_fn *copyto = copy_to_user_flock64;
10130 #ifdef TARGET_ARM
10131 if (!((CPUARMState *)cpu_env)->eabi) {
10132 copyfrom = copy_from_user_oabi_flock64;
10133 copyto = copy_to_user_oabi_flock64;
10135 #endif
10137 cmd = target_to_host_fcntl_cmd(arg2);
10138 if (cmd == -TARGET_EINVAL) {
10139 return cmd;
10142 switch(arg2) {
10143 case TARGET_F_GETLK64:
10144 ret = copyfrom(&fl, arg3);
10145 if (ret) {
10146 break;
10148 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10149 if (ret == 0) {
10150 ret = copyto(arg3, &fl);
10152 break;
10154 case TARGET_F_SETLK64:
10155 case TARGET_F_SETLKW64:
10156 ret = copyfrom(&fl, arg3);
10157 if (ret) {
10158 break;
10160 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10161 break;
10162 default:
10163 ret = do_fcntl(arg1, arg2, arg3);
10164 break;
10166 return ret;
10168 #endif
10169 #ifdef TARGET_NR_cacheflush
10170 case TARGET_NR_cacheflush:
10171 /* self-modifying code is handled automatically, so nothing needed */
10172 return 0;
10173 #endif
10174 #ifdef TARGET_NR_getpagesize
10175 case TARGET_NR_getpagesize:
10176 return TARGET_PAGE_SIZE;
10177 #endif
10178 case TARGET_NR_gettid:
10179 return get_errno(gettid());
10180 #ifdef TARGET_NR_readahead
10181 case TARGET_NR_readahead:
10182 #if TARGET_ABI_BITS == 32
10183 if (regpairs_aligned(cpu_env, num)) {
10184 arg2 = arg3;
10185 arg3 = arg4;
10186 arg4 = arg5;
10188 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10189 #else
10190 ret = get_errno(readahead(arg1, arg2, arg3));
10191 #endif
10192 return ret;
10193 #endif
10194 #ifdef CONFIG_ATTR
10195 #ifdef TARGET_NR_setxattr
10196 case TARGET_NR_listxattr:
10197 case TARGET_NR_llistxattr:
10199 void *p, *b = 0;
10200 if (arg2) {
10201 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10202 if (!b) {
10203 return -TARGET_EFAULT;
10206 p = lock_user_string(arg1);
10207 if (p) {
10208 if (num == TARGET_NR_listxattr) {
10209 ret = get_errno(listxattr(p, b, arg3));
10210 } else {
10211 ret = get_errno(llistxattr(p, b, arg3));
10213 } else {
10214 ret = -TARGET_EFAULT;
10216 unlock_user(p, arg1, 0);
10217 unlock_user(b, arg2, arg3);
10218 return ret;
10220 case TARGET_NR_flistxattr:
10222 void *b = 0;
10223 if (arg2) {
10224 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10225 if (!b) {
10226 return -TARGET_EFAULT;
10229 ret = get_errno(flistxattr(arg1, b, arg3));
10230 unlock_user(b, arg2, arg3);
10231 return ret;
10233 case TARGET_NR_setxattr:
10234 case TARGET_NR_lsetxattr:
10236 void *p, *n, *v = 0;
10237 if (arg3) {
10238 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10239 if (!v) {
10240 return -TARGET_EFAULT;
10243 p = lock_user_string(arg1);
10244 n = lock_user_string(arg2);
10245 if (p && n) {
10246 if (num == TARGET_NR_setxattr) {
10247 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10248 } else {
10249 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10251 } else {
10252 ret = -TARGET_EFAULT;
10254 unlock_user(p, arg1, 0);
10255 unlock_user(n, arg2, 0);
10256 unlock_user(v, arg3, 0);
10258 return ret;
10259 case TARGET_NR_fsetxattr:
10261 void *n, *v = 0;
10262 if (arg3) {
10263 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10264 if (!v) {
10265 return -TARGET_EFAULT;
10268 n = lock_user_string(arg2);
10269 if (n) {
10270 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10271 } else {
10272 ret = -TARGET_EFAULT;
10274 unlock_user(n, arg2, 0);
10275 unlock_user(v, arg3, 0);
10277 return ret;
10278 case TARGET_NR_getxattr:
10279 case TARGET_NR_lgetxattr:
10281 void *p, *n, *v = 0;
10282 if (arg3) {
10283 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10284 if (!v) {
10285 return -TARGET_EFAULT;
10288 p = lock_user_string(arg1);
10289 n = lock_user_string(arg2);
10290 if (p && n) {
10291 if (num == TARGET_NR_getxattr) {
10292 ret = get_errno(getxattr(p, n, v, arg4));
10293 } else {
10294 ret = get_errno(lgetxattr(p, n, v, arg4));
10296 } else {
10297 ret = -TARGET_EFAULT;
10299 unlock_user(p, arg1, 0);
10300 unlock_user(n, arg2, 0);
10301 unlock_user(v, arg3, arg4);
10303 return ret;
10304 case TARGET_NR_fgetxattr:
10306 void *n, *v = 0;
10307 if (arg3) {
10308 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10309 if (!v) {
10310 return -TARGET_EFAULT;
10313 n = lock_user_string(arg2);
10314 if (n) {
10315 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10316 } else {
10317 ret = -TARGET_EFAULT;
10319 unlock_user(n, arg2, 0);
10320 unlock_user(v, arg3, arg4);
10322 return ret;
10323 case TARGET_NR_removexattr:
10324 case TARGET_NR_lremovexattr:
10326 void *p, *n;
10327 p = lock_user_string(arg1);
10328 n = lock_user_string(arg2);
10329 if (p && n) {
10330 if (num == TARGET_NR_removexattr) {
10331 ret = get_errno(removexattr(p, n));
10332 } else {
10333 ret = get_errno(lremovexattr(p, n));
10335 } else {
10336 ret = -TARGET_EFAULT;
10338 unlock_user(p, arg1, 0);
10339 unlock_user(n, arg2, 0);
10341 return ret;
10342 case TARGET_NR_fremovexattr:
10344 void *n;
10345 n = lock_user_string(arg2);
10346 if (n) {
10347 ret = get_errno(fremovexattr(arg1, n));
10348 } else {
10349 ret = -TARGET_EFAULT;
10351 unlock_user(n, arg2, 0);
10353 return ret;
10354 #endif
10355 #endif /* CONFIG_ATTR */
10356 #ifdef TARGET_NR_set_thread_area
10357 case TARGET_NR_set_thread_area:
10358 #if defined(TARGET_MIPS)
10359 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10360 return 0;
10361 #elif defined(TARGET_CRIS)
10362 if (arg1 & 0xff)
10363 ret = -TARGET_EINVAL;
10364 else {
10365 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10366 ret = 0;
10368 return ret;
10369 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10370 return do_set_thread_area(cpu_env, arg1);
10371 #elif defined(TARGET_M68K)
10373 TaskState *ts = cpu->opaque;
10374 ts->tp_value = arg1;
10375 return 0;
10377 #else
10378 return -TARGET_ENOSYS;
10379 #endif
10380 #endif
10381 #ifdef TARGET_NR_get_thread_area
10382 case TARGET_NR_get_thread_area:
10383 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10384 return do_get_thread_area(cpu_env, arg1);
10385 #elif defined(TARGET_M68K)
10387 TaskState *ts = cpu->opaque;
10388 return ts->tp_value;
10390 #else
10391 return -TARGET_ENOSYS;
10392 #endif
10393 #endif
10394 #ifdef TARGET_NR_getdomainname
10395 case TARGET_NR_getdomainname:
10396 return -TARGET_ENOSYS;
10397 #endif
10399 #ifdef TARGET_NR_clock_settime
10400 case TARGET_NR_clock_settime:
10402 struct timespec ts;
10404 ret = target_to_host_timespec(&ts, arg2);
10405 if (!is_error(ret)) {
10406 ret = get_errno(clock_settime(arg1, &ts));
10408 return ret;
10410 #endif
10411 #ifdef TARGET_NR_clock_gettime
10412 case TARGET_NR_clock_gettime:
10414 struct timespec ts;
10415 ret = get_errno(clock_gettime(arg1, &ts));
10416 if (!is_error(ret)) {
10417 ret = host_to_target_timespec(arg2, &ts);
10419 return ret;
10421 #endif
10422 #ifdef TARGET_NR_clock_getres
10423 case TARGET_NR_clock_getres:
10425 struct timespec ts;
10426 ret = get_errno(clock_getres(arg1, &ts));
10427 if (!is_error(ret)) {
10428 host_to_target_timespec(arg2, &ts);
10430 return ret;
10432 #endif
10433 #ifdef TARGET_NR_clock_nanosleep
10434 case TARGET_NR_clock_nanosleep:
10436 struct timespec ts;
10437 target_to_host_timespec(&ts, arg3);
10438 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10439 &ts, arg4 ? &ts : NULL));
10440 if (arg4)
10441 host_to_target_timespec(arg4, &ts);
10443 #if defined(TARGET_PPC)
10444 /* clock_nanosleep is odd in that it returns positive errno values.
10445 * On PPC, CR0 bit 3 should be set in such a situation. */
10446 if (ret && ret != -TARGET_ERESTARTSYS) {
10447 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10449 #endif
10450 return ret;
10452 #endif
10454 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10455 case TARGET_NR_set_tid_address:
10456 return get_errno(set_tid_address((int *)g2h(arg1)));
10457 #endif
10459 case TARGET_NR_tkill:
10460 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10462 case TARGET_NR_tgkill:
10463 return get_errno(safe_tgkill((int)arg1, (int)arg2,
10464 target_to_host_signal(arg3)));
10466 #ifdef TARGET_NR_set_robust_list
10467 case TARGET_NR_set_robust_list:
10468 case TARGET_NR_get_robust_list:
10469 /* The ABI for supporting robust futexes has userspace pass
10470 * the kernel a pointer to a linked list which is updated by
10471 * userspace after the syscall; the list is walked by the kernel
10472 * when the thread exits. Since the linked list in QEMU guest
10473 * memory isn't a valid linked list for the host and we have
10474 * no way to reliably intercept the thread-death event, we can't
10475 * support these. Silently return ENOSYS so that guest userspace
10476 * falls back to a non-robust futex implementation (which should
10477 * be OK except in the corner case of the guest crashing while
10478 * holding a mutex that is shared with another process via
10479 * shared memory).
10481 return -TARGET_ENOSYS;
10482 #endif
10484 #if defined(TARGET_NR_utimensat)
10485 case TARGET_NR_utimensat:
10487 struct timespec *tsp, ts[2];
10488 if (!arg3) {
10489 tsp = NULL;
10490 } else {
10491 target_to_host_timespec(ts, arg3);
10492 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10493 tsp = ts;
10495 if (!arg2)
10496 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10497 else {
10498 if (!(p = lock_user_string(arg2))) {
10499 return -TARGET_EFAULT;
10501 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10502 unlock_user(p, arg2, 0);
10505 return ret;
10506 #endif
10507 case TARGET_NR_futex:
10508 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10510 case TARGET_NR_inotify_init:
10511 ret = get_errno(sys_inotify_init());
10512 if (ret >= 0) {
10513 fd_trans_register(ret, &target_inotify_trans);
10515 return ret;
10516 #endif
10517 #ifdef CONFIG_INOTIFY1
10518 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10519 case TARGET_NR_inotify_init1:
10520 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10521 fcntl_flags_tbl)));
10522 if (ret >= 0) {
10523 fd_trans_register(ret, &target_inotify_trans);
10525 return ret;
10526 #endif
10527 #endif
10528 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10529 case TARGET_NR_inotify_add_watch:
10530 p = lock_user_string(arg2);
10531 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10532 unlock_user(p, arg2, 0);
10533 return ret;
10534 #endif
10535 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10536 case TARGET_NR_inotify_rm_watch:
10537 return get_errno(sys_inotify_rm_watch(arg1, arg2));
10538 #endif
10540 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10541 case TARGET_NR_mq_open:
10543 struct mq_attr posix_mq_attr;
10544 struct mq_attr *pposix_mq_attr;
10545 int host_flags;
10547 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10548 pposix_mq_attr = NULL;
10549 if (arg4) {
10550 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10551 return -TARGET_EFAULT;
10553 pposix_mq_attr = &posix_mq_attr;
10555 p = lock_user_string(arg1 - 1);
10556 if (!p) {
10557 return -TARGET_EFAULT;
10559 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
10560 unlock_user (p, arg1, 0);
10562 return ret;
10564 case TARGET_NR_mq_unlink:
10565 p = lock_user_string(arg1 - 1);
10566 if (!p) {
10567 return -TARGET_EFAULT;
10569 ret = get_errno(mq_unlink(p));
10570 unlock_user (p, arg1, 0);
10571 return ret;
10573 case TARGET_NR_mq_timedsend:
10575 struct timespec ts;
10577 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10578 if (arg5 != 0) {
10579 target_to_host_timespec(&ts, arg5);
10580 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10581 host_to_target_timespec(arg5, &ts);
10582 } else {
10583 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10585 unlock_user (p, arg2, arg3);
10587 return ret;
10589 case TARGET_NR_mq_timedreceive:
10591 struct timespec ts;
10592 unsigned int prio;
10594 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10595 if (arg5 != 0) {
10596 target_to_host_timespec(&ts, arg5);
10597 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10598 &prio, &ts));
10599 host_to_target_timespec(arg5, &ts);
10600 } else {
10601 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10602 &prio, NULL));
10604 unlock_user (p, arg2, arg3);
10605 if (arg4 != 0)
10606 put_user_u32(prio, arg4);
10608 return ret;
10610 /* Not implemented for now... */
10611 /* case TARGET_NR_mq_notify: */
10612 /* break; */
10614 case TARGET_NR_mq_getsetattr:
10616 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10617 ret = 0;
10618 if (arg2 != 0) {
10619 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10620 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
10621 &posix_mq_attr_out));
10622 } else if (arg3 != 0) {
10623 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
10625 if (ret == 0 && arg3 != 0) {
10626 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10629 return ret;
10630 #endif
10632 #ifdef CONFIG_SPLICE
10633 #ifdef TARGET_NR_tee
10634 case TARGET_NR_tee:
10636 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10638 return ret;
10639 #endif
10640 #ifdef TARGET_NR_splice
10641 case TARGET_NR_splice:
10643 loff_t loff_in, loff_out;
10644 loff_t *ploff_in = NULL, *ploff_out = NULL;
10645 if (arg2) {
10646 if (get_user_u64(loff_in, arg2)) {
10647 return -TARGET_EFAULT;
10649 ploff_in = &loff_in;
10651 if (arg4) {
10652 if (get_user_u64(loff_out, arg4)) {
10653 return -TARGET_EFAULT;
10655 ploff_out = &loff_out;
10657 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10658 if (arg2) {
10659 if (put_user_u64(loff_in, arg2)) {
10660 return -TARGET_EFAULT;
10663 if (arg4) {
10664 if (put_user_u64(loff_out, arg4)) {
10665 return -TARGET_EFAULT;
10669 return ret;
10670 #endif
10671 #ifdef TARGET_NR_vmsplice
10672 case TARGET_NR_vmsplice:
10674 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10675 if (vec != NULL) {
10676 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10677 unlock_iovec(vec, arg2, arg3, 0);
10678 } else {
10679 ret = -host_to_target_errno(errno);
10682 return ret;
10683 #endif
10684 #endif /* CONFIG_SPLICE */
10685 #ifdef CONFIG_EVENTFD
10686 #if defined(TARGET_NR_eventfd)
10687 case TARGET_NR_eventfd:
10688 ret = get_errno(eventfd(arg1, 0));
10689 if (ret >= 0) {
10690 fd_trans_register(ret, &target_eventfd_trans);
10692 return ret;
10693 #endif
10694 #if defined(TARGET_NR_eventfd2)
10695 case TARGET_NR_eventfd2:
10697 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10698 if (arg2 & TARGET_O_NONBLOCK) {
10699 host_flags |= O_NONBLOCK;
10701 if (arg2 & TARGET_O_CLOEXEC) {
10702 host_flags |= O_CLOEXEC;
10704 ret = get_errno(eventfd(arg1, host_flags));
10705 if (ret >= 0) {
10706 fd_trans_register(ret, &target_eventfd_trans);
10708 return ret;
10710 #endif
10711 #endif /* CONFIG_EVENTFD */
10712 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10713 case TARGET_NR_fallocate:
10714 #if TARGET_ABI_BITS == 32
10715 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10716 target_offset64(arg5, arg6)));
10717 #else
10718 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10719 #endif
10720 return ret;
10721 #endif
10722 #if defined(CONFIG_SYNC_FILE_RANGE)
10723 #if defined(TARGET_NR_sync_file_range)
10724 case TARGET_NR_sync_file_range:
10725 #if TARGET_ABI_BITS == 32
10726 #if defined(TARGET_MIPS)
10727 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10728 target_offset64(arg5, arg6), arg7));
10729 #else
10730 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10731 target_offset64(arg4, arg5), arg6));
10732 #endif /* !TARGET_MIPS */
10733 #else
10734 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10735 #endif
10736 return ret;
10737 #endif
10738 #if defined(TARGET_NR_sync_file_range2)
10739 case TARGET_NR_sync_file_range2:
10740 /* This is like sync_file_range but the arguments are reordered */
10741 #if TARGET_ABI_BITS == 32
10742 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10743 target_offset64(arg5, arg6), arg2));
10744 #else
10745 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10746 #endif
10747 return ret;
10748 #endif
10749 #endif
10750 #if defined(TARGET_NR_signalfd4)
10751 case TARGET_NR_signalfd4:
10752 return do_signalfd4(arg1, arg2, arg4);
10753 #endif
10754 #if defined(TARGET_NR_signalfd)
10755 case TARGET_NR_signalfd:
10756 return do_signalfd4(arg1, arg2, 0);
10757 #endif
10758 #if defined(CONFIG_EPOLL)
10759 #if defined(TARGET_NR_epoll_create)
10760 case TARGET_NR_epoll_create:
10761 return get_errno(epoll_create(arg1));
10762 #endif
10763 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10764 case TARGET_NR_epoll_create1:
10765 return get_errno(epoll_create1(arg1));
10766 #endif
10767 #if defined(TARGET_NR_epoll_ctl)
10768 case TARGET_NR_epoll_ctl:
10770 struct epoll_event ep;
10771 struct epoll_event *epp = 0;
10772 if (arg4) {
10773 struct target_epoll_event *target_ep;
10774 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10775 return -TARGET_EFAULT;
10777 ep.events = tswap32(target_ep->events);
10778 /* The epoll_data_t union is just opaque data to the kernel,
10779 * so we transfer all 64 bits across and need not worry what
10780 * actual data type it is.
10782 ep.data.u64 = tswap64(target_ep->data.u64);
10783 unlock_user_struct(target_ep, arg4, 0);
10784 epp = &ep;
10786 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10788 #endif
10790 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10791 #if defined(TARGET_NR_epoll_wait)
10792 case TARGET_NR_epoll_wait:
10793 #endif
10794 #if defined(TARGET_NR_epoll_pwait)
10795 case TARGET_NR_epoll_pwait:
10796 #endif
10798 struct target_epoll_event *target_ep;
10799 struct epoll_event *ep;
10800 int epfd = arg1;
10801 int maxevents = arg3;
10802 int timeout = arg4;
10804 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
10805 return -TARGET_EINVAL;
10808 target_ep = lock_user(VERIFY_WRITE, arg2,
10809 maxevents * sizeof(struct target_epoll_event), 1);
10810 if (!target_ep) {
10811 return -TARGET_EFAULT;
10814 ep = g_try_new(struct epoll_event, maxevents);
10815 if (!ep) {
10816 unlock_user(target_ep, arg2, 0);
10817 return -TARGET_ENOMEM;
10820 switch (num) {
10821 #if defined(TARGET_NR_epoll_pwait)
10822 case TARGET_NR_epoll_pwait:
10824 target_sigset_t *target_set;
10825 sigset_t _set, *set = &_set;
10827 if (arg5) {
10828 if (arg6 != sizeof(target_sigset_t)) {
10829 ret = -TARGET_EINVAL;
10830 break;
10833 target_set = lock_user(VERIFY_READ, arg5,
10834 sizeof(target_sigset_t), 1);
10835 if (!target_set) {
10836 ret = -TARGET_EFAULT;
10837 break;
10839 target_to_host_sigset(set, target_set);
10840 unlock_user(target_set, arg5, 0);
10841 } else {
10842 set = NULL;
10845 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10846 set, SIGSET_T_SIZE));
10847 break;
10849 #endif
10850 #if defined(TARGET_NR_epoll_wait)
10851 case TARGET_NR_epoll_wait:
10852 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10853 NULL, 0));
10854 break;
10855 #endif
10856 default:
10857 ret = -TARGET_ENOSYS;
10859 if (!is_error(ret)) {
10860 int i;
10861 for (i = 0; i < ret; i++) {
10862 target_ep[i].events = tswap32(ep[i].events);
10863 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10865 unlock_user(target_ep, arg2,
10866 ret * sizeof(struct target_epoll_event));
10867 } else {
10868 unlock_user(target_ep, arg2, 0);
10870 g_free(ep);
10871 return ret;
10873 #endif
10874 #endif
10875 #ifdef TARGET_NR_prlimit64
10876 case TARGET_NR_prlimit64:
10878 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10879 struct target_rlimit64 *target_rnew, *target_rold;
10880 struct host_rlimit64 rnew, rold, *rnewp = 0;
10881 int resource = target_to_host_resource(arg2);
10882 if (arg3) {
10883 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10884 return -TARGET_EFAULT;
10886 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10887 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10888 unlock_user_struct(target_rnew, arg3, 0);
10889 rnewp = &rnew;
10892 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10893 if (!is_error(ret) && arg4) {
10894 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10895 return -TARGET_EFAULT;
10897 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10898 target_rold->rlim_max = tswap64(rold.rlim_max);
10899 unlock_user_struct(target_rold, arg4, 1);
10901 return ret;
10903 #endif
10904 #ifdef TARGET_NR_gethostname
10905 case TARGET_NR_gethostname:
10907 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10908 if (name) {
10909 ret = get_errno(gethostname(name, arg2));
10910 unlock_user(name, arg1, arg2);
10911 } else {
10912 ret = -TARGET_EFAULT;
10914 return ret;
10916 #endif
10917 #ifdef TARGET_NR_atomic_cmpxchg_32
10918 case TARGET_NR_atomic_cmpxchg_32:
10920 /* should use start_exclusive from main.c */
10921 abi_ulong mem_value;
10922 if (get_user_u32(mem_value, arg6)) {
10923 target_siginfo_t info;
10924 info.si_signo = SIGSEGV;
10925 info.si_errno = 0;
10926 info.si_code = TARGET_SEGV_MAPERR;
10927 info._sifields._sigfault._addr = arg6;
10928 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10929 QEMU_SI_FAULT, &info);
10930 ret = 0xdeadbeef;
10933 if (mem_value == arg2)
10934 put_user_u32(arg1, arg6);
10935 return mem_value;
10937 #endif
10938 #ifdef TARGET_NR_atomic_barrier
10939 case TARGET_NR_atomic_barrier:
10940 /* Like the kernel implementation and the
10941 qemu arm barrier, no-op this? */
10942 return 0;
10943 #endif
10945 #ifdef TARGET_NR_timer_create
10946 case TARGET_NR_timer_create:
10948 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10950 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10952 int clkid = arg1;
10953 int timer_index = next_free_host_timer();
10955 if (timer_index < 0) {
10956 ret = -TARGET_EAGAIN;
10957 } else {
10958 timer_t *phtimer = g_posix_timers + timer_index;
10960 if (arg2) {
10961 phost_sevp = &host_sevp;
10962 ret = target_to_host_sigevent(phost_sevp, arg2);
10963 if (ret != 0) {
10964 return ret;
10968 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10969 if (ret) {
10970 phtimer = NULL;
10971 } else {
10972 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10973 return -TARGET_EFAULT;
10977 return ret;
10979 #endif
10981 #ifdef TARGET_NR_timer_settime
10982 case TARGET_NR_timer_settime:
10984 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10985 * struct itimerspec * old_value */
10986 target_timer_t timerid = get_timer_id(arg1);
10988 if (timerid < 0) {
10989 ret = timerid;
10990 } else if (arg3 == 0) {
10991 ret = -TARGET_EINVAL;
10992 } else {
10993 timer_t htimer = g_posix_timers[timerid];
10994 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10996 if (target_to_host_itimerspec(&hspec_new, arg3)) {
10997 return -TARGET_EFAULT;
10999 ret = get_errno(
11000 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11001 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11002 return -TARGET_EFAULT;
11005 return ret;
11007 #endif
11009 #ifdef TARGET_NR_timer_gettime
11010 case TARGET_NR_timer_gettime:
11012 /* args: timer_t timerid, struct itimerspec *curr_value */
11013 target_timer_t timerid = get_timer_id(arg1);
11015 if (timerid < 0) {
11016 ret = timerid;
11017 } else if (!arg2) {
11018 ret = -TARGET_EFAULT;
11019 } else {
11020 timer_t htimer = g_posix_timers[timerid];
11021 struct itimerspec hspec;
11022 ret = get_errno(timer_gettime(htimer, &hspec));
11024 if (host_to_target_itimerspec(arg2, &hspec)) {
11025 ret = -TARGET_EFAULT;
11028 return ret;
11030 #endif
11032 #ifdef TARGET_NR_timer_getoverrun
11033 case TARGET_NR_timer_getoverrun:
11035 /* args: timer_t timerid */
11036 target_timer_t timerid = get_timer_id(arg1);
11038 if (timerid < 0) {
11039 ret = timerid;
11040 } else {
11041 timer_t htimer = g_posix_timers[timerid];
11042 ret = get_errno(timer_getoverrun(htimer));
11044 fd_trans_unregister(ret);
11045 return ret;
11047 #endif
11049 #ifdef TARGET_NR_timer_delete
11050 case TARGET_NR_timer_delete:
11052 /* args: timer_t timerid */
11053 target_timer_t timerid = get_timer_id(arg1);
11055 if (timerid < 0) {
11056 ret = timerid;
11057 } else {
11058 timer_t htimer = g_posix_timers[timerid];
11059 ret = get_errno(timer_delete(htimer));
11060 g_posix_timers[timerid] = 0;
11062 return ret;
11064 #endif
11066 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11067 case TARGET_NR_timerfd_create:
11068 return get_errno(timerfd_create(arg1,
11069 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11070 #endif
11072 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11073 case TARGET_NR_timerfd_gettime:
11075 struct itimerspec its_curr;
11077 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11079 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11080 return -TARGET_EFAULT;
11083 return ret;
11084 #endif
11086 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11087 case TARGET_NR_timerfd_settime:
11089 struct itimerspec its_new, its_old, *p_new;
11091 if (arg3) {
11092 if (target_to_host_itimerspec(&its_new, arg3)) {
11093 return -TARGET_EFAULT;
11095 p_new = &its_new;
11096 } else {
11097 p_new = NULL;
11100 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11102 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11103 return -TARGET_EFAULT;
11106 return ret;
11107 #endif
11109 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11110 case TARGET_NR_ioprio_get:
11111 return get_errno(ioprio_get(arg1, arg2));
11112 #endif
11114 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11115 case TARGET_NR_ioprio_set:
11116 return get_errno(ioprio_set(arg1, arg2, arg3));
11117 #endif
11119 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11120 case TARGET_NR_setns:
11121 return get_errno(setns(arg1, arg2));
11122 #endif
11123 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11124 case TARGET_NR_unshare:
11125 return get_errno(unshare(arg1));
11126 #endif
11127 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11128 case TARGET_NR_kcmp:
11129 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11130 #endif
11131 #ifdef TARGET_NR_swapcontext
11132 case TARGET_NR_swapcontext:
11133 /* PowerPC specific. */
11134 return do_swapcontext(cpu_env, arg1, arg2, arg3);
11135 #endif
11137 default:
11138 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11139 return -TARGET_ENOSYS;
11141 return ret;
11144 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11145 abi_long arg2, abi_long arg3, abi_long arg4,
11146 abi_long arg5, abi_long arg6, abi_long arg7,
11147 abi_long arg8)
11149 CPUState *cpu = ENV_GET_CPU(cpu_env);
11150 abi_long ret;
11152 #ifdef DEBUG_ERESTARTSYS
11153 /* Debug-only code for exercising the syscall-restart code paths
11154 * in the per-architecture cpu main loops: restart every syscall
11155 * the guest makes once before letting it through.
11158 static bool flag;
11159 flag = !flag;
11160 if (flag) {
11161 return -TARGET_ERESTARTSYS;
11164 #endif
11166 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11167 arg5, arg6, arg7, arg8);
11169 if (unlikely(do_strace)) {
11170 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11171 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11172 arg5, arg6, arg7, arg8);
11173 print_syscall_ret(num, ret);
11174 } else {
11175 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11176 arg5, arg6, arg7, arg8);
11179 trace_guest_user_syscall_ret(cpu, num, ret);
11180 return ret;