linux-user: Add support for 'clock_nanosleep_time64()' and 'clock_adjtime64()'
[qemu/ar7.git] / linux-user / syscall.c

/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef CONFIG_KCOV
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"

#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
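
/*
 * Illustration (not from the original source): a typical glibc
 * pthread_create() issues clone() with
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which is exactly CLONE_THREAD_FLAGS plus bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it passes the mask checks above.
 */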

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                   \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                              \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);             \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                              \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);       \
}
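
/*
 * Illustration (not from the original source): after
 *   _syscall2(int, sys_getpriority, int, which, int, who)
 * the preprocessor has emitted
 *   static int sys_getpriority(int which, int who)
 *   { return syscall(__NR_sys_getpriority, which, who); }
 * where __NR_sys_getpriority is #defined to __NR_getpriority. This
 * bypasses the glibc wrapper, which rewrites getpriority()'s return
 * value and is not wanted here.
 */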

#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
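
/*
 * Editorial note on the FIXME above (not from the original source): as
 * written, two guest threads calling timer_create() concurrently could
 * both observe slot k as 0 before either stores the placeholder, handing
 * the same slot out twice. One lock-free fix would be to claim the slot
 * with an atomic compare-and-swap and treat only a successful exchange
 * from 0 as ownership of the slot.
 */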

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN] = TARGET_EAGAIN,
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [EDQUOT] = TARGET_EDQUOT,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG] = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL] = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON] = TARGET_EHWPOISON,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
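
/*
 * Illustration (not from the original source): if a host read() fails with
 * EAGAIN, get_errno() returns -TARGET_EAGAIN. On most targets that is the
 * same number as the host value, but targets such as Alpha renumber
 * several errnos, which is why the table lookup above is needed.
 */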

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
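
/*
 * Editorial note (not from the original source): safe_syscall() is
 * implemented elsewhere in linux-user; broadly, it performs the host
 * syscall in a way that lets a pending guest signal interrupt it cleanly,
 * so the caller can report TARGET_ERESTARTSYS and re-issue the syscall
 * after the guest's signal handler has run. The safe_* wrappers below are
 * therefore used for host syscalls that can block.
 */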

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#ifdef TARGET_NR_rt_sigtimedwait
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
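
/*
 * Illustration (not from the original source): a host value of
 * SOCK_STREAM | SOCK_NONBLOCK maps to TARGET_SOCK_STREAM |
 * TARGET_SOCK_NONBLOCK. Targets such as MIPS and Alpha define
 * SOCK_NONBLOCK (which follows O_NONBLOCK) differently from the generic
 * kernel value, so the bits cannot simply be copied through unchanged.
 */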

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
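
/*
 * Illustration (not from the original source): with 4 KiB host pages, a
 * guest whose break sits at 0x10000800 (so brk_page == 0x10001000) asking
 * for brk(0x10000c00) is satisfied by the fast path above, which only
 * zeroes the newly exposed bytes; asking for brk(0x10002000) instead
 * triggers a target_mmap() of one additional page at brk_page.
 */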

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
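
/*
 * Illustration (not from the original source): for a 32-bit guest
 * (TARGET_ABI_BITS == 32), fd 33 lives in bit 1 of word 1 of the guest
 * fd_set. The loops above walk words and bits in guest order and use
 * FD_SET()/FD_ISSET() on the host side, so differences in host word size
 * and endianness are absorbed by __get_user()/__put_user().
 */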

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
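
/*
 * Worked example (not from the original source): on an Alpha host
 * (HOST_HZ == 1024) emulating a target with TARGET_HZ == 100, 2048 host
 * ticks (2 seconds) convert to (2048 * 100) / 1024 = 200 target ticks,
 * i.e. still 2 seconds in the guest's unit.
 */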

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
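
/*
 * Illustration (not from the original source): struct __kernel_timespec
 * stores tv_nsec in a 64-bit field even for 32-bit guests, so a 32-bit
 * guest's tv_nsec arrives with 32 bits of padding that may be
 * uninitialised. The (long)(abi_long) cast above keeps only the guest's
 * 32 significant bits (sign-extended), mirroring how the kernel treats
 * the time64 ABI.
 */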

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
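
/*
 * Editorial note (not from the original source): on the targets special
 * cased above, the historical pipe() ABI returns both descriptors in
 * registers (e.g. MIPS returns the second fd in v1, written via gpr[3])
 * rather than through the user pointer, so those branches return early
 * and only the remaining architectures reach the put_user_s32() stores.
 */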

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
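
/*
 * Illustration (not from the original source): for SCM_RIGHTS the payload
 * is an array of ints, so a guest passing two fds in one control message
 * has len == 2 * sizeof(int) above, and each fd is converted individually
 * by __get_user(). The fd values themselves are usable on the host side
 * because linux-user keeps guest and host descriptor numbers identical.
 */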

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1914 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1915 if (msg_controllen < tgt_space) {
1916 tgt_space = msg_controllen;
1918 msg_controllen -= tgt_space;
1919 space += tgt_space;
1920 cmsg = CMSG_NXTHDR(msgh, cmsg);
1921 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1922 target_cmsg_start);
1924 unlock_user(target_cmsg, target_cmsg_addr, space);
1925 the_end:
1926 target_msgh->msg_controllen = tswapal(space);
1927 return 0;
1930 /* do_setsockopt() must return target values and target errnos. */
1931 static abi_long do_setsockopt(int sockfd, int level, int optname,
1932 abi_ulong optval_addr, socklen_t optlen)
1934 abi_long ret;
1935 int val;
1936 struct ip_mreqn *ip_mreq;
1937 struct ip_mreq_source *ip_mreq_source;
1939 switch (level) {
1940 case SOL_TCP:
1941 /* TCP options all take an 'int' value. */
1942 if (optlen < sizeof(uint32_t))
1943 return -TARGET_EINVAL;
1945 if (get_user_u32(val, optval_addr))
1946 return -TARGET_EFAULT;
1947 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1948 break;
1949 case SOL_IP:
1950 switch (optname) {
1951 case IP_TOS:
1952 case IP_TTL:
1953 case IP_HDRINCL:
1954 case IP_ROUTER_ALERT:
1955 case IP_RECVOPTS:
1956 case IP_RETOPTS:
1957 case IP_PKTINFO:
1958 case IP_MTU_DISCOVER:
1959 case IP_RECVERR:
1960 case IP_RECVTTL:
1961 case IP_RECVTOS:
1962 #ifdef IP_FREEBIND
1963 case IP_FREEBIND:
1964 #endif
1965 case IP_MULTICAST_TTL:
1966 case IP_MULTICAST_LOOP:
1967 val = 0;
1968 if (optlen >= sizeof(uint32_t)) {
1969 if (get_user_u32(val, optval_addr))
1970 return -TARGET_EFAULT;
1971 } else if (optlen >= 1) {
1972 if (get_user_u8(val, optval_addr))
1973 return -TARGET_EFAULT;
1975 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1976 break;
1977 case IP_ADD_MEMBERSHIP:
1978 case IP_DROP_MEMBERSHIP:
1979 if (optlen < sizeof(struct target_ip_mreq) ||
1980 optlen > sizeof(struct target_ip_mreqn))
1981 return -TARGET_EINVAL;
1983 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1984 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1985 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1986 break;
1988 case IP_BLOCK_SOURCE:
1989 case IP_UNBLOCK_SOURCE:
1990 case IP_ADD_SOURCE_MEMBERSHIP:
1991 case IP_DROP_SOURCE_MEMBERSHIP:
1992 if (optlen != sizeof(struct target_ip_mreq_source))
1993 return -TARGET_EINVAL;
1995 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
if (!ip_mreq_source) {
return -TARGET_EFAULT;
}
1996 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1997 unlock_user(ip_mreq_source, optval_addr, 0);
1998 break;
2000 default:
2001 goto unimplemented;
2003 break;
2004 case SOL_IPV6:
2005 switch (optname) {
2006 case IPV6_MTU_DISCOVER:
2007 case IPV6_MTU:
2008 case IPV6_V6ONLY:
2009 case IPV6_RECVPKTINFO:
2010 case IPV6_UNICAST_HOPS:
2011 case IPV6_MULTICAST_HOPS:
2012 case IPV6_MULTICAST_LOOP:
2013 case IPV6_RECVERR:
2014 case IPV6_RECVHOPLIMIT:
2015 case IPV6_2292HOPLIMIT:
2016 case IPV6_CHECKSUM:
2017 case IPV6_ADDRFORM:
2018 case IPV6_2292PKTINFO:
2019 case IPV6_RECVTCLASS:
2020 case IPV6_RECVRTHDR:
2021 case IPV6_2292RTHDR:
2022 case IPV6_RECVHOPOPTS:
2023 case IPV6_2292HOPOPTS:
2024 case IPV6_RECVDSTOPTS:
2025 case IPV6_2292DSTOPTS:
2026 case IPV6_TCLASS:
2027 #ifdef IPV6_RECVPATHMTU
2028 case IPV6_RECVPATHMTU:
2029 #endif
2030 #ifdef IPV6_TRANSPARENT
2031 case IPV6_TRANSPARENT:
2032 #endif
2033 #ifdef IPV6_FREEBIND
2034 case IPV6_FREEBIND:
2035 #endif
2036 #ifdef IPV6_RECVORIGDSTADDR
2037 case IPV6_RECVORIGDSTADDR:
2038 #endif
2039 val = 0;
2040 if (optlen < sizeof(uint32_t)) {
2041 return -TARGET_EINVAL;
2043 if (get_user_u32(val, optval_addr)) {
2044 return -TARGET_EFAULT;
2046 ret = get_errno(setsockopt(sockfd, level, optname,
2047 &val, sizeof(val)));
2048 break;
2049 case IPV6_PKTINFO:
2051 struct in6_pktinfo pki;
2053 if (optlen < sizeof(pki)) {
2054 return -TARGET_EINVAL;
2057 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2058 return -TARGET_EFAULT;
2061 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2063 ret = get_errno(setsockopt(sockfd, level, optname,
2064 &pki, sizeof(pki)));
2065 break;
2067 case IPV6_ADD_MEMBERSHIP:
2068 case IPV6_DROP_MEMBERSHIP:
2070 struct ipv6_mreq ipv6mreq;
2072 if (optlen < sizeof(ipv6mreq)) {
2073 return -TARGET_EINVAL;
2076 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2077 return -TARGET_EFAULT;
2080 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2082 ret = get_errno(setsockopt(sockfd, level, optname,
2083 &ipv6mreq, sizeof(ipv6mreq)));
2084 break;
2086 default:
2087 goto unimplemented;
2089 break;
2090 case SOL_ICMPV6:
2091 switch (optname) {
2092 case ICMPV6_FILTER:
2094 struct icmp6_filter icmp6f;
2096 if (optlen > sizeof(icmp6f)) {
2097 optlen = sizeof(icmp6f);
2100 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2101 return -TARGET_EFAULT;
2104 for (val = 0; val < 8; val++) {
2105 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2108 ret = get_errno(setsockopt(sockfd, level, optname,
2109 &icmp6f, optlen));
2110 break;
2112 default:
2113 goto unimplemented;
2115 break;
2116 case SOL_RAW:
2117 switch (optname) {
2118 case ICMP_FILTER:
2119 case IPV6_CHECKSUM:
2120 /* those take an u32 value */
2121 if (optlen < sizeof(uint32_t)) {
2122 return -TARGET_EINVAL;
2125 if (get_user_u32(val, optval_addr)) {
2126 return -TARGET_EFAULT;
2128 ret = get_errno(setsockopt(sockfd, level, optname,
2129 &val, sizeof(val)));
2130 break;
2132 default:
2133 goto unimplemented;
2135 break;
2136 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2137 case SOL_ALG:
2138 switch (optname) {
2139 case ALG_SET_KEY:
2141 char *alg_key = g_malloc(optlen);
2143 if (!alg_key) {
2144 return -TARGET_ENOMEM;
2146 if (copy_from_user(alg_key, optval_addr, optlen)) {
2147 g_free(alg_key);
2148 return -TARGET_EFAULT;
2150 ret = get_errno(setsockopt(sockfd, level, optname,
2151 alg_key, optlen));
2152 g_free(alg_key);
2153 break;
2155 case ALG_SET_AEAD_AUTHSIZE:
2157 ret = get_errno(setsockopt(sockfd, level, optname,
2158 NULL, optlen));
2159 break;
2161 default:
2162 goto unimplemented;
2164 break;
2165 #endif
2166 case TARGET_SOL_SOCKET:
2167 switch (optname) {
2168 case TARGET_SO_RCVTIMEO:
2170 struct timeval tv;
2172 optname = SO_RCVTIMEO;
2174 set_timeout:
2175 if (optlen != sizeof(struct target_timeval)) {
2176 return -TARGET_EINVAL;
2179 if (copy_from_user_timeval(&tv, optval_addr)) {
2180 return -TARGET_EFAULT;
2183 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2184 &tv, sizeof(tv)));
2185 return ret;
2187 case TARGET_SO_SNDTIMEO:
2188 optname = SO_SNDTIMEO;
2189 goto set_timeout;
2190 case TARGET_SO_ATTACH_FILTER:
2192 struct target_sock_fprog *tfprog;
2193 struct target_sock_filter *tfilter;
2194 struct sock_fprog fprog;
2195 struct sock_filter *filter;
2196 int i;
2198 if (optlen != sizeof(*tfprog)) {
2199 return -TARGET_EINVAL;
2201 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2202 return -TARGET_EFAULT;
2204 if (!lock_user_struct(VERIFY_READ, tfilter,
2205 tswapal(tfprog->filter), 0)) {
2206 unlock_user_struct(tfprog, optval_addr, 1);
2207 return -TARGET_EFAULT;
2210 fprog.len = tswap16(tfprog->len);
2211 filter = g_try_new(struct sock_filter, fprog.len);
2212 if (filter == NULL) {
2213 unlock_user_struct(tfilter, tfprog->filter, 1);
2214 unlock_user_struct(tfprog, optval_addr, 1);
2215 return -TARGET_ENOMEM;
2217 for (i = 0; i < fprog.len; i++) {
2218 filter[i].code = tswap16(tfilter[i].code);
2219 filter[i].jt = tfilter[i].jt;
2220 filter[i].jf = tfilter[i].jf;
2221 filter[i].k = tswap32(tfilter[i].k);
2223 fprog.filter = filter;
2225 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2226 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2227 g_free(filter);
2229 unlock_user_struct(tfilter, tfprog->filter, 1);
2230 unlock_user_struct(tfprog, optval_addr, 1);
2231 return ret;
2233 case TARGET_SO_BINDTODEVICE:
2235 char *dev_ifname, *addr_ifname;
2237 if (optlen > IFNAMSIZ - 1) {
2238 optlen = IFNAMSIZ - 1;
2240 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2241 if (!dev_ifname) {
2242 return -TARGET_EFAULT;
2244 optname = SO_BINDTODEVICE;
2245 addr_ifname = alloca(IFNAMSIZ);
2246 memcpy(addr_ifname, dev_ifname, optlen);
2247 addr_ifname[optlen] = 0;
2248 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2249 addr_ifname, optlen));
2250 unlock_user (dev_ifname, optval_addr, 0);
2251 return ret;
2253 case TARGET_SO_LINGER:
2255 struct linger lg;
2256 struct target_linger *tlg;
2258 if (optlen != sizeof(struct target_linger)) {
2259 return -TARGET_EINVAL;
2261 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2262 return -TARGET_EFAULT;
2264 __get_user(lg.l_onoff, &tlg->l_onoff);
2265 __get_user(lg.l_linger, &tlg->l_linger);
2266 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2267 &lg, sizeof(lg)));
2268 unlock_user_struct(tlg, optval_addr, 0);
2269 return ret;
2271 /* Options with 'int' argument. */
2272 case TARGET_SO_DEBUG:
2273 optname = SO_DEBUG;
2274 break;
2275 case TARGET_SO_REUSEADDR:
2276 optname = SO_REUSEADDR;
2277 break;
2278 #ifdef SO_REUSEPORT
2279 case TARGET_SO_REUSEPORT:
2280 optname = SO_REUSEPORT;
2281 break;
2282 #endif
2283 case TARGET_SO_TYPE:
2284 optname = SO_TYPE;
2285 break;
2286 case TARGET_SO_ERROR:
2287 optname = SO_ERROR;
2288 break;
2289 case TARGET_SO_DONTROUTE:
2290 optname = SO_DONTROUTE;
2291 break;
2292 case TARGET_SO_BROADCAST:
2293 optname = SO_BROADCAST;
2294 break;
2295 case TARGET_SO_SNDBUF:
2296 optname = SO_SNDBUF;
2297 break;
2298 case TARGET_SO_SNDBUFFORCE:
2299 optname = SO_SNDBUFFORCE;
2300 break;
2301 case TARGET_SO_RCVBUF:
2302 optname = SO_RCVBUF;
2303 break;
2304 case TARGET_SO_RCVBUFFORCE:
2305 optname = SO_RCVBUFFORCE;
2306 break;
2307 case TARGET_SO_KEEPALIVE:
2308 optname = SO_KEEPALIVE;
2309 break;
2310 case TARGET_SO_OOBINLINE:
2311 optname = SO_OOBINLINE;
2312 break;
2313 case TARGET_SO_NO_CHECK:
2314 optname = SO_NO_CHECK;
2315 break;
2316 case TARGET_SO_PRIORITY:
2317 optname = SO_PRIORITY;
2318 break;
2319 #ifdef SO_BSDCOMPAT
2320 case TARGET_SO_BSDCOMPAT:
2321 optname = SO_BSDCOMPAT;
2322 break;
2323 #endif
2324 case TARGET_SO_PASSCRED:
2325 optname = SO_PASSCRED;
2326 break;
2327 case TARGET_SO_PASSSEC:
2328 optname = SO_PASSSEC;
2329 break;
2330 case TARGET_SO_TIMESTAMP:
2331 optname = SO_TIMESTAMP;
2332 break;
2333 case TARGET_SO_RCVLOWAT:
2334 optname = SO_RCVLOWAT;
2335 break;
2336 default:
2337 goto unimplemented;
2339 if (optlen < sizeof(uint32_t))
2340 return -TARGET_EINVAL;
2342 if (get_user_u32(val, optval_addr))
2343 return -TARGET_EFAULT;
2344 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2345 break;
2346 #ifdef SOL_NETLINK
2347 case SOL_NETLINK:
2348 switch (optname) {
2349 case NETLINK_PKTINFO:
2350 case NETLINK_ADD_MEMBERSHIP:
2351 case NETLINK_DROP_MEMBERSHIP:
2352 case NETLINK_BROADCAST_ERROR:
2353 case NETLINK_NO_ENOBUFS:
2354 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2355 case NETLINK_LISTEN_ALL_NSID:
2356 case NETLINK_CAP_ACK:
2357 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2359 case NETLINK_EXT_ACK:
2360 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2361 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2362 case NETLINK_GET_STRICT_CHK:
2363 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2364 break;
2365 default:
2366 goto unimplemented;
2368 val = 0;
2369 if (optlen < sizeof(uint32_t)) {
2370 return -TARGET_EINVAL;
2372 if (get_user_u32(val, optval_addr)) {
2373 return -TARGET_EFAULT;
2375 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2376 sizeof(val)));
2377 break;
2378 #endif /* SOL_NETLINK */
2379 default:
2380 unimplemented:
2381 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2382 level, optname);
2383 ret = -TARGET_ENOPROTOOPT;
2385 return ret;
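/* do_getsockopt() below mirrors do_setsockopt(): most integer-valued
 * options funnel into the shared int_case path, which fetches an int
 * from the host and writes back a u32 when the guest supplied a
 * 4-byte buffer and a single byte otherwise.
 */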
2388 /* do_getsockopt() must return target values and target errnos. */
2389 static abi_long do_getsockopt(int sockfd, int level, int optname,
2390 abi_ulong optval_addr, abi_ulong optlen)
2392 abi_long ret;
2393 int len, val;
2394 socklen_t lv;
2396 switch (level) {
2397 case TARGET_SOL_SOCKET:
2398 level = SOL_SOCKET;
2399 switch (optname) {
2400 /* These don't just return a single integer */
2401 case TARGET_SO_PEERNAME:
2402 goto unimplemented;
2403 case TARGET_SO_RCVTIMEO: {
2404 struct timeval tv;
2405 socklen_t tvlen;
2407 optname = SO_RCVTIMEO;
2409 get_timeout:
2410 if (get_user_u32(len, optlen)) {
2411 return -TARGET_EFAULT;
2413 if (len < 0) {
2414 return -TARGET_EINVAL;
2417 tvlen = sizeof(tv);
2418 ret = get_errno(getsockopt(sockfd, level, optname,
2419 &tv, &tvlen));
2420 if (ret < 0) {
2421 return ret;
2423 if (len > sizeof(struct target_timeval)) {
2424 len = sizeof(struct target_timeval);
2426 if (copy_to_user_timeval(optval_addr, &tv)) {
2427 return -TARGET_EFAULT;
2429 if (put_user_u32(len, optlen)) {
2430 return -TARGET_EFAULT;
2432 break;
2434 case TARGET_SO_SNDTIMEO:
2435 optname = SO_SNDTIMEO;
2436 goto get_timeout;
2437 case TARGET_SO_PEERCRED: {
2438 struct ucred cr;
2439 socklen_t crlen;
2440 struct target_ucred *tcr;
2442 if (get_user_u32(len, optlen)) {
2443 return -TARGET_EFAULT;
2445 if (len < 0) {
2446 return -TARGET_EINVAL;
2449 crlen = sizeof(cr);
2450 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2451 &cr, &crlen));
2452 if (ret < 0) {
2453 return ret;
2455 if (len > crlen) {
2456 len = crlen;
2458 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2459 return -TARGET_EFAULT;
2461 __put_user(cr.pid, &tcr->pid);
2462 __put_user(cr.uid, &tcr->uid);
2463 __put_user(cr.gid, &tcr->gid);
2464 unlock_user_struct(tcr, optval_addr, 1);
2465 if (put_user_u32(len, optlen)) {
2466 return -TARGET_EFAULT;
2468 break;
2470 case TARGET_SO_PEERSEC: {
2471 char *name;
2473 if (get_user_u32(len, optlen)) {
2474 return -TARGET_EFAULT;
2476 if (len < 0) {
2477 return -TARGET_EINVAL;
2479 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2480 if (!name) {
2481 return -TARGET_EFAULT;
2483 lv = len;
2484 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2485 name, &lv));
2486 if (put_user_u32(lv, optlen)) {
2487 ret = -TARGET_EFAULT;
2489 unlock_user(name, optval_addr, lv);
2490 break;
2492 case TARGET_SO_LINGER:
2494 struct linger lg;
2495 socklen_t lglen;
2496 struct target_linger *tlg;
2498 if (get_user_u32(len, optlen)) {
2499 return -TARGET_EFAULT;
2501 if (len < 0) {
2502 return -TARGET_EINVAL;
2505 lglen = sizeof(lg);
2506 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2507 &lg, &lglen));
2508 if (ret < 0) {
2509 return ret;
2511 if (len > lglen) {
2512 len = lglen;
2514 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2515 return -TARGET_EFAULT;
2517 __put_user(lg.l_onoff, &tlg->l_onoff);
2518 __put_user(lg.l_linger, &tlg->l_linger);
2519 unlock_user_struct(tlg, optval_addr, 1);
2520 if (put_user_u32(len, optlen)) {
2521 return -TARGET_EFAULT;
2523 break;
2525 /* Options with 'int' argument. */
2526 case TARGET_SO_DEBUG:
2527 optname = SO_DEBUG;
2528 goto int_case;
2529 case TARGET_SO_REUSEADDR:
2530 optname = SO_REUSEADDR;
2531 goto int_case;
2532 #ifdef SO_REUSEPORT
2533 case TARGET_SO_REUSEPORT:
2534 optname = SO_REUSEPORT;
2535 goto int_case;
2536 #endif
2537 case TARGET_SO_TYPE:
2538 optname = SO_TYPE;
2539 goto int_case;
2540 case TARGET_SO_ERROR:
2541 optname = SO_ERROR;
2542 goto int_case;
2543 case TARGET_SO_DONTROUTE:
2544 optname = SO_DONTROUTE;
2545 goto int_case;
2546 case TARGET_SO_BROADCAST:
2547 optname = SO_BROADCAST;
2548 goto int_case;
2549 case TARGET_SO_SNDBUF:
2550 optname = SO_SNDBUF;
2551 goto int_case;
2552 case TARGET_SO_RCVBUF:
2553 optname = SO_RCVBUF;
2554 goto int_case;
2555 case TARGET_SO_KEEPALIVE:
2556 optname = SO_KEEPALIVE;
2557 goto int_case;
2558 case TARGET_SO_OOBINLINE:
2559 optname = SO_OOBINLINE;
2560 goto int_case;
2561 case TARGET_SO_NO_CHECK:
2562 optname = SO_NO_CHECK;
2563 goto int_case;
2564 case TARGET_SO_PRIORITY:
2565 optname = SO_PRIORITY;
2566 goto int_case;
2567 #ifdef SO_BSDCOMPAT
2568 case TARGET_SO_BSDCOMPAT:
2569 optname = SO_BSDCOMPAT;
2570 goto int_case;
2571 #endif
2572 case TARGET_SO_PASSCRED:
2573 optname = SO_PASSCRED;
2574 goto int_case;
2575 case TARGET_SO_TIMESTAMP:
2576 optname = SO_TIMESTAMP;
2577 goto int_case;
2578 case TARGET_SO_RCVLOWAT:
2579 optname = SO_RCVLOWAT;
2580 goto int_case;
2581 case TARGET_SO_ACCEPTCONN:
2582 optname = SO_ACCEPTCONN;
2583 goto int_case;
2584 default:
2585 goto int_case;
2587 break;
2588 case SOL_TCP:
2589 /* TCP options all take an 'int' value. */
2590 int_case:
2591 if (get_user_u32(len, optlen))
2592 return -TARGET_EFAULT;
2593 if (len < 0)
2594 return -TARGET_EINVAL;
2595 lv = sizeof(lv);
2596 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2597 if (ret < 0)
2598 return ret;
2599 if (optname == SO_TYPE) {
2600 val = host_to_target_sock_type(val);
2602 if (len > lv)
2603 len = lv;
2604 if (len == 4) {
2605 if (put_user_u32(val, optval_addr))
2606 return -TARGET_EFAULT;
2607 } else {
2608 if (put_user_u8(val, optval_addr))
2609 return -TARGET_EFAULT;
2611 if (put_user_u32(len, optlen))
2612 return -TARGET_EFAULT;
2613 break;
2614 case SOL_IP:
2615 switch (optname) {
2616 case IP_TOS:
2617 case IP_TTL:
2618 case IP_HDRINCL:
2619 case IP_ROUTER_ALERT:
2620 case IP_RECVOPTS:
2621 case IP_RETOPTS:
2622 case IP_PKTINFO:
2623 case IP_MTU_DISCOVER:
2624 case IP_RECVERR:
2625 case IP_RECVTOS:
2626 #ifdef IP_FREEBIND
2627 case IP_FREEBIND:
2628 #endif
2629 case IP_MULTICAST_TTL:
2630 case IP_MULTICAST_LOOP:
2631 if (get_user_u32(len, optlen))
2632 return -TARGET_EFAULT;
2633 if (len < 0)
2634 return -TARGET_EINVAL;
2635 lv = sizeof(lv);
2636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2637 if (ret < 0)
2638 return ret;
2639 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2640 len = 1;
2641 if (put_user_u32(len, optlen)
2642 || put_user_u8(val, optval_addr))
2643 return -TARGET_EFAULT;
2644 } else {
2645 if (len > sizeof(int))
2646 len = sizeof(int);
2647 if (put_user_u32(len, optlen)
2648 || put_user_u32(val, optval_addr))
2649 return -TARGET_EFAULT;
2651 break;
2652 default:
2653 ret = -TARGET_ENOPROTOOPT;
2654 break;
2656 break;
2657 case SOL_IPV6:
2658 switch (optname) {
2659 case IPV6_MTU_DISCOVER:
2660 case IPV6_MTU:
2661 case IPV6_V6ONLY:
2662 case IPV6_RECVPKTINFO:
2663 case IPV6_UNICAST_HOPS:
2664 case IPV6_MULTICAST_HOPS:
2665 case IPV6_MULTICAST_LOOP:
2666 case IPV6_RECVERR:
2667 case IPV6_RECVHOPLIMIT:
2668 case IPV6_2292HOPLIMIT:
2669 case IPV6_CHECKSUM:
2670 case IPV6_ADDRFORM:
2671 case IPV6_2292PKTINFO:
2672 case IPV6_RECVTCLASS:
2673 case IPV6_RECVRTHDR:
2674 case IPV6_2292RTHDR:
2675 case IPV6_RECVHOPOPTS:
2676 case IPV6_2292HOPOPTS:
2677 case IPV6_RECVDSTOPTS:
2678 case IPV6_2292DSTOPTS:
2679 case IPV6_TCLASS:
2680 #ifdef IPV6_RECVPATHMTU
2681 case IPV6_RECVPATHMTU:
2682 #endif
2683 #ifdef IPV6_TRANSPARENT
2684 case IPV6_TRANSPARENT:
2685 #endif
2686 #ifdef IPV6_FREEBIND
2687 case IPV6_FREEBIND:
2688 #endif
2689 #ifdef IPV6_RECVORIGDSTADDR
2690 case IPV6_RECVORIGDSTADDR:
2691 #endif
2692 if (get_user_u32(len, optlen))
2693 return -TARGET_EFAULT;
2694 if (len < 0)
2695 return -TARGET_EINVAL;
2696 lv = sizeof(lv);
2697 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2698 if (ret < 0)
2699 return ret;
2700 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2701 len = 1;
2702 if (put_user_u32(len, optlen)
2703 || put_user_u8(val, optval_addr))
2704 return -TARGET_EFAULT;
2705 } else {
2706 if (len > sizeof(int))
2707 len = sizeof(int);
2708 if (put_user_u32(len, optlen)
2709 || put_user_u32(val, optval_addr))
2710 return -TARGET_EFAULT;
2712 break;
2713 default:
2714 ret = -TARGET_ENOPROTOOPT;
2715 break;
2717 break;
2718 #ifdef SOL_NETLINK
2719 case SOL_NETLINK:
2720 switch (optname) {
2721 case NETLINK_PKTINFO:
2722 case NETLINK_BROADCAST_ERROR:
2723 case NETLINK_NO_ENOBUFS:
2724 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2725 case NETLINK_LISTEN_ALL_NSID:
2726 case NETLINK_CAP_ACK:
2727 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2729 case NETLINK_EXT_ACK:
2730 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2731 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2732 case NETLINK_GET_STRICT_CHK:
2733 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2734 if (get_user_u32(len, optlen)) {
2735 return -TARGET_EFAULT;
2737 if (len != sizeof(val)) {
2738 return -TARGET_EINVAL;
2740 lv = len;
2741 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2742 if (ret < 0) {
2743 return ret;
2745 if (put_user_u32(lv, optlen)
2746 || put_user_u32(val, optval_addr)) {
2747 return -TARGET_EFAULT;
2749 break;
2750 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2751 case NETLINK_LIST_MEMBERSHIPS:
2753 uint32_t *results;
2754 int i;
2755 if (get_user_u32(len, optlen)) {
2756 return -TARGET_EFAULT;
2758 if (len < 0) {
2759 return -TARGET_EINVAL;
2761 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2762 if (!results) {
2763 return -TARGET_EFAULT;
2765 lv = len;
2766 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2767 if (ret < 0) {
2768 unlock_user(results, optval_addr, 0);
2769 return ret;
2771 /* swap host endianness to target endianness. */
2772 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2773 results[i] = tswap32(results[i]);
2775 if (put_user_u32(lv, optlen)) {
unlock_user(results, optval_addr, 0);
2776 return -TARGET_EFAULT;
}
2778 unlock_user(results, optval_addr, 0);
2779 break;
2781 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2782 default:
2783 goto unimplemented;
2785 break;
2786 #endif /* SOL_NETLINK */
2787 default:
2788 unimplemented:
2789 qemu_log_mask(LOG_UNIMP,
2790 "getsockopt level=%d optname=%d not yet supported\n",
2791 level, optname);
2792 ret = -TARGET_EOPNOTSUPP;
2793 break;
2795 return ret;
2798 /* Convert target low/high pair representing file offset into the host
2799 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2800 * as the kernel doesn't handle them either.
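 * Illustration (added note, not in the original comment): on a 32-bit
 * target, tlow=0x89abcdef and thigh=0x01234567 combine into
 * off=0x0123456789abcdef. The shift by TARGET_LONG_BITS is done as two
 * half-width shifts, presumably so that on a 64-bit target, where a
 * single shift by 64 would be undefined behaviour in C, thigh is
 * simply shifted out and tlow already carries the whole offset; the
 * same trick splits 'off' back into host halves below.
 */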
2802 static void target_to_host_low_high(abi_ulong tlow,
2803 abi_ulong thigh,
2804 unsigned long *hlow,
2805 unsigned long *hhigh)
2807 uint64_t off = tlow |
2808 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2809 TARGET_LONG_BITS / 2;
2811 *hlow = off;
2812 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2815 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2816 abi_ulong count, int copy)
2818 struct target_iovec *target_vec;
2819 struct iovec *vec;
2820 abi_ulong total_len, max_len;
2821 int i;
2822 int err = 0;
2823 bool bad_address = false;
2825 if (count == 0) {
2826 errno = 0;
2827 return NULL;
2829 if (count > IOV_MAX) {
2830 errno = EINVAL;
2831 return NULL;
2834 vec = g_try_new0(struct iovec, count);
2835 if (vec == NULL) {
2836 errno = ENOMEM;
2837 return NULL;
2840 target_vec = lock_user(VERIFY_READ, target_addr,
2841 count * sizeof(struct target_iovec), 1);
2842 if (target_vec == NULL) {
2843 err = EFAULT;
2844 goto fail2;
2847 /* ??? If host page size > target page size, this will result in a
2848 value larger than what we can actually support. */
2849 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2850 total_len = 0;
2852 for (i = 0; i < count; i++) {
2853 abi_ulong base = tswapal(target_vec[i].iov_base);
2854 abi_long len = tswapal(target_vec[i].iov_len);
2856 if (len < 0) {
2857 err = EINVAL;
2858 goto fail;
2859 } else if (len == 0) {
2860 /* Zero length pointer is ignored. */
2861 vec[i].iov_base = 0;
2862 } else {
2863 vec[i].iov_base = lock_user(type, base, len, copy);
2864 /* If the first buffer pointer is bad, this is a fault. But
2865 * subsequent bad buffers will result in a partial write; this
2866 * is realized by filling the vector with null pointers and
2867 * zero lengths. */
2868 if (!vec[i].iov_base) {
2869 if (i == 0) {
2870 err = EFAULT;
2871 goto fail;
2872 } else {
2873 bad_address = true;
2876 if (bad_address) {
2877 len = 0;
2879 if (len > max_len - total_len) {
2880 len = max_len - total_len;
2883 vec[i].iov_len = len;
2884 total_len += len;
2887 unlock_user(target_vec, target_addr, 0);
2888 return vec;
2890 fail:
2891 while (--i >= 0) {
2892 if (tswapal(target_vec[i].iov_len) > 0) {
2893 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2896 unlock_user(target_vec, target_addr, 0);
2897 fail2:
2898 g_free(vec);
2899 errno = err;
2900 return NULL;
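/* unlock_iovec() must be given the same target_addr and count that
 * were passed to lock_iovec(), with copy nonzero when the host buffers
 * need to be written back to guest memory (the read/receive direction).
 */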
2903 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2904 abi_ulong count, int copy)
2906 struct target_iovec *target_vec;
2907 int i;
2909 target_vec = lock_user(VERIFY_READ, target_addr,
2910 count * sizeof(struct target_iovec), 1);
2911 if (target_vec) {
2912 for (i = 0; i < count; i++) {
2913 abi_ulong base = tswapal(target_vec[i].iov_base);
2914 abi_long len = tswapal(target_vec[i].iov_len);
2915 if (len < 0) {
2916 break;
2918 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2920 unlock_user(target_vec, target_addr, 0);
2923 g_free(vec);
2926 static inline int target_to_host_sock_type(int *type)
2928 int host_type = 0;
2929 int target_type = *type;
2931 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2932 case TARGET_SOCK_DGRAM:
2933 host_type = SOCK_DGRAM;
2934 break;
2935 case TARGET_SOCK_STREAM:
2936 host_type = SOCK_STREAM;
2937 break;
2938 default:
2939 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2940 break;
2942 if (target_type & TARGET_SOCK_CLOEXEC) {
2943 #if defined(SOCK_CLOEXEC)
2944 host_type |= SOCK_CLOEXEC;
2945 #else
2946 return -TARGET_EINVAL;
2947 #endif
2949 if (target_type & TARGET_SOCK_NONBLOCK) {
2950 #if defined(SOCK_NONBLOCK)
2951 host_type |= SOCK_NONBLOCK;
2952 #elif !defined(O_NONBLOCK)
2953 return -TARGET_EINVAL;
2954 #endif
2956 *type = host_type;
2957 return 0;
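/* Worked example (illustrative): a guest socket(AF_INET,
 * TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK, 0) becomes
 * SOCK_STREAM | SOCK_NONBLOCK on hosts that define SOCK_NONBLOCK;
 * otherwise the non-blocking flag is emulated after creation with
 * fcntl() in sock_flags_fixup() below.
 */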
2960 /* Try to emulate socket type flags after socket creation. */
2961 static int sock_flags_fixup(int fd, int target_type)
2963 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2964 if (target_type & TARGET_SOCK_NONBLOCK) {
2965 int flags = fcntl(fd, F_GETFL);
2966 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2967 close(fd);
2968 return -TARGET_EINVAL;
2971 #endif
2972 return fd;
2975 /* do_socket() must return target values and target errnos. */
2976 static abi_long do_socket(int domain, int type, int protocol)
2978 int target_type = type;
2979 int ret;
2981 ret = target_to_host_sock_type(&type);
2982 if (ret) {
2983 return ret;
2986 if (domain == PF_NETLINK && !(
2987 #ifdef CONFIG_RTNETLINK
2988 protocol == NETLINK_ROUTE ||
2989 #endif
2990 protocol == NETLINK_KOBJECT_UEVENT ||
2991 protocol == NETLINK_AUDIT)) {
2992 return -TARGET_EPROTONOSUPPORT;
2995 if (domain == AF_PACKET ||
2996 (domain == AF_INET && type == SOCK_PACKET)) {
2997 protocol = tswap16(protocol);
3000 ret = get_errno(socket(domain, type, protocol));
3001 if (ret >= 0) {
3002 ret = sock_flags_fixup(ret, target_type);
3003 if (type == SOCK_PACKET) {
3004 /* Handle an obsolete case: SOCK_PACKET sockets bind by
3005 * device name, so register the packet fd translator.
3007 fd_trans_register(ret, &target_packet_trans);
3008 } else if (domain == PF_NETLINK) {
3009 switch (protocol) {
3010 #ifdef CONFIG_RTNETLINK
3011 case NETLINK_ROUTE:
3012 fd_trans_register(ret, &target_netlink_route_trans);
3013 break;
3014 #endif
3015 case NETLINK_KOBJECT_UEVENT:
3016 /* nothing to do: messages are strings */
3017 break;
3018 case NETLINK_AUDIT:
3019 fd_trans_register(ret, &target_netlink_audit_trans);
3020 break;
3021 default:
3022 g_assert_not_reached();
3026 return ret;
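/* Note: fd_trans_register() attaches a per-fd translator, so that
 * subsequent reads and writes on these netlink and packet sockets are
 * converted between host and target layouts by the fd_trans hooks
 * used elsewhere in this file.
 */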
3029 /* do_bind() must return target values and target errnos. */
3030 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3031 socklen_t addrlen)
3033 void *addr;
3034 abi_long ret;
3036 if ((int)addrlen < 0) {
3037 return -TARGET_EINVAL;
3040 addr = alloca(addrlen+1);
3042 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3043 if (ret)
3044 return ret;
3046 return get_errno(bind(sockfd, addr, addrlen));
3049 /* do_connect() must return target values and target errnos. */
3050 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3051 socklen_t addrlen)
3053 void *addr;
3054 abi_long ret;
3056 if ((int)addrlen < 0) {
3057 return -TARGET_EINVAL;
3060 addr = alloca(addrlen+1);
3062 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3063 if (ret)
3064 return ret;
3066 return get_errno(safe_connect(sockfd, addr, addrlen));
3069 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3070 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3071 int flags, int send)
3073 abi_long ret, len;
3074 struct msghdr msg;
3075 abi_ulong count;
3076 struct iovec *vec;
3077 abi_ulong target_vec;
3079 if (msgp->msg_name) {
3080 msg.msg_namelen = tswap32(msgp->msg_namelen);
3081 msg.msg_name = alloca(msg.msg_namelen+1);
3082 ret = target_to_host_sockaddr(fd, msg.msg_name,
3083 tswapal(msgp->msg_name),
3084 msg.msg_namelen);
3085 if (ret == -TARGET_EFAULT) {
3086 /* For connected sockets msg_name and msg_namelen must
3087 * be ignored, so returning EFAULT immediately is wrong.
3088 * Instead, pass a bad msg_name to the host kernel, and
3089 * let it decide whether to return EFAULT or not.
3091 msg.msg_name = (void *)-1;
3092 } else if (ret) {
3093 goto out2;
3095 } else {
3096 msg.msg_name = NULL;
3097 msg.msg_namelen = 0;
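/* The host control buffer is sized at twice the guest's
 * msg_controllen, presumably to leave headroom for host cmsgs that
 * are larger than their target counterparts (e.g. 64-bit alignment
 * and 16-byte timevals on a 64-bit host versus a 32-bit guest).
 */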
3099 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3100 msg.msg_control = alloca(msg.msg_controllen);
3101 memset(msg.msg_control, 0, msg.msg_controllen);
3103 msg.msg_flags = tswap32(msgp->msg_flags);
3105 count = tswapal(msgp->msg_iovlen);
3106 target_vec = tswapal(msgp->msg_iov);
3108 if (count > IOV_MAX) {
3109 /* sendmsg/recvmsg return a different errno for this condition than
3110 * readv/writev, so we must catch it here before lock_iovec() does.
3112 ret = -TARGET_EMSGSIZE;
3113 goto out2;
3116 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3117 target_vec, count, send);
3118 if (vec == NULL) {
3119 ret = -host_to_target_errno(errno);
3120 goto out2;
3122 msg.msg_iovlen = count;
3123 msg.msg_iov = vec;
3125 if (send) {
3126 if (fd_trans_target_to_host_data(fd)) {
3127 void *host_msg;
3129 host_msg = g_malloc(msg.msg_iov->iov_len);
3130 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3131 ret = fd_trans_target_to_host_data(fd)(host_msg,
3132 msg.msg_iov->iov_len);
3133 if (ret >= 0) {
3134 msg.msg_iov->iov_base = host_msg;
3135 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3137 g_free(host_msg);
3138 } else {
3139 ret = target_to_host_cmsg(&msg, msgp);
3140 if (ret == 0) {
3141 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3144 } else {
3145 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3146 if (!is_error(ret)) {
3147 len = ret;
3148 if (fd_trans_host_to_target_data(fd)) {
3149 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3150 MIN(msg.msg_iov->iov_len, len));
3151 } else {
3152 ret = host_to_target_cmsg(msgp, &msg);
3154 if (!is_error(ret)) {
3155 msgp->msg_namelen = tswap32(msg.msg_namelen);
3156 msgp->msg_flags = tswap32(msg.msg_flags);
3157 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3158 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3159 msg.msg_name, msg.msg_namelen);
3160 if (ret) {
3161 goto out;
3165 ret = len;
3170 out:
3171 unlock_iovec(vec, target_vec, count, !send);
3172 out2:
3173 return ret;
3176 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3177 int flags, int send)
3179 abi_long ret;
3180 struct target_msghdr *msgp;
3182 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3183 msgp,
3184 target_msg,
3185 send ? 1 : 0)) {
3186 return -TARGET_EFAULT;
3188 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3189 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3190 return ret;
3193 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3194 * so it might not have this *mmsg-specific flag either.
3196 #ifndef MSG_WAITFORONE
3197 #define MSG_WAITFORONE 0x10000
3198 #endif
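/* do_sendrecvmmsg() emulates sendmmsg()/recvmmsg() with a loop of
 * single sendmsg/recvmsg calls rather than the host's batched syscall,
 * so the batch is not atomic; MSG_WAITFORONE is implemented by OR-ing
 * MSG_DONTWAIT into the flags once the first message has been handled.
 */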
3200 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3201 unsigned int vlen, unsigned int flags,
3202 int send)
3204 struct target_mmsghdr *mmsgp;
3205 abi_long ret = 0;
3206 int i;
3208 if (vlen > UIO_MAXIOV) {
3209 vlen = UIO_MAXIOV;
3212 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3213 if (!mmsgp) {
3214 return -TARGET_EFAULT;
3217 for (i = 0; i < vlen; i++) {
3218 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3219 if (is_error(ret)) {
3220 break;
3222 mmsgp[i].msg_len = tswap32(ret);
3223 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3224 if (flags & MSG_WAITFORONE) {
3225 flags |= MSG_DONTWAIT;
3229 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3231 /* Return number of datagrams sent if we sent any at all;
3232 * otherwise return the error.
3234 if (i) {
3235 return i;
3237 return ret;
3240 /* do_accept4() must return target values and target errnos. */
3241 static abi_long do_accept4(int fd, abi_ulong target_addr,
3242 abi_ulong target_addrlen_addr, int flags)
3244 socklen_t addrlen, ret_addrlen;
3245 void *addr;
3246 abi_long ret;
3247 int host_flags;
3249 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3251 if (target_addr == 0) {
3252 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3255 /* Linux returns EINVAL if the addrlen pointer is invalid */
3256 if (get_user_u32(addrlen, target_addrlen_addr))
3257 return -TARGET_EINVAL;
3259 if ((int)addrlen < 0) {
3260 return -TARGET_EINVAL;
3263 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3264 return -TARGET_EINVAL;
3266 addr = alloca(addrlen);
3268 ret_addrlen = addrlen;
3269 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3270 if (!is_error(ret)) {
3271 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3272 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3273 ret = -TARGET_EFAULT;
3276 return ret;
3279 /* do_getpeername() must return target values and target errnos. */
3280 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3281 abi_ulong target_addrlen_addr)
3283 socklen_t addrlen, ret_addrlen;
3284 void *addr;
3285 abi_long ret;
3287 if (get_user_u32(addrlen, target_addrlen_addr))
3288 return -TARGET_EFAULT;
3290 if ((int)addrlen < 0) {
3291 return -TARGET_EINVAL;
3294 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3295 return -TARGET_EFAULT;
3297 addr = alloca(addrlen);
3299 ret_addrlen = addrlen;
3300 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3301 if (!is_error(ret)) {
3302 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3303 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3304 ret = -TARGET_EFAULT;
3307 return ret;
3310 /* do_getsockname() must return target values and target errnos. */
3311 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3312 abi_ulong target_addrlen_addr)
3314 socklen_t addrlen, ret_addrlen;
3315 void *addr;
3316 abi_long ret;
3318 if (get_user_u32(addrlen, target_addrlen_addr))
3319 return -TARGET_EFAULT;
3321 if ((int)addrlen < 0) {
3322 return -TARGET_EINVAL;
3325 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3326 return -TARGET_EFAULT;
3328 addr = alloca(addrlen);
3330 ret_addrlen = addrlen;
3331 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3332 if (!is_error(ret)) {
3333 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3334 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3335 ret = -TARGET_EFAULT;
3338 return ret;
3341 /* do_socketpair() must return target values and target errnos. */
3342 static abi_long do_socketpair(int domain, int type, int protocol,
3343 abi_ulong target_tab_addr)
3345 int tab[2];
3346 abi_long ret;
3348 target_to_host_sock_type(&type);
3350 ret = get_errno(socketpair(domain, type, protocol, tab));
3351 if (!is_error(ret)) {
3352 if (put_user_s32(tab[0], target_tab_addr)
3353 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3354 ret = -TARGET_EFAULT;
3356 return ret;
3359 /* do_sendto() must return target values and target errnos. */
3360 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3361 abi_ulong target_addr, socklen_t addrlen)
3363 void *addr;
3364 void *host_msg;
3365 void *copy_msg = NULL;
3366 abi_long ret;
3368 if ((int)addrlen < 0) {
3369 return -TARGET_EINVAL;
3372 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3373 if (!host_msg)
3374 return -TARGET_EFAULT;
3375 if (fd_trans_target_to_host_data(fd)) {
3376 copy_msg = host_msg;
3377 host_msg = g_malloc(len);
3378 memcpy(host_msg, copy_msg, len);
3379 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3380 if (ret < 0) {
3381 goto fail;
3384 if (target_addr) {
3385 addr = alloca(addrlen+1);
3386 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3387 if (ret) {
3388 goto fail;
3390 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3391 } else {
3392 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3394 fail:
3395 if (copy_msg) {
3396 g_free(host_msg);
3397 host_msg = copy_msg;
3399 unlock_user(host_msg, msg, 0);
3400 return ret;
3403 /* do_recvfrom() must return target values and target errnos. */
3404 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3405 abi_ulong target_addr,
3406 abi_ulong target_addrlen)
3408 socklen_t addrlen, ret_addrlen;
3409 void *addr;
3410 void *host_msg;
3411 abi_long ret;
3413 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3414 if (!host_msg)
3415 return -TARGET_EFAULT;
3416 if (target_addr) {
3417 if (get_user_u32(addrlen, target_addrlen)) {
3418 ret = -TARGET_EFAULT;
3419 goto fail;
3421 if ((int)addrlen < 0) {
3422 ret = -TARGET_EINVAL;
3423 goto fail;
3425 addr = alloca(addrlen);
3426 ret_addrlen = addrlen;
3427 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3428 addr, &ret_addrlen));
3429 } else {
3430 addr = NULL; /* To keep compiler quiet. */
3431 addrlen = 0; /* To keep compiler quiet. */
3432 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3434 if (!is_error(ret)) {
3435 if (fd_trans_host_to_target_data(fd)) {
3436 abi_long trans;
3437 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3438 if (is_error(trans)) {
3439 ret = trans;
3440 goto fail;
3443 if (target_addr) {
3444 host_to_target_sockaddr(target_addr, addr,
3445 MIN(addrlen, ret_addrlen));
3446 if (put_user_u32(ret_addrlen, target_addrlen)) {
3447 ret = -TARGET_EFAULT;
3448 goto fail;
3451 unlock_user(host_msg, msg, len);
3452 } else {
3453 fail:
3454 unlock_user(host_msg, msg, 0);
3456 return ret;
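/* On targets such as i386 the guest libc issues a single multiplexed
 * socketcall(2) instead of separate socket syscalls; do_socketcall()
 * below unpacks the argument block from guest memory and dispatches
 * to the do_*() helpers above.
 */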
3459 #ifdef TARGET_NR_socketcall
3460 /* do_socketcall() must return target values and target errnos. */
3461 static abi_long do_socketcall(int num, abi_ulong vptr)
3463 static const unsigned nargs[] = { /* number of arguments per operation */
3464 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3465 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3466 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3467 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3468 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3469 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3470 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3471 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3472 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3473 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3474 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3475 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3476 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3477 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3478 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3479 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3480 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3481 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3482 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3483 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3485 abi_long a[6]; /* max 6 args */
3486 unsigned i;
3488 /* check the range of the first argument num */
3489 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3490 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3491 return -TARGET_EINVAL;
3493 /* ensure we have space for args */
3494 if (nargs[num] > ARRAY_SIZE(a)) {
3495 return -TARGET_EINVAL;
3497 /* collect the arguments in a[] according to nargs[] */
3498 for (i = 0; i < nargs[num]; ++i) {
3499 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3500 return -TARGET_EFAULT;
3503 /* now when we have the args, invoke the appropriate underlying function */
3504 switch (num) {
3505 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3506 return do_socket(a[0], a[1], a[2]);
3507 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3508 return do_bind(a[0], a[1], a[2]);
3509 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3510 return do_connect(a[0], a[1], a[2]);
3511 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3512 return get_errno(listen(a[0], a[1]));
3513 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3514 return do_accept4(a[0], a[1], a[2], 0);
3515 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3516 return do_getsockname(a[0], a[1], a[2]);
3517 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3518 return do_getpeername(a[0], a[1], a[2]);
3519 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3520 return do_socketpair(a[0], a[1], a[2], a[3]);
3521 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3522 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3523 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3524 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3525 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3526 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3527 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3528 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3529 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3530 return get_errno(shutdown(a[0], a[1]));
3531 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3532 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3533 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3534 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3535 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3536 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3537 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3538 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3539 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3540 return do_accept4(a[0], a[1], a[2], a[3]);
3541 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3542 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3543 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3544 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3545 default:
3546 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3547 return -TARGET_EINVAL;
3550 #endif
3552 #define N_SHM_REGIONS 32
3554 static struct shm_region {
3555 abi_ulong start;
3556 abi_ulong size;
3557 bool in_use;
3558 } shm_regions[N_SHM_REGIONS];
3560 #ifndef TARGET_SEMID64_DS
3561 /* asm-generic version of this struct */
3562 struct target_semid64_ds
3564 struct target_ipc_perm sem_perm;
3565 abi_ulong sem_otime;
3566 #if TARGET_ABI_BITS == 32
3567 abi_ulong __unused1;
3568 #endif
3569 abi_ulong sem_ctime;
3570 #if TARGET_ABI_BITS == 32
3571 abi_ulong __unused2;
3572 #endif
3573 abi_ulong sem_nsems;
3574 abi_ulong __unused3;
3575 abi_ulong __unused4;
3577 #endif
3579 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3580 abi_ulong target_addr)
3582 struct target_ipc_perm *target_ip;
3583 struct target_semid64_ds *target_sd;
3585 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3586 return -TARGET_EFAULT;
3587 target_ip = &(target_sd->sem_perm);
3588 host_ip->__key = tswap32(target_ip->__key);
3589 host_ip->uid = tswap32(target_ip->uid);
3590 host_ip->gid = tswap32(target_ip->gid);
3591 host_ip->cuid = tswap32(target_ip->cuid);
3592 host_ip->cgid = tswap32(target_ip->cgid);
3593 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3594 host_ip->mode = tswap32(target_ip->mode);
3595 #else
3596 host_ip->mode = tswap16(target_ip->mode);
3597 #endif
3598 #if defined(TARGET_PPC)
3599 host_ip->__seq = tswap32(target_ip->__seq);
3600 #else
3601 host_ip->__seq = tswap16(target_ip->__seq);
3602 #endif
3603 unlock_user_struct(target_sd, target_addr, 0);
3604 return 0;
3607 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3608 struct ipc_perm *host_ip)
3610 struct target_ipc_perm *target_ip;
3611 struct target_semid64_ds *target_sd;
3613 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3614 return -TARGET_EFAULT;
3615 target_ip = &(target_sd->sem_perm);
3616 target_ip->__key = tswap32(host_ip->__key);
3617 target_ip->uid = tswap32(host_ip->uid);
3618 target_ip->gid = tswap32(host_ip->gid);
3619 target_ip->cuid = tswap32(host_ip->cuid);
3620 target_ip->cgid = tswap32(host_ip->cgid);
3621 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3622 target_ip->mode = tswap32(host_ip->mode);
3623 #else
3624 target_ip->mode = tswap16(host_ip->mode);
3625 #endif
3626 #if defined(TARGET_PPC)
3627 target_ip->__seq = tswap32(host_ip->__seq);
3628 #else
3629 target_ip->__seq = tswap16(host_ip->__seq);
3630 #endif
3631 unlock_user_struct(target_sd, target_addr, 1);
3632 return 0;
3635 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3636 abi_ulong target_addr)
3638 struct target_semid64_ds *target_sd;
3640 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3641 return -TARGET_EFAULT;
3642 if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
3643 return -TARGET_EFAULT;
3644 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3645 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3646 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3647 unlock_user_struct(target_sd, target_addr, 0);
3648 return 0;
3651 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3652 struct semid_ds *host_sd)
3654 struct target_semid64_ds *target_sd;
3656 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3657 return -TARGET_EFAULT;
3658 if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
3659 return -TARGET_EFAULT;
3660 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3661 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3662 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3663 unlock_user_struct(target_sd, target_addr, 1);
3664 return 0;
3667 struct target_seminfo {
3668 int semmap;
3669 int semmni;
3670 int semmns;
3671 int semmnu;
3672 int semmsl;
3673 int semopm;
3674 int semume;
3675 int semusz;
3676 int semvmx;
3677 int semaem;
3680 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3681 struct seminfo *host_seminfo)
3683 struct target_seminfo *target_seminfo;
3684 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3685 return -TARGET_EFAULT;
3686 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3687 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3688 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3689 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3690 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3691 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3692 __put_user(host_seminfo->semume, &target_seminfo->semume);
3693 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3694 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3695 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3696 unlock_user_struct(target_seminfo, target_addr, 1);
3697 return 0;
3700 union semun {
3701 int val;
3702 struct semid_ds *buf;
3703 unsigned short *array;
3704 struct seminfo *__buf;
3707 union target_semun {
3708 int val;
3709 abi_ulong buf;
3710 abi_ulong array;
3711 abi_ulong __buf;
3714 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3715 abi_ulong target_addr)
3717 int nsems;
3718 unsigned short *array;
3719 union semun semun;
3720 struct semid_ds semid_ds;
3721 int i, ret;
3723 semun.buf = &semid_ds;
3725 ret = semctl(semid, 0, IPC_STAT, semun);
3726 if (ret == -1)
3727 return get_errno(ret);
3729 nsems = semid_ds.sem_nsems;
3731 *host_array = g_try_new(unsigned short, nsems);
3732 if (!*host_array) {
3733 return -TARGET_ENOMEM;
3735 array = lock_user(VERIFY_READ, target_addr,
3736 nsems*sizeof(unsigned short), 1);
3737 if (!array) {
3738 g_free(*host_array);
3739 return -TARGET_EFAULT;
3742 for (i = 0; i < nsems; i++) {
3743 __get_user((*host_array)[i], &array[i]);
3745 unlock_user(array, target_addr, 0);
3747 return 0;
3750 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3751 unsigned short **host_array)
3753 int nsems;
3754 unsigned short *array;
3755 union semun semun;
3756 struct semid_ds semid_ds;
3757 int i, ret;
3759 semun.buf = &semid_ds;
3761 ret = semctl(semid, 0, IPC_STAT, semun);
3762 if (ret == -1)
3763 return get_errno(ret);
3765 nsems = semid_ds.sem_nsems;
3767 array = lock_user(VERIFY_WRITE, target_addr,
3768 nsems*sizeof(unsigned short), 0);
3769 if (!array)
3770 return -TARGET_EFAULT;
3772 for (i = 0; i < nsems; i++) {
3773 __put_user((*host_array)[i], &array[i]);
3775 g_free(*host_array);
3776 unlock_user(array, target_addr, 1);
3778 return 0;
3781 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3782 abi_ulong target_arg)
3784 union target_semun target_su = { .buf = target_arg };
3785 union semun arg;
3786 struct semid_ds dsarg;
3787 unsigned short *array = NULL;
3788 struct seminfo seminfo;
3789 abi_long ret = -TARGET_EINVAL;
3790 abi_long err;
3791 cmd &= 0xff;
3793 switch (cmd) {
3794 case GETVAL:
3795 case SETVAL:
3796 /* In 64 bit cross-endian situations, we will erroneously pick up
3797 * the wrong half of the union for the "val" element. To rectify
3798 * this, the entire 8-byte structure is byteswapped, followed by
3799 * a swap of the 4 byte val field. In other cases, the data is
3800 * already in proper host byte order. */
3801 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3802 target_su.buf = tswapal(target_su.buf);
3803 arg.val = tswap32(target_su.val);
3804 } else {
3805 arg.val = target_su.val;
3807 ret = get_errno(semctl(semid, semnum, cmd, arg));
3808 break;
3809 case GETALL:
3810 case SETALL:
3811 err = target_to_host_semarray(semid, &array, target_su.array);
3812 if (err)
3813 return err;
3814 arg.array = array;
3815 ret = get_errno(semctl(semid, semnum, cmd, arg));
3816 err = host_to_target_semarray(semid, target_su.array, &array);
3817 if (err)
3818 return err;
3819 break;
3820 case IPC_STAT:
3821 case IPC_SET:
3822 case SEM_STAT:
3823 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3824 if (err)
3825 return err;
3826 arg.buf = &dsarg;
3827 ret = get_errno(semctl(semid, semnum, cmd, arg));
3828 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3829 if (err)
3830 return err;
3831 break;
3832 case IPC_INFO:
3833 case SEM_INFO:
3834 arg.__buf = &seminfo;
3835 ret = get_errno(semctl(semid, semnum, cmd, arg));
3836 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3837 if (err)
3838 return err;
3839 break;
3840 case IPC_RMID:
3841 case GETPID:
3842 case GETNCNT:
3843 case GETZCNT:
3844 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3845 break;
3848 return ret;
3851 struct target_sembuf {
3852 unsigned short sem_num;
3853 short sem_op;
3854 short sem_flg;
3857 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3858 abi_ulong target_addr,
3859 unsigned nsops)
3861 struct target_sembuf *target_sembuf;
3862 int i;
3864 target_sembuf = lock_user(VERIFY_READ, target_addr,
3865 nsops*sizeof(struct target_sembuf), 1);
3866 if (!target_sembuf)
3867 return -TARGET_EFAULT;
3869 for (i = 0; i < nsops; i++) {
3870 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3871 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3872 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3875 unlock_user(target_sembuf, target_addr, 0);
3877 return 0;
3880 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3881 defined(TARGET_NR_semtimedop)
3884 * This macro is required to handle the s390 variants, which pass the
3885 * arguments in a different order than the default.
3887 #ifdef __s390x__
3888 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3889 (__nsops), (__timeout), (__sops)
3890 #else
3891 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3892 (__nsops), 0, (__sops), (__timeout)
3893 #endif
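/* For example (illustrative expansion), SEMTIMEDOP_IPC_ARGS(nsops,
 * sops, ts) becomes "nsops, ts, sops" on s390x but "nsops, 0, sops,
 * ts" elsewhere, matching the argument order that the kernel's ipc(2)
 * entry point expects on each platform.
 */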
3895 static inline abi_long do_semtimedop(int semid,
3896 abi_long ptr,
3897 unsigned nsops,
3898 abi_long timeout)
3900 struct sembuf *sops;
3901 struct timespec ts, *pts = NULL;
3902 abi_long ret;
3904 if (timeout) {
3905 pts = &ts;
3906 if (target_to_host_timespec(pts, timeout)) {
3907 return -TARGET_EFAULT;
3911 if (nsops > TARGET_SEMOPM) {
3912 return -TARGET_E2BIG;
3915 sops = g_new(struct sembuf, nsops);
3917 if (target_to_host_sembuf(sops, ptr, nsops)) {
3918 g_free(sops);
3919 return -TARGET_EFAULT;
3922 ret = -TARGET_ENOSYS;
3923 #ifdef __NR_semtimedop
3924 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3925 #endif
3926 #ifdef __NR_ipc
3927 if (ret == -TARGET_ENOSYS) {
3928 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3929 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3931 #endif
3932 g_free(sops);
3933 return ret;
3935 #endif
3937 struct target_msqid_ds
3939 struct target_ipc_perm msg_perm;
3940 abi_ulong msg_stime;
3941 #if TARGET_ABI_BITS == 32
3942 abi_ulong __unused1;
3943 #endif
3944 abi_ulong msg_rtime;
3945 #if TARGET_ABI_BITS == 32
3946 abi_ulong __unused2;
3947 #endif
3948 abi_ulong msg_ctime;
3949 #if TARGET_ABI_BITS == 32
3950 abi_ulong __unused3;
3951 #endif
3952 abi_ulong __msg_cbytes;
3953 abi_ulong msg_qnum;
3954 abi_ulong msg_qbytes;
3955 abi_ulong msg_lspid;
3956 abi_ulong msg_lrpid;
3957 abi_ulong __unused4;
3958 abi_ulong __unused5;
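/*
 * Convert a target struct msqid_ds (layout above) to the host layout;
 * every abi_ulong field is byte-swapped with tswapal().
 */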
3961 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3962 abi_ulong target_addr)
3964 struct target_msqid_ds *target_md;
3966 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3967 return -TARGET_EFAULT;
3968 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3969 return -TARGET_EFAULT;
3970 host_md->msg_stime = tswapal(target_md->msg_stime);
3971 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3972 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3973 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3974 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3975 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3976 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3977 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3978 unlock_user_struct(target_md, target_addr, 0);
3979 return 0;
3982 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3983 struct msqid_ds *host_md)
3985 struct target_msqid_ds *target_md;
3987 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3988 return -TARGET_EFAULT;
3989 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3990 return -TARGET_EFAULT;
3991 target_md->msg_stime = tswapal(host_md->msg_stime);
3992 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3993 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3994 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3995 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3996 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3997 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3998 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3999 unlock_user_struct(target_md, target_addr, 1);
4000 return 0;
4003 struct target_msginfo {
4004 int msgpool;
4005 int msgmap;
4006 int msgmax;
4007 int msgmnb;
4008 int msgmni;
4009 int msgssz;
4010 int msgtql;
4011 unsigned short int msgseg;
4014 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4015 struct msginfo *host_msginfo)
4017 struct target_msginfo *target_msginfo;
4018 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4019 return -TARGET_EFAULT;
4020 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4021 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4022 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4023 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4024 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4025 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4026 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4027 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4028 unlock_user_struct(target_msginfo, target_addr, 1);
4029 return 0;
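/*
 * Emulate msgctl(2).  The command is masked to its low byte because
 * targets may pass version flags such as IPC_64 in the upper bits.
 */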
4032 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4034 struct msqid_ds dsarg;
4035 struct msginfo msginfo;
4036 abi_long ret = -TARGET_EINVAL;
4038 cmd &= 0xff;
4040 switch (cmd) {
4041 case IPC_STAT:
4042 case IPC_SET:
4043 case MSG_STAT:
4044 if (target_to_host_msqid_ds(&dsarg,ptr))
4045 return -TARGET_EFAULT;
4046 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4047 if (host_to_target_msqid_ds(ptr,&dsarg))
4048 return -TARGET_EFAULT;
4049 break;
4050 case IPC_RMID:
4051 ret = get_errno(msgctl(msgid, cmd, NULL));
4052 break;
4053 case IPC_INFO:
4054 case MSG_INFO:
4055 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4056 if (host_to_target_msginfo(ptr, &msginfo))
4057 return -TARGET_EFAULT;
4058 break;
4061 return ret;
4064 struct target_msgbuf {
4065 abi_long mtype;
4066 char mtext[1];
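/*
 * Emulate msgsnd(2): copy the guest message (mtype plus 'msgsz' bytes
 * of text) into a host msgbuf and submit it via msgsnd or sys_ipc.
 */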
4069 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4070 ssize_t msgsz, int msgflg)
4072 struct target_msgbuf *target_mb;
4073 struct msgbuf *host_mb;
4074 abi_long ret = 0;
4076 if (msgsz < 0) {
4077 return -TARGET_EINVAL;
4080 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4081 return -TARGET_EFAULT;
4082 host_mb = g_try_malloc(msgsz + sizeof(long));
4083 if (!host_mb) {
4084 unlock_user_struct(target_mb, msgp, 0);
4085 return -TARGET_ENOMEM;
4087 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4088 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4089 ret = -TARGET_ENOSYS;
4090 #ifdef __NR_msgsnd
4091 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4092 #endif
4093 #ifdef __NR_ipc
4094 if (ret == -TARGET_ENOSYS) {
4095 #ifdef __s390x__
4096 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4097 host_mb));
4098 #else
4099 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4100 host_mb, 0));
4101 #endif
4103 #endif
4104 g_free(host_mb);
4105 unlock_user_struct(target_mb, msgp, 0);
4107 return ret;
4110 #ifdef __NR_ipc
4111 #if defined(__sparc__)
4112 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
4113 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4114 #elif defined(__s390x__)
4115 /* The s390 sys_ipc variant has only five parameters. */
4116 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4117 ((long int[]){(long int)__msgp, __msgtyp})
4118 #else
4119 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4120 ((long int[]){(long int)__msgp, __msgtyp}), 0
4121 #endif
4122 #endif
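/*
 * Emulate msgrcv(2): receive into a temporary host msgbuf, then copy
 * mtype and the received text back out to the guest buffer.
 */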
4124 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4125 ssize_t msgsz, abi_long msgtyp,
4126 int msgflg)
4128 struct target_msgbuf *target_mb;
4129 char *target_mtext;
4130 struct msgbuf *host_mb;
4131 abi_long ret = 0;
4133 if (msgsz < 0) {
4134 return -TARGET_EINVAL;
4137 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4138 return -TARGET_EFAULT;
4140 host_mb = g_try_malloc(msgsz + sizeof(long));
4141 if (!host_mb) {
4142 ret = -TARGET_ENOMEM;
4143 goto end;
4145 ret = -TARGET_ENOSYS;
4146 #ifdef __NR_msgrcv
4147 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4148 #endif
4149 #ifdef __NR_ipc
4150 if (ret == -TARGET_ENOSYS) {
4151 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4152 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4154 #endif
4156 if (ret > 0) {
4157 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4158 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4159 if (!target_mtext) {
4160 ret = -TARGET_EFAULT;
4161 goto end;
4163 memcpy(target_mb->mtext, host_mb->mtext, ret);
4164 unlock_user(target_mtext, target_mtext_addr, ret);
4167 target_mb->mtype = tswapal(host_mb->mtype);
4169 end:
4170 if (target_mb)
4171 unlock_user_struct(target_mb, msgp, 1);
4172 g_free(host_mb);
4173 return ret;
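/* Converters between the target and host struct shmid_ds layouts. */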
4176 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4177 abi_ulong target_addr)
4179 struct target_shmid_ds *target_sd;
4181 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4182 return -TARGET_EFAULT;
4183 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4184 return -TARGET_EFAULT;
4185 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4186 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4187 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4188 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4189 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4190 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4191 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4192 unlock_user_struct(target_sd, target_addr, 0);
4193 return 0;
4196 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4197 struct shmid_ds *host_sd)
4199 struct target_shmid_ds *target_sd;
4201 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4202 return -TARGET_EFAULT;
4203 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4204 return -TARGET_EFAULT;
4205 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4206 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4207 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4208 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4209 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4210 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4211 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4212 unlock_user_struct(target_sd, target_addr, 1);
4213 return 0;
4216 struct target_shminfo {
4217 abi_ulong shmmax;
4218 abi_ulong shmmin;
4219 abi_ulong shmmni;
4220 abi_ulong shmseg;
4221 abi_ulong shmall;
4224 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4225 struct shminfo *host_shminfo)
4227 struct target_shminfo *target_shminfo;
4228 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4229 return -TARGET_EFAULT;
4230 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4231 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4232 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4233 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4234 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4235 unlock_user_struct(target_shminfo, target_addr, 1);
4236 return 0;
4239 struct target_shm_info {
4240 int used_ids;
4241 abi_ulong shm_tot;
4242 abi_ulong shm_rss;
4243 abi_ulong shm_swp;
4244 abi_ulong swap_attempts;
4245 abi_ulong swap_successes;
4248 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4249 struct shm_info *host_shm_info)
4251 struct target_shm_info *target_shm_info;
4252 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4253 return -TARGET_EFAULT;
4254 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4255 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4256 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4257 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4258 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4259 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4260 unlock_user_struct(target_shm_info, target_addr, 1);
4261 return 0;
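/*
 * Emulate shmctl(2).  IPC_INFO and SHM_INFO return shminfo/shm_info
 * structures, which have their own converters above.
 */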
4264 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4266 struct shmid_ds dsarg;
4267 struct shminfo shminfo;
4268 struct shm_info shm_info;
4269 abi_long ret = -TARGET_EINVAL;
4271 cmd &= 0xff;
4273 switch(cmd) {
4274 case IPC_STAT:
4275 case IPC_SET:
4276 case SHM_STAT:
4277 if (target_to_host_shmid_ds(&dsarg, buf))
4278 return -TARGET_EFAULT;
4279 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4280 if (host_to_target_shmid_ds(buf, &dsarg))
4281 return -TARGET_EFAULT;
4282 break;
4283 case IPC_INFO:
4284 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4285 if (host_to_target_shminfo(buf, &shminfo))
4286 return -TARGET_EFAULT;
4287 break;
4288 case SHM_INFO:
4289 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4290 if (host_to_target_shm_info(buf, &shm_info))
4291 return -TARGET_EFAULT;
4292 break;
4293 case IPC_RMID:
4294 case SHM_LOCK:
4295 case SHM_UNLOCK:
4296 ret = get_errno(shmctl(shmid, cmd, NULL));
4297 break;
4300 return ret;
4303 #ifndef TARGET_FORCE_SHMLBA
4304 /* For most architectures, SHMLBA is the same as the page size;
4305 * some architectures have larger values, in which case they should
4306 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4307 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4308 * and defining its own value for SHMLBA.
4310 * The kernel also permits SHMLBA to be set by the architecture to a
4311 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4312 * this means that addresses are rounded to the large size if
4313 * SHM_RND is set but addresses not aligned to that size are not rejected
4314 * as long as they are at least page-aligned. Since the only architecture
4315 * which uses this is ia64, this code doesn't provide for that oddity.
4316 */
4317 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4319 return TARGET_PAGE_SIZE;
4321 #endif
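/*
 * Emulate shmat(2).  A caller-supplied address must respect the target
 * SHMLBA; when we choose the address ourselves we also honor the host
 * SHMLBA so that the host shmat() cannot fail on alignment.
 */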
4323 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4324 int shmid, abi_ulong shmaddr, int shmflg)
4326 abi_long raddr;
4327 void *host_raddr;
4328 struct shmid_ds shm_info;
4329 int i,ret;
4330 abi_ulong shmlba;
4332 /* find out the length of the shared memory segment */
4333 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4334 if (is_error(ret)) {
4335 /* can't get length, bail out */
4336 return ret;
4339 shmlba = target_shmlba(cpu_env);
4341 if (shmaddr & (shmlba - 1)) {
4342 if (shmflg & SHM_RND) {
4343 shmaddr &= ~(shmlba - 1);
4344 } else {
4345 return -TARGET_EINVAL;
4348 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4349 return -TARGET_EINVAL;
4352 mmap_lock();
4354 if (shmaddr)
4355 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4356 else {
4357 abi_ulong mmap_start;
4359 /* In order to use the host shmat, we need to honor host SHMLBA. */
4360 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4362 if (mmap_start == -1) {
4363 errno = ENOMEM;
4364 host_raddr = (void *)-1;
4365 } else
4366 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4369 if (host_raddr == (void *)-1) {
4370 mmap_unlock();
4371 return get_errno((long)host_raddr);
4373 raddr=h2g((unsigned long)host_raddr);
4375 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4376 PAGE_VALID | PAGE_READ |
4377 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4379 for (i = 0; i < N_SHM_REGIONS; i++) {
4380 if (!shm_regions[i].in_use) {
4381 shm_regions[i].in_use = true;
4382 shm_regions[i].start = raddr;
4383 shm_regions[i].size = shm_info.shm_segsz;
4384 break;
4388 mmap_unlock();
4389 return raddr;
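/*
 * Emulate shmdt(2): clear the page flags for the detached region and
 * release the corresponding host mapping.
 */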
4393 static inline abi_long do_shmdt(abi_ulong shmaddr)
4395 int i;
4396 abi_long rv;
4398 mmap_lock();
4400 for (i = 0; i < N_SHM_REGIONS; ++i) {
4401 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4402 shm_regions[i].in_use = false;
4403 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4404 break;
4407 rv = get_errno(shmdt(g2h(shmaddr)));
4409 mmap_unlock();
4411 return rv;
4414 #ifdef TARGET_NR_ipc
4415 /* ??? This only works with linear mappings. */
4416 /* do_ipc() must return target values and target errnos. */
4417 static abi_long do_ipc(CPUArchState *cpu_env,
4418 unsigned int call, abi_long first,
4419 abi_long second, abi_long third,
4420 abi_long ptr, abi_long fifth)
4422 int version;
4423 abi_long ret = 0;
4425 version = call >> 16;
4426 call &= 0xffff;
4428 switch (call) {
4429 case IPCOP_semop:
4430 ret = do_semtimedop(first, ptr, second, 0);
4431 break;
4432 case IPCOP_semtimedop:
4433 /*
4434 * The s390 sys_ipc variant has only five parameters instead of six
4435 * (as in the default variant); the only difference is the handling of
4436 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4437 * to a struct timespec while the generic variant uses the fifth parameter.
4438 */
4439 #if defined(TARGET_S390X)
4440 ret = do_semtimedop(first, ptr, second, third);
4441 #else
4442 ret = do_semtimedop(first, ptr, second, fifth);
4443 #endif
4444 break;
4446 case IPCOP_semget:
4447 ret = get_errno(semget(first, second, third));
4448 break;
4450 case IPCOP_semctl: {
4451 /* The semun argument to semctl is passed by value, so dereference the
4452 * ptr argument. */
4453 abi_ulong atptr;
4454 get_user_ual(atptr, ptr);
4455 ret = do_semctl(first, second, third, atptr);
4456 break;
4459 case IPCOP_msgget:
4460 ret = get_errno(msgget(first, second));
4461 break;
4463 case IPCOP_msgsnd:
4464 ret = do_msgsnd(first, ptr, second, third);
4465 break;
4467 case IPCOP_msgctl:
4468 ret = do_msgctl(first, second, ptr);
4469 break;
4471 case IPCOP_msgrcv:
4472 switch (version) {
4473 case 0:
4475 struct target_ipc_kludge {
4476 abi_long msgp;
4477 abi_long msgtyp;
4478 } *tmp;
4480 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4481 ret = -TARGET_EFAULT;
4482 break;
4485 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4487 unlock_user_struct(tmp, ptr, 0);
4488 break;
4490 default:
4491 ret = do_msgrcv(first, ptr, second, fifth, third);
4493 break;
4495 case IPCOP_shmat:
4496 switch (version) {
4497 default:
4499 abi_ulong raddr;
4500 raddr = do_shmat(cpu_env, first, ptr, second);
4501 if (is_error(raddr))
4502 return get_errno(raddr);
4503 if (put_user_ual(raddr, third))
4504 return -TARGET_EFAULT;
4505 break;
4507 case 1:
4508 ret = -TARGET_EINVAL;
4509 break;
4511 break;
4512 case IPCOP_shmdt:
4513 ret = do_shmdt(ptr);
4514 break;
4516 case IPCOP_shmget:
4517 /* IPC_* flag values are the same on all Linux platforms */
4518 ret = get_errno(shmget(first, second, third));
4519 break;
4521 /* IPC_* and SHM_* command values are the same on all Linux platforms */
4522 case IPCOP_shmctl:
4523 ret = do_shmctl(first, second, ptr);
4524 break;
4525 default:
4526 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4527 call, version);
4528 ret = -TARGET_ENOSYS;
4529 break;
4531 return ret;
4533 #endif
4535 /* kernel structure types definitions */
4537 #define STRUCT(name, ...) STRUCT_ ## name,
4538 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4539 enum {
4540 #include "syscall_types.h"
4541 STRUCT_MAX
4543 #undef STRUCT
4544 #undef STRUCT_SPECIAL
4546 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4547 #define STRUCT_SPECIAL(name)
4548 #include "syscall_types.h"
4549 #undef STRUCT
4550 #undef STRUCT_SPECIAL
4552 #define MAX_STRUCT_SIZE 4096
4554 #ifdef CONFIG_FIEMAP
4555 /* So that fiemap access checks don't overflow on 32-bit systems.
4556 * This is very slightly smaller than the limit imposed by
4557 * the underlying kernel.
4558 */
4559 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4560 / sizeof(struct fiemap_extent))
4562 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4563 int fd, int cmd, abi_long arg)
4565 /* The parameter for this ioctl is a struct fiemap followed
4566 * by an array of struct fiemap_extent whose size is set
4567 * in fiemap->fm_extent_count. The array is filled in by the
4568 * ioctl.
4570 int target_size_in, target_size_out;
4571 struct fiemap *fm;
4572 const argtype *arg_type = ie->arg_type;
4573 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4574 void *argptr, *p;
4575 abi_long ret;
4576 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4577 uint32_t outbufsz;
4578 int free_fm = 0;
4580 assert(arg_type[0] == TYPE_PTR);
4581 assert(ie->access == IOC_RW);
4582 arg_type++;
4583 target_size_in = thunk_type_size(arg_type, 0);
4584 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4585 if (!argptr) {
4586 return -TARGET_EFAULT;
4588 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4589 unlock_user(argptr, arg, 0);
4590 fm = (struct fiemap *)buf_temp;
4591 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4592 return -TARGET_EINVAL;
4595 outbufsz = sizeof (*fm) +
4596 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4598 if (outbufsz > MAX_STRUCT_SIZE) {
4599 /* We can't fit all the extents into the fixed size buffer.
4600 * Allocate one that is large enough and use it instead.
4602 fm = g_try_malloc(outbufsz);
4603 if (!fm) {
4604 return -TARGET_ENOMEM;
4606 memcpy(fm, buf_temp, sizeof(struct fiemap));
4607 free_fm = 1;
4609 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4610 if (!is_error(ret)) {
4611 target_size_out = target_size_in;
4612 /* An extent_count of 0 means we were only counting the extents
4613 * so there are no structs to copy
4615 if (fm->fm_extent_count != 0) {
4616 target_size_out += fm->fm_mapped_extents * extent_size;
4618 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4619 if (!argptr) {
4620 ret = -TARGET_EFAULT;
4621 } else {
4622 /* Convert the struct fiemap */
4623 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4624 if (fm->fm_extent_count != 0) {
4625 p = argptr + target_size_in;
4626 /* ...and then all the struct fiemap_extents */
4627 for (i = 0; i < fm->fm_mapped_extents; i++) {
4628 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4629 THUNK_TARGET);
4630 p += extent_size;
4633 unlock_user(argptr, arg, target_size_out);
4636 if (free_fm) {
4637 g_free(fm);
4639 return ret;
4641 #endif
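/*
 * SIOCGIFCONF: struct ifconf carries a pointer to a buffer of ifreq
 * entries, so both the structure itself and the pointed-to array must
 * be converted between target and host layouts.
 */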
4643 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4644 int fd, int cmd, abi_long arg)
4646 const argtype *arg_type = ie->arg_type;
4647 int target_size;
4648 void *argptr;
4649 int ret;
4650 struct ifconf *host_ifconf;
4651 uint32_t outbufsz;
4652 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4653 int target_ifreq_size;
4654 int nb_ifreq;
4655 int free_buf = 0;
4656 int i;
4657 int target_ifc_len;
4658 abi_long target_ifc_buf;
4659 int host_ifc_len;
4660 char *host_ifc_buf;
4662 assert(arg_type[0] == TYPE_PTR);
4663 assert(ie->access == IOC_RW);
4665 arg_type++;
4666 target_size = thunk_type_size(arg_type, 0);
4668 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4669 if (!argptr)
4670 return -TARGET_EFAULT;
4671 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4672 unlock_user(argptr, arg, 0);
4674 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4675 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4676 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4678 if (target_ifc_buf != 0) {
4679 target_ifc_len = host_ifconf->ifc_len;
4680 nb_ifreq = target_ifc_len / target_ifreq_size;
4681 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4683 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4684 if (outbufsz > MAX_STRUCT_SIZE) {
4685 /*
4686 * We can't fit all the ifreq entries into the fixed size buffer.
4687 * Allocate one that is large enough and use it instead.
4688 */
4689 host_ifconf = g_try_malloc(outbufsz);
4690 if (!host_ifconf) {
4691 return -TARGET_ENOMEM;
4693 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4694 free_buf = 1;
4696 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4698 host_ifconf->ifc_len = host_ifc_len;
4699 } else {
4700 host_ifc_buf = NULL;
4702 host_ifconf->ifc_buf = host_ifc_buf;
4704 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4705 if (!is_error(ret)) {
4706 /* convert host ifc_len to target ifc_len */
4708 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4709 target_ifc_len = nb_ifreq * target_ifreq_size;
4710 host_ifconf->ifc_len = target_ifc_len;
4712 /* restore target ifc_buf */
4714 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4716 /* copy struct ifconf to target user */
4718 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4719 if (!argptr)
4720 return -TARGET_EFAULT;
4721 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4722 unlock_user(argptr, arg, target_size);
4724 if (target_ifc_buf != 0) {
4725 /* copy ifreq[] to target user */
4726 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4727 for (i = 0; i < nb_ifreq ; i++) {
4728 thunk_convert(argptr + i * target_ifreq_size,
4729 host_ifc_buf + i * sizeof(struct ifreq),
4730 ifreq_arg_type, THUNK_TARGET);
4732 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4736 if (free_buf) {
4737 g_free(host_ifconf);
4740 return ret;
4743 #if defined(CONFIG_USBFS)
4744 #if HOST_LONG_BITS > 64
4745 #error USBDEVFS thunks do not support >64 bit hosts yet.
4746 #endif
4747 struct live_urb {
4748 uint64_t target_urb_adr;
4749 uint64_t target_buf_adr;
4750 char *target_buf_ptr;
4751 struct usbdevfs_urb host_urb;
4754 static GHashTable *usbdevfs_urb_hashtable(void)
4756 static GHashTable *urb_hashtable;
4758 if (!urb_hashtable) {
4759 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4761 return urb_hashtable;
4764 static void urb_hashtable_insert(struct live_urb *urb)
4766 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4767 g_hash_table_insert(urb_hashtable, urb, urb);
4770 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4772 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4773 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4776 static void urb_hashtable_remove(struct live_urb *urb)
4778 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4779 g_hash_table_remove(urb_hashtable, urb);
4782 static abi_long
4783 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4784 int fd, int cmd, abi_long arg)
4786 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4787 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4788 struct live_urb *lurb;
4789 void *argptr;
4790 uint64_t hurb;
4791 int target_size;
4792 uintptr_t target_urb_adr;
4793 abi_long ret;
4795 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4797 memset(buf_temp, 0, sizeof(uint64_t));
4798 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4799 if (is_error(ret)) {
4800 return ret;
4803 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4804 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4805 if (!lurb->target_urb_adr) {
4806 return -TARGET_EFAULT;
4808 urb_hashtable_remove(lurb);
4809 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4810 lurb->host_urb.buffer_length);
4811 lurb->target_buf_ptr = NULL;
4813 /* restore the guest buffer pointer */
4814 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4816 /* update the guest urb struct */
4817 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4818 if (!argptr) {
4819 g_free(lurb);
4820 return -TARGET_EFAULT;
4822 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4823 unlock_user(argptr, lurb->target_urb_adr, target_size);
4825 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4826 /* write back the urb handle */
4827 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4828 if (!argptr) {
4829 g_free(lurb);
4830 return -TARGET_EFAULT;
4833 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4834 target_urb_adr = lurb->target_urb_adr;
4835 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4836 unlock_user(argptr, arg, target_size);
4838 g_free(lurb);
4839 return ret;
4842 static abi_long
4843 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4844 uint8_t *buf_temp __attribute__((unused)),
4845 int fd, int cmd, abi_long arg)
4847 struct live_urb *lurb;
4849 /* Map the target address back to the host URB with its metadata. */
4850 lurb = urb_hashtable_lookup(arg);
4851 if (!lurb) {
4852 return -TARGET_EFAULT;
4854 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4857 static abi_long
4858 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4859 int fd, int cmd, abi_long arg)
4861 const argtype *arg_type = ie->arg_type;
4862 int target_size;
4863 abi_long ret;
4864 void *argptr;
4865 int rw_dir;
4866 struct live_urb *lurb;
4868 /*
4869 * Each submitted URB needs to map to a unique ID for the
4870 * kernel, and that unique ID needs to be a pointer to
4871 * host memory. Hence, we need to malloc for each URB.
4872 * Isochronous transfers have a variable length struct.
4873 */
4874 arg_type++;
4875 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4877 /* construct host copy of urb and metadata */
4878 lurb = g_try_malloc0(sizeof(struct live_urb));
4879 if (!lurb) {
4880 return -TARGET_ENOMEM;
4883 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4884 if (!argptr) {
4885 g_free(lurb);
4886 return -TARGET_EFAULT;
4888 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4889 unlock_user(argptr, arg, 0);
4891 lurb->target_urb_adr = arg;
4892 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4894 /* buffer space used depends on endpoint type so lock the entire buffer */
4895 /* control type urbs should check the buffer contents for true direction */
4896 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4897 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4898 lurb->host_urb.buffer_length, 1);
4899 if (lurb->target_buf_ptr == NULL) {
4900 g_free(lurb);
4901 return -TARGET_EFAULT;
4904 /* update buffer pointer in host copy */
4905 lurb->host_urb.buffer = lurb->target_buf_ptr;
4907 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4908 if (is_error(ret)) {
4909 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4910 g_free(lurb);
4911 } else {
4912 urb_hashtable_insert(lurb);
4915 return ret;
4917 #endif /* CONFIG_USBFS */
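/*
 * Device-mapper ioctls pass a struct dm_ioctl followed by a variable
 * payload whose layout depends on the command, so the data is converted
 * explicitly, command by command, in both directions.
 */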
4919 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4920 int cmd, abi_long arg)
4922 void *argptr;
4923 struct dm_ioctl *host_dm;
4924 abi_long guest_data;
4925 uint32_t guest_data_size;
4926 int target_size;
4927 const argtype *arg_type = ie->arg_type;
4928 abi_long ret;
4929 void *big_buf = NULL;
4930 char *host_data;
4932 arg_type++;
4933 target_size = thunk_type_size(arg_type, 0);
4934 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4935 if (!argptr) {
4936 ret = -TARGET_EFAULT;
4937 goto out;
4939 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4940 unlock_user(argptr, arg, 0);
4942 /* buf_temp is too small, so fetch things into a bigger buffer */
4943 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4944 memcpy(big_buf, buf_temp, target_size);
4945 buf_temp = big_buf;
4946 host_dm = big_buf;
4948 guest_data = arg + host_dm->data_start;
4949 if ((guest_data - arg) < 0) {
4950 ret = -TARGET_EINVAL;
4951 goto out;
4953 guest_data_size = host_dm->data_size - host_dm->data_start;
4954 host_data = (char*)host_dm + host_dm->data_start;
4956 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4957 if (!argptr) {
4958 ret = -TARGET_EFAULT;
4959 goto out;
4962 switch (ie->host_cmd) {
4963 case DM_REMOVE_ALL:
4964 case DM_LIST_DEVICES:
4965 case DM_DEV_CREATE:
4966 case DM_DEV_REMOVE:
4967 case DM_DEV_SUSPEND:
4968 case DM_DEV_STATUS:
4969 case DM_DEV_WAIT:
4970 case DM_TABLE_STATUS:
4971 case DM_TABLE_CLEAR:
4972 case DM_TABLE_DEPS:
4973 case DM_LIST_VERSIONS:
4974 /* no input data */
4975 break;
4976 case DM_DEV_RENAME:
4977 case DM_DEV_SET_GEOMETRY:
4978 /* data contains only strings */
4979 memcpy(host_data, argptr, guest_data_size);
4980 break;
4981 case DM_TARGET_MSG:
4982 memcpy(host_data, argptr, guest_data_size);
4983 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4984 break;
4985 case DM_TABLE_LOAD:
4987 void *gspec = argptr;
4988 void *cur_data = host_data;
4989 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4990 int spec_size = thunk_type_size(arg_type, 0);
4991 int i;
4993 for (i = 0; i < host_dm->target_count; i++) {
4994 struct dm_target_spec *spec = cur_data;
4995 uint32_t next;
4996 int slen;
4998 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4999 slen = strlen((char*)gspec + spec_size) + 1;
5000 next = spec->next;
5001 spec->next = sizeof(*spec) + slen;
5002 strcpy((char*)&spec[1], gspec + spec_size);
5003 gspec += next;
5004 cur_data += spec->next;
5006 break;
5008 default:
5009 ret = -TARGET_EINVAL;
5010 unlock_user(argptr, guest_data, 0);
5011 goto out;
5013 unlock_user(argptr, guest_data, 0);
5015 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5016 if (!is_error(ret)) {
5017 guest_data = arg + host_dm->data_start;
5018 guest_data_size = host_dm->data_size - host_dm->data_start;
5019 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5020 switch (ie->host_cmd) {
5021 case DM_REMOVE_ALL:
5022 case DM_DEV_CREATE:
5023 case DM_DEV_REMOVE:
5024 case DM_DEV_RENAME:
5025 case DM_DEV_SUSPEND:
5026 case DM_DEV_STATUS:
5027 case DM_TABLE_LOAD:
5028 case DM_TABLE_CLEAR:
5029 case DM_TARGET_MSG:
5030 case DM_DEV_SET_GEOMETRY:
5031 /* no return data */
5032 break;
5033 case DM_LIST_DEVICES:
5035 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5036 uint32_t remaining_data = guest_data_size;
5037 void *cur_data = argptr;
5038 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5039 int nl_size = 12; /* can't use thunk_size due to alignment */
5041 while (1) {
5042 uint32_t next = nl->next;
5043 if (next) {
5044 nl->next = nl_size + (strlen(nl->name) + 1);
5046 if (remaining_data < nl->next) {
5047 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5048 break;
5050 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5051 strcpy(cur_data + nl_size, nl->name);
5052 cur_data += nl->next;
5053 remaining_data -= nl->next;
5054 if (!next) {
5055 break;
5057 nl = (void*)nl + next;
5059 break;
5061 case DM_DEV_WAIT:
5062 case DM_TABLE_STATUS:
5064 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5065 void *cur_data = argptr;
5066 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5067 int spec_size = thunk_type_size(arg_type, 0);
5068 int i;
5070 for (i = 0; i < host_dm->target_count; i++) {
5071 uint32_t next = spec->next;
5072 int slen = strlen((char*)&spec[1]) + 1;
5073 spec->next = (cur_data - argptr) + spec_size + slen;
5074 if (guest_data_size < spec->next) {
5075 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5076 break;
5078 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5079 strcpy(cur_data + spec_size, (char*)&spec[1]);
5080 cur_data = argptr + spec->next;
5081 spec = (void*)host_dm + host_dm->data_start + next;
5083 break;
5085 case DM_TABLE_DEPS:
5087 void *hdata = (void*)host_dm + host_dm->data_start;
5088 int count = *(uint32_t*)hdata;
5089 uint64_t *hdev = hdata + 8;
5090 uint64_t *gdev = argptr + 8;
5091 int i;
5093 *(uint32_t*)argptr = tswap32(count);
5094 for (i = 0; i < count; i++) {
5095 *gdev = tswap64(*hdev);
5096 gdev++;
5097 hdev++;
5099 break;
5101 case DM_LIST_VERSIONS:
5103 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5104 uint32_t remaining_data = guest_data_size;
5105 void *cur_data = argptr;
5106 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5107 int vers_size = thunk_type_size(arg_type, 0);
5109 while (1) {
5110 uint32_t next = vers->next;
5111 if (next) {
5112 vers->next = vers_size + (strlen(vers->name) + 1);
5114 if (remaining_data < vers->next) {
5115 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5116 break;
5118 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5119 strcpy(cur_data + vers_size, vers->name);
5120 cur_data += vers->next;
5121 remaining_data -= vers->next;
5122 if (!next) {
5123 break;
5125 vers = (void*)vers + next;
5127 break;
5129 default:
5130 unlock_user(argptr, guest_data, 0);
5131 ret = -TARGET_EINVAL;
5132 goto out;
5134 unlock_user(argptr, guest_data, guest_data_size);
5136 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5137 if (!argptr) {
5138 ret = -TARGET_EFAULT;
5139 goto out;
5141 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5142 unlock_user(argptr, arg, target_size);
5144 out:
5145 g_free(big_buf);
5146 return ret;
5149 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5150 int cmd, abi_long arg)
5152 void *argptr;
5153 int target_size;
5154 const argtype *arg_type = ie->arg_type;
5155 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5156 abi_long ret;
5158 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5159 struct blkpg_partition host_part;
5161 /* Read and convert blkpg */
5162 arg_type++;
5163 target_size = thunk_type_size(arg_type, 0);
5164 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5165 if (!argptr) {
5166 ret = -TARGET_EFAULT;
5167 goto out;
5169 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5170 unlock_user(argptr, arg, 0);
5172 switch (host_blkpg->op) {
5173 case BLKPG_ADD_PARTITION:
5174 case BLKPG_DEL_PARTITION:
5175 /* payload is struct blkpg_partition */
5176 break;
5177 default:
5178 /* Unknown opcode */
5179 ret = -TARGET_EINVAL;
5180 goto out;
5183 /* Read and convert blkpg->data */
5184 arg = (abi_long)(uintptr_t)host_blkpg->data;
5185 target_size = thunk_type_size(part_arg_type, 0);
5186 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5187 if (!argptr) {
5188 ret = -TARGET_EFAULT;
5189 goto out;
5191 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5192 unlock_user(argptr, arg, 0);
5194 /* Swizzle the data pointer to our local copy and call! */
5195 host_blkpg->data = &host_part;
5196 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5198 out:
5199 return ret;
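/*
 * SIOCADDRT/SIOCDELRT: struct rtentry embeds an rt_dev string pointer,
 * which must be locked and passed through as a host pointer instead of
 * being converted like the ordinary fields.
 */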
5202 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5203 int fd, int cmd, abi_long arg)
5205 const argtype *arg_type = ie->arg_type;
5206 const StructEntry *se;
5207 const argtype *field_types;
5208 const int *dst_offsets, *src_offsets;
5209 int target_size;
5210 void *argptr;
5211 abi_ulong *target_rt_dev_ptr = NULL;
5212 unsigned long *host_rt_dev_ptr = NULL;
5213 abi_long ret;
5214 int i;
5216 assert(ie->access == IOC_W);
5217 assert(*arg_type == TYPE_PTR);
5218 arg_type++;
5219 assert(*arg_type == TYPE_STRUCT);
5220 target_size = thunk_type_size(arg_type, 0);
5221 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5222 if (!argptr) {
5223 return -TARGET_EFAULT;
5225 arg_type++;
5226 assert(*arg_type == (int)STRUCT_rtentry);
5227 se = struct_entries + *arg_type++;
5228 assert(se->convert[0] == NULL);
5229 /* convert struct here to be able to catch rt_dev string */
5230 field_types = se->field_types;
5231 dst_offsets = se->field_offsets[THUNK_HOST];
5232 src_offsets = se->field_offsets[THUNK_TARGET];
5233 for (i = 0; i < se->nb_fields; i++) {
5234 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5235 assert(*field_types == TYPE_PTRVOID);
5236 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5237 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5238 if (*target_rt_dev_ptr != 0) {
5239 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5240 tswapal(*target_rt_dev_ptr));
5241 if (!*host_rt_dev_ptr) {
5242 unlock_user(argptr, arg, 0);
5243 return -TARGET_EFAULT;
5245 } else {
5246 *host_rt_dev_ptr = 0;
5248 field_types++;
5249 continue;
5251 field_types = thunk_convert(buf_temp + dst_offsets[i],
5252 argptr + src_offsets[i],
5253 field_types, THUNK_HOST);
5255 unlock_user(argptr, arg, 0);
5257 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5259 assert(host_rt_dev_ptr != NULL);
5260 assert(target_rt_dev_ptr != NULL);
5261 if (*host_rt_dev_ptr != 0) {
5262 unlock_user((void *)*host_rt_dev_ptr,
5263 *target_rt_dev_ptr, 0);
5265 return ret;
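/* KDSIGACCEPT takes a signal number, which must be translated to the
 * host numbering. */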
5268 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5269 int fd, int cmd, abi_long arg)
5271 int sig = target_to_host_signal(arg);
5272 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5275 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5276 int fd, int cmd, abi_long arg)
5278 struct timeval tv;
5279 abi_long ret;
5281 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5282 if (is_error(ret)) {
5283 return ret;
5286 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5287 if (copy_to_user_timeval(arg, &tv)) {
5288 return -TARGET_EFAULT;
5290 } else {
5291 if (copy_to_user_timeval64(arg, &tv)) {
5292 return -TARGET_EFAULT;
5296 return ret;
5299 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5300 int fd, int cmd, abi_long arg)
5302 struct timespec ts;
5303 abi_long ret;
5305 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5306 if (is_error(ret)) {
5307 return ret;
5310 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5311 if (host_to_target_timespec(arg, &ts)) {
5312 return -TARGET_EFAULT;
5314 } else {
5315 if (host_to_target_timespec64(arg, &ts)) {
5316 return -TARGET_EFAULT;
5320 return ret;
5323 #ifdef TIOCGPTPEER
5324 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5325 int fd, int cmd, abi_long arg)
5327 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5328 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5330 #endif
5332 #ifdef HAVE_DRM_H
5334 static void unlock_drm_version(struct drm_version *host_ver,
5335 struct target_drm_version *target_ver,
5336 bool copy)
5338 unlock_user(host_ver->name, target_ver->name,
5339 copy ? host_ver->name_len : 0);
5340 unlock_user(host_ver->date, target_ver->date,
5341 copy ? host_ver->date_len : 0);
5342 unlock_user(host_ver->desc, target_ver->desc,
5343 copy ? host_ver->desc_len : 0);
5346 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5347 struct target_drm_version *target_ver)
5349 memset(host_ver, 0, sizeof(*host_ver));
5351 __get_user(host_ver->name_len, &target_ver->name_len);
5352 if (host_ver->name_len) {
5353 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5354 target_ver->name_len, 0);
5355 if (!host_ver->name) {
5356 return -EFAULT;
5360 __get_user(host_ver->date_len, &target_ver->date_len);
5361 if (host_ver->date_len) {
5362 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5363 target_ver->date_len, 0);
5364 if (!host_ver->date) {
5365 goto err;
5369 __get_user(host_ver->desc_len, &target_ver->desc_len);
5370 if (host_ver->desc_len) {
5371 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5372 target_ver->desc_len, 0);
5373 if (!host_ver->desc) {
5374 goto err;
5378 return 0;
5379 err:
5380 unlock_drm_version(host_ver, target_ver, false);
5381 return -EFAULT;
5384 static inline void host_to_target_drmversion(
5385 struct target_drm_version *target_ver,
5386 struct drm_version *host_ver)
5388 __put_user(host_ver->version_major, &target_ver->version_major);
5389 __put_user(host_ver->version_minor, &target_ver->version_minor);
5390 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5391 __put_user(host_ver->name_len, &target_ver->name_len);
5392 __put_user(host_ver->date_len, &target_ver->date_len);
5393 __put_user(host_ver->desc_len, &target_ver->desc_len);
5394 unlock_drm_version(host_ver, target_ver, true);
5397 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5398 int fd, int cmd, abi_long arg)
5400 struct drm_version *ver;
5401 struct target_drm_version *target_ver;
5402 abi_long ret;
5404 switch (ie->host_cmd) {
5405 case DRM_IOCTL_VERSION:
5406 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5407 return -TARGET_EFAULT;
5409 ver = (struct drm_version *)buf_temp;
5410 ret = target_to_host_drmversion(ver, target_ver);
5411 if (!is_error(ret)) {
5412 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5413 if (is_error(ret)) {
5414 unlock_drm_version(ver, target_ver, false);
5415 } else {
5416 host_to_target_drmversion(target_ver, ver);
5419 unlock_user_struct(target_ver, arg, 0);
5420 return ret;
5422 return -TARGET_ENOSYS;
5425 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5426 struct drm_i915_getparam *gparam,
5427 int fd, abi_long arg)
5429 abi_long ret;
5430 int value;
5431 struct target_drm_i915_getparam *target_gparam;
5433 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5434 return -TARGET_EFAULT;
5437 __get_user(gparam->param, &target_gparam->param);
5438 gparam->value = &value;
5439 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5440 put_user_s32(value, target_gparam->value);
5442 unlock_user_struct(target_gparam, arg, 0);
5443 return ret;
5446 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5447 int fd, int cmd, abi_long arg)
5449 switch (ie->host_cmd) {
5450 case DRM_IOCTL_I915_GETPARAM:
5451 return do_ioctl_drm_i915_getparam(ie,
5452 (struct drm_i915_getparam *)buf_temp,
5453 fd, arg);
5454 default:
5455 return -TARGET_ENOSYS;
5459 #endif
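/*
 * The table is generated from ioctls.h: each entry records the target
 * and host command numbers, the command name, an access mode, an
 * optional custom handler, and the argument type description used by
 * the thunk code.  For example, an entry such as
 *   IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * expands to
 *   { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 */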
5461 IOCTLEntry ioctl_entries[] = {
5462 #define IOCTL(cmd, access, ...) \
5463 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5464 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5465 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5466 #define IOCTL_IGNORE(cmd) \
5467 { TARGET_ ## cmd, 0, #cmd },
5468 #include "ioctls.h"
5469 { 0, 0, },
5472 /* ??? Implement proper locking for ioctls. */
5473 /* do_ioctl() must return target values and target errnos. */
5474 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5476 const IOCTLEntry *ie;
5477 const argtype *arg_type;
5478 abi_long ret;
5479 uint8_t buf_temp[MAX_STRUCT_SIZE];
5480 int target_size;
5481 void *argptr;
5483 ie = ioctl_entries;
5484 for(;;) {
5485 if (ie->target_cmd == 0) {
5486 qemu_log_mask(
5487 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5488 return -TARGET_ENOSYS;
5490 if (ie->target_cmd == cmd)
5491 break;
5492 ie++;
5494 arg_type = ie->arg_type;
5495 if (ie->do_ioctl) {
5496 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5497 } else if (!ie->host_cmd) {
5498 /* Some architectures define BSD ioctls in their headers
5499 that are not implemented in Linux. */
5500 return -TARGET_ENOSYS;
5503 switch(arg_type[0]) {
5504 case TYPE_NULL:
5505 /* no argument */
5506 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5507 break;
5508 case TYPE_PTRVOID:
5509 case TYPE_INT:
5510 case TYPE_LONG:
5511 case TYPE_ULONG:
5512 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5513 break;
5514 case TYPE_PTR:
5515 arg_type++;
5516 target_size = thunk_type_size(arg_type, 0);
5517 switch(ie->access) {
5518 case IOC_R:
5519 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5520 if (!is_error(ret)) {
5521 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5522 if (!argptr)
5523 return -TARGET_EFAULT;
5524 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5525 unlock_user(argptr, arg, target_size);
5527 break;
5528 case IOC_W:
5529 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5530 if (!argptr)
5531 return -TARGET_EFAULT;
5532 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5533 unlock_user(argptr, arg, 0);
5534 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5535 break;
5536 default:
5537 case IOC_RW:
5538 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5539 if (!argptr)
5540 return -TARGET_EFAULT;
5541 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5542 unlock_user(argptr, arg, 0);
5543 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5544 if (!is_error(ret)) {
5545 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5546 if (!argptr)
5547 return -TARGET_EFAULT;
5548 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5549 unlock_user(argptr, arg, target_size);
5551 break;
5553 break;
5554 default:
5555 qemu_log_mask(LOG_UNIMP,
5556 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5557 (long)cmd, arg_type[0]);
5558 ret = -TARGET_ENOSYS;
5559 break;
5561 return ret;
5564 static const bitmask_transtbl iflag_tbl[] = {
5565 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5566 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5567 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5568 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5569 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5570 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5571 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5572 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5573 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5574 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5575 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5576 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5577 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5578 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5579 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5580 { 0, 0, 0, 0 }
5583 static const bitmask_transtbl oflag_tbl[] = {
5584 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5585 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5586 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5587 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5588 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5589 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5590 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5591 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5592 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5593 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5594 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5595 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5596 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5597 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5598 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5599 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5600 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5601 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5602 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5603 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5604 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5605 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5606 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5607 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5608 { 0, 0, 0, 0 }
5611 static const bitmask_transtbl cflag_tbl[] = {
5612 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5613 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5614 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5615 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5616 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5617 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5618 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5619 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5620 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5621 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5622 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5623 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5624 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5625 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5626 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5627 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5628 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5629 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5630 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5631 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5632 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5633 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5634 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5635 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5636 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5637 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5638 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5639 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5640 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5641 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5642 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5643 { 0, 0, 0, 0 }
5646 static const bitmask_transtbl lflag_tbl[] = {
5647 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5648 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5649 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5650 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5651 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5652 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5653 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5654 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5655 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5656 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5657 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5658 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5659 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5660 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5661 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5662 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5663 { 0, 0, 0, 0 }
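/* Convert the termios flag words through the tables above and remap
 * the c_cc control-character indexes. */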
5666 static void target_to_host_termios (void *dst, const void *src)
5668 struct host_termios *host = dst;
5669 const struct target_termios *target = src;
5671 host->c_iflag =
5672 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5673 host->c_oflag =
5674 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5675 host->c_cflag =
5676 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5677 host->c_lflag =
5678 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5679 host->c_line = target->c_line;
5681 memset(host->c_cc, 0, sizeof(host->c_cc));
5682 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5683 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5684 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5685 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5686 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5687 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5688 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5689 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5690 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5691 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5692 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5693 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5694 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5695 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5696 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5697 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5698 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5701 static void host_to_target_termios (void *dst, const void *src)
5703 struct target_termios *target = dst;
5704 const struct host_termios *host = src;
5706 target->c_iflag =
5707 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5708 target->c_oflag =
5709 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5710 target->c_cflag =
5711 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5712 target->c_lflag =
5713 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5714 target->c_line = host->c_line;
5716 memset(target->c_cc, 0, sizeof(target->c_cc));
5717 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5718 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5719 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5720 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5721 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5722 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5723 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5724 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5725 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5726 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5727 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5728 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5729 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5730 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5731 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5732 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5733 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5736 static const StructEntry struct_termios_def = {
5737 .convert = { host_to_target_termios, target_to_host_termios },
5738 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5739 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5740 .print = print_termios,
5743 static bitmask_transtbl mmap_flags_tbl[] = {
5744 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5745 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5746 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5747 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5748 MAP_ANONYMOUS, MAP_ANONYMOUS },
5749 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5750 MAP_GROWSDOWN, MAP_GROWSDOWN },
5751 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5752 MAP_DENYWRITE, MAP_DENYWRITE },
5753 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5754 MAP_EXECUTABLE, MAP_EXECUTABLE },
5755 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5756 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5757 MAP_NORESERVE, MAP_NORESERVE },
5758 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5759 /* MAP_STACK has been ignored by the kernel for quite some time.
5760 Recognize it for the target insofar as we do not want to pass
5761 it through to the host. */
5762 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5763 { 0, 0, 0, 0 }
5767 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5768 * TARGET_I386 is defined if TARGET_X86_64 is defined
5770 #if defined(TARGET_I386)
5772 /* NOTE: there is really one LDT for all the threads */
5773 static uint8_t *ldt_table;
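/* Copy at most 'bytecount' bytes of the emulated LDT out to guest
 * memory; returns the number of bytes copied. */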
5775 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5777 int size;
5778 void *p;
5780 if (!ldt_table)
5781 return 0;
5782 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5783 if (size > bytecount)
5784 size = bytecount;
5785 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5786 if (!p)
5787 return -TARGET_EFAULT;
5788 /* ??? Should this by byteswapped? */
5789 memcpy(p, ldt_table, size);
5790 unlock_user(p, ptr, size);
5791 return size;
5794 /* XXX: add locking support */
5795 static abi_long write_ldt(CPUX86State *env,
5796 abi_ulong ptr, unsigned long bytecount, int oldmode)
5798 struct target_modify_ldt_ldt_s ldt_info;
5799 struct target_modify_ldt_ldt_s *target_ldt_info;
5800 int seg_32bit, contents, read_exec_only, limit_in_pages;
5801 int seg_not_present, useable, lm;
5802 uint32_t *lp, entry_1, entry_2;
5804 if (bytecount != sizeof(ldt_info))
5805 return -TARGET_EINVAL;
5806 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5807 return -TARGET_EFAULT;
5808 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5809 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5810 ldt_info.limit = tswap32(target_ldt_info->limit);
5811 ldt_info.flags = tswap32(target_ldt_info->flags);
5812 unlock_user_struct(target_ldt_info, ptr, 0);
5814 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5815 return -TARGET_EINVAL;
5816 seg_32bit = ldt_info.flags & 1;
5817 contents = (ldt_info.flags >> 1) & 3;
5818 read_exec_only = (ldt_info.flags >> 3) & 1;
5819 limit_in_pages = (ldt_info.flags >> 4) & 1;
5820 seg_not_present = (ldt_info.flags >> 5) & 1;
5821 useable = (ldt_info.flags >> 6) & 1;
5822 #ifdef TARGET_ABI32
5823 lm = 0;
5824 #else
5825 lm = (ldt_info.flags >> 7) & 1;
5826 #endif
5827 if (contents == 3) {
5828 if (oldmode)
5829 return -TARGET_EINVAL;
5830 if (seg_not_present == 0)
5831 return -TARGET_EINVAL;
5833 /* allocate the LDT */
5834 if (!ldt_table) {
5835 env->ldt.base = target_mmap(0,
5836 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5837 PROT_READ|PROT_WRITE,
5838 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5839 if (env->ldt.base == -1)
5840 return -TARGET_ENOMEM;
5841 memset(g2h(env->ldt.base), 0,
5842 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5843 env->ldt.limit = 0xffff;
5844 ldt_table = g2h(env->ldt.base);
5847 /* NOTE: same code as Linux kernel */
5848 /* Allow LDTs to be cleared by the user. */
5849 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5850 if (oldmode ||
5851 (contents == 0 &&
5852 read_exec_only == 1 &&
5853 seg_32bit == 0 &&
5854 limit_in_pages == 0 &&
5855 seg_not_present == 1 &&
5856 useable == 0 )) {
5857 entry_1 = 0;
5858 entry_2 = 0;
5859 goto install;
5863 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5864 (ldt_info.limit & 0x0ffff);
5865 entry_2 = (ldt_info.base_addr & 0xff000000) |
5866 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5867 (ldt_info.limit & 0xf0000) |
5868 ((read_exec_only ^ 1) << 9) |
5869 (contents << 10) |
5870 ((seg_not_present ^ 1) << 15) |
5871 (seg_32bit << 22) |
5872 (limit_in_pages << 23) |
5873 (lm << 21) |
5874 0x7000;
5875 if (!oldmode)
5876 entry_2 |= (useable << 20);
5878 /* Install the new entry ... */
5879 install:
5880 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5881 lp[0] = tswap32(entry_1);
5882 lp[1] = tswap32(entry_2);
5883 return 0;
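The entry_1/entry_2 packing above is the standard x86 segment-descriptor layout: base and limit are scattered across both words, while type, DPL, present and the granularity bits live in entry_2. Below is a standalone sketch that packs a flat 32-bit data segment the same way; the 64-bit 'lm' bit is omitted for brevity, and the constant 0x7000 sets S=1 and DPL=3 exactly as in the code above. The parameter values in main() are chosen purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Pack base/limit/flags into the two 32-bit words of an x86 descriptor,
 * mirroring the bitfield layout used by write_ldt() above. */
static void pack_descriptor(uint32_t base, uint32_t limit,
                            int contents, int read_exec_only,
                            int seg_32bit, int limit_in_pages,
                            int seg_not_present, int useable,
                            uint32_t *entry_1, uint32_t *entry_2)
{
    *entry_1 = ((base & 0x0000ffff) << 16) | (limit & 0x0ffff);
    *entry_2 = (base & 0xff000000) |
               ((base & 0x00ff0000) >> 16) |
               (limit & 0xf0000) |
               ((read_exec_only ^ 1) << 9) |
               (contents << 10) |
               ((seg_not_present ^ 1) << 15) |
               (seg_32bit << 22) |
               (limit_in_pages << 23) |
               (useable << 20) |
               0x7000;                     /* S=1, DPL=3, as above */
}

int main(void)
{
    uint32_t e1, e2;

    /* A flat 4 GiB read/write data segment (limit 0xfffff, page units). */
    pack_descriptor(0x0, 0xfffff, /*contents=*/0, /*read_exec_only=*/0,
                    /*seg_32bit=*/1, /*limit_in_pages=*/1,
                    /*seg_not_present=*/0, /*useable=*/1, &e1, &e2);
    printf("entry_1=0x%08x entry_2=0x%08x\n", e1, e2);
    return 0;
}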
5886 /* specific and weird i386 syscalls */
5887 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5888 unsigned long bytecount)
5890 abi_long ret;
5892 switch (func) {
5893 case 0:
5894 ret = read_ldt(ptr, bytecount);
5895 break;
5896 case 1:
5897 ret = write_ldt(env, ptr, bytecount, 1);
5898 break;
5899 case 0x11:
5900 ret = write_ldt(env, ptr, bytecount, 0);
5901 break;
5902 default:
5903 ret = -TARGET_ENOSYS;
5904 break;
5906 return ret;
5909 #if defined(TARGET_ABI32)
5910 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5912 uint64_t *gdt_table = g2h(env->gdt.base);
5913 struct target_modify_ldt_ldt_s ldt_info;
5914 struct target_modify_ldt_ldt_s *target_ldt_info;
5915 int seg_32bit, contents, read_exec_only, limit_in_pages;
5916 int seg_not_present, useable, lm;
5917 uint32_t *lp, entry_1, entry_2;
5918 int i;
5920 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5921 if (!target_ldt_info)
5922 return -TARGET_EFAULT;
5923 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5924 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5925 ldt_info.limit = tswap32(target_ldt_info->limit);
5926 ldt_info.flags = tswap32(target_ldt_info->flags);
5927 if (ldt_info.entry_number == -1) {
5928 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5929 if (gdt_table[i] == 0) {
5930 ldt_info.entry_number = i;
5931 target_ldt_info->entry_number = tswap32(i);
5932 break;
5936 unlock_user_struct(target_ldt_info, ptr, 1);
5938 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5939 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5940 return -TARGET_EINVAL;
5941 seg_32bit = ldt_info.flags & 1;
5942 contents = (ldt_info.flags >> 1) & 3;
5943 read_exec_only = (ldt_info.flags >> 3) & 1;
5944 limit_in_pages = (ldt_info.flags >> 4) & 1;
5945 seg_not_present = (ldt_info.flags >> 5) & 1;
5946 useable = (ldt_info.flags >> 6) & 1;
5947 #ifdef TARGET_ABI32
5948 lm = 0;
5949 #else
5950 lm = (ldt_info.flags >> 7) & 1;
5951 #endif
5953 if (contents == 3) {
5954 if (seg_not_present == 0)
5955 return -TARGET_EINVAL;
5958 /* NOTE: same code as Linux kernel */
5959 /* Allow LDTs to be cleared by the user. */
5960 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5961 if ((contents == 0 &&
5962 read_exec_only == 1 &&
5963 seg_32bit == 0 &&
5964 limit_in_pages == 0 &&
5965 seg_not_present == 1 &&
5966 useable == 0 )) {
5967 entry_1 = 0;
5968 entry_2 = 0;
5969 goto install;
5973 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5974 (ldt_info.limit & 0x0ffff);
5975 entry_2 = (ldt_info.base_addr & 0xff000000) |
5976 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5977 (ldt_info.limit & 0xf0000) |
5978 ((read_exec_only ^ 1) << 9) |
5979 (contents << 10) |
5980 ((seg_not_present ^ 1) << 15) |
5981 (seg_32bit << 22) |
5982 (limit_in_pages << 23) |
5983 (useable << 20) |
5984 (lm << 21) |
5985 0x7000;
5987 /* Install the new entry ... */
5988 install:
5989 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5990 lp[0] = tswap32(entry_1);
5991 lp[1] = tswap32(entry_2);
5992 return 0;
5995 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5997 struct target_modify_ldt_ldt_s *target_ldt_info;
5998 uint64_t *gdt_table = g2h(env->gdt.base);
5999 uint32_t base_addr, limit, flags;
6000 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6001 int seg_not_present, useable, lm;
6002 uint32_t *lp, entry_1, entry_2;
6004 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6005 if (!target_ldt_info)
6006 return -TARGET_EFAULT;
6007 idx = tswap32(target_ldt_info->entry_number);
6008 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6009 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6010 unlock_user_struct(target_ldt_info, ptr, 1);
6011 return -TARGET_EINVAL;
6013 lp = (uint32_t *)(gdt_table + idx);
6014 entry_1 = tswap32(lp[0]);
6015 entry_2 = tswap32(lp[1]);
6017 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6018 contents = (entry_2 >> 10) & 3;
6019 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6020 seg_32bit = (entry_2 >> 22) & 1;
6021 limit_in_pages = (entry_2 >> 23) & 1;
6022 useable = (entry_2 >> 20) & 1;
6023 #ifdef TARGET_ABI32
6024 lm = 0;
6025 #else
6026 lm = (entry_2 >> 21) & 1;
6027 #endif
6028 flags = (seg_32bit << 0) | (contents << 1) |
6029 (read_exec_only << 3) | (limit_in_pages << 4) |
6030 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6031 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6032 base_addr = (entry_1 >> 16) |
6033 (entry_2 & 0xff000000) |
6034 ((entry_2 & 0xff) << 16);
6035 target_ldt_info->base_addr = tswapal(base_addr);
6036 target_ldt_info->limit = tswap32(limit);
6037 target_ldt_info->flags = tswap32(flags);
6038 unlock_user_struct(target_ldt_info, ptr, 1);
6039 return 0;
6042 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6044 return -TARGET_ENOSYS;
6046 #else
6047 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6049 abi_long ret = 0;
6050 abi_ulong val;
6051 int idx;
6053 switch(code) {
6054 case TARGET_ARCH_SET_GS:
6055 case TARGET_ARCH_SET_FS:
6056 if (code == TARGET_ARCH_SET_GS)
6057 idx = R_GS;
6058 else
6059 idx = R_FS;
6060 cpu_x86_load_seg(env, idx, 0);
6061 env->segs[idx].base = addr;
6062 break;
6063 case TARGET_ARCH_GET_GS:
6064 case TARGET_ARCH_GET_FS:
6065 if (code == TARGET_ARCH_GET_GS)
6066 idx = R_GS;
6067 else
6068 idx = R_FS;
6069 val = env->segs[idx].base;
6070 if (put_user(val, addr, abi_ulong))
6071 ret = -TARGET_EFAULT;
6072 break;
6073 default:
6074 ret = -TARGET_EINVAL;
6075 break;
6077 return ret;
6079 #endif /* defined(TARGET_ABI32) */
6081 #endif /* defined(TARGET_I386) */
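The TARGET_ARCH_SET/GET_FS/GS codes handled above mirror the host's arch_prctl(2) on x86-64. For reference, a native usage example reading the FS base (the segment used for TLS); x86-64 Linux only, with ARCH_GET_FS coming from <asm/prctl.h>:

/* x86-64 Linux only. */
#include <asm/prctl.h>      /* ARCH_GET_FS, ARCH_SET_FS, ... */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    unsigned long fs_base;

    /* Read the current FS base via the raw syscall; glibc historically
     * ships no wrapper for arch_prctl. */
    if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base) != 0) {
        perror("arch_prctl");
        return 1;
    }
    printf("FS base: %#lx\n", fs_base);
    return 0;
}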
6083 #define NEW_STACK_SIZE 0x40000
6086 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6087 typedef struct {
6088 CPUArchState *env;
6089 pthread_mutex_t mutex;
6090 pthread_cond_t cond;
6091 pthread_t thread;
6092 uint32_t tid;
6093 abi_ulong child_tidptr;
6094 abi_ulong parent_tidptr;
6095 sigset_t sigmask;
6096 } new_thread_info;
6098 static void *clone_func(void *arg)
6100 new_thread_info *info = arg;
6101 CPUArchState *env;
6102 CPUState *cpu;
6103 TaskState *ts;
6105 rcu_register_thread();
6106 tcg_register_thread();
6107 env = info->env;
6108 cpu = env_cpu(env);
6109 thread_cpu = cpu;
6110 ts = (TaskState *)cpu->opaque;
6111 info->tid = sys_gettid();
6112 task_settid(ts);
6113 if (info->child_tidptr)
6114 put_user_u32(info->tid, info->child_tidptr);
6115 if (info->parent_tidptr)
6116 put_user_u32(info->tid, info->parent_tidptr);
6117 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6118 /* Enable signals. */
6119 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6120 /* Signal to the parent that we're ready. */
6121 pthread_mutex_lock(&info->mutex);
6122 pthread_cond_broadcast(&info->cond);
6123 pthread_mutex_unlock(&info->mutex);
6124 /* Wait until the parent has finished initializing the tls state. */
6125 pthread_mutex_lock(&clone_lock);
6126 pthread_mutex_unlock(&clone_lock);
6127 cpu_loop(env);
6128 /* never exits */
6129 return NULL;
6132 /* do_fork() must return host values and target errnos (unlike most
6133 do_*() functions). */
6134 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6135 abi_ulong parent_tidptr, target_ulong newtls,
6136 abi_ulong child_tidptr)
6138 CPUState *cpu = env_cpu(env);
6139 int ret;
6140 TaskState *ts;
6141 CPUState *new_cpu;
6142 CPUArchState *new_env;
6143 sigset_t sigmask;
6145 flags &= ~CLONE_IGNORED_FLAGS;
6147 /* Emulate vfork() with fork() */
6148 if (flags & CLONE_VFORK)
6149 flags &= ~(CLONE_VFORK | CLONE_VM);
6151 if (flags & CLONE_VM) {
6152 TaskState *parent_ts = (TaskState *)cpu->opaque;
6153 new_thread_info info;
6154 pthread_attr_t attr;
6156 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6157 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6158 return -TARGET_EINVAL;
6161 ts = g_new0(TaskState, 1);
6162 init_task_state(ts);
6164 /* Grab a mutex so that thread setup appears atomic. */
6165 pthread_mutex_lock(&clone_lock);
6167 /* we create a new CPU instance. */
6168 new_env = cpu_copy(env);
6169 /* Init regs that differ from the parent. */
6170 cpu_clone_regs_child(new_env, newsp, flags);
6171 cpu_clone_regs_parent(env, flags);
6172 new_cpu = env_cpu(new_env);
6173 new_cpu->opaque = ts;
6174 ts->bprm = parent_ts->bprm;
6175 ts->info = parent_ts->info;
6176 ts->signal_mask = parent_ts->signal_mask;
6178 if (flags & CLONE_CHILD_CLEARTID) {
6179 ts->child_tidptr = child_tidptr;
6182 if (flags & CLONE_SETTLS) {
6183 cpu_set_tls (new_env, newtls);
6186 memset(&info, 0, sizeof(info));
6187 pthread_mutex_init(&info.mutex, NULL);
6188 pthread_mutex_lock(&info.mutex);
6189 pthread_cond_init(&info.cond, NULL);
6190 info.env = new_env;
6191 if (flags & CLONE_CHILD_SETTID) {
6192 info.child_tidptr = child_tidptr;
6194 if (flags & CLONE_PARENT_SETTID) {
6195 info.parent_tidptr = parent_tidptr;
6198 ret = pthread_attr_init(&attr);
6199 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6200 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6201 /* It is not safe to deliver signals until the child has finished
6202 initializing, so temporarily block all signals. */
6203 sigfillset(&sigmask);
6204 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6205 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6207 /* If this is our first additional thread, we need to ensure we
6208 * generate code for parallel execution and flush old translations.
6210 if (!parallel_cpus) {
6211 parallel_cpus = true;
6212 tb_flush(cpu);
6215 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6216 /* TODO: Free new CPU state if thread creation failed. */
6218 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6219 pthread_attr_destroy(&attr);
6220 if (ret == 0) {
6221 /* Wait for the child to initialize. */
6222 pthread_cond_wait(&info.cond, &info.mutex);
6223 ret = info.tid;
6224 } else {
6225 ret = -1;
6227 pthread_mutex_unlock(&info.mutex);
6228 pthread_cond_destroy(&info.cond);
6229 pthread_mutex_destroy(&info.mutex);
6230 pthread_mutex_unlock(&clone_lock);
6231 } else {
6232 /* if no CLONE_VM, we consider it is a fork */
6233 if (flags & CLONE_INVALID_FORK_FLAGS) {
6234 return -TARGET_EINVAL;
6237 /* We can't support custom termination signals */
6238 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6239 return -TARGET_EINVAL;
6242 if (block_signals()) {
6243 return -TARGET_ERESTARTSYS;
6246 fork_start();
6247 ret = fork();
6248 if (ret == 0) {
6249 /* Child Process. */
6250 cpu_clone_regs_child(env, newsp, flags);
6251 fork_end(1);
6252 /* There is a race condition here. The parent process could
6253 theoretically read the TID in the child process before the child
6254 tid is set. This would require using either ptrace
6255 (not implemented) or having *_tidptr point at a shared memory
6256 mapping. We can't repeat the spinlock hack used above because
6257 the child process gets its own copy of the lock. */
6258 if (flags & CLONE_CHILD_SETTID)
6259 put_user_u32(sys_gettid(), child_tidptr);
6260 if (flags & CLONE_PARENT_SETTID)
6261 put_user_u32(sys_gettid(), parent_tidptr);
6262 ts = (TaskState *)cpu->opaque;
6263 if (flags & CLONE_SETTLS)
6264 cpu_set_tls (env, newtls);
6265 if (flags & CLONE_CHILD_CLEARTID)
6266 ts->child_tidptr = child_tidptr;
6267 } else {
6268 cpu_clone_regs_parent(env, flags);
6269 fork_end(0);
6272 return ret;
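The ready handshake between do_fork() and clone_func() above is the classic condition-variable pattern: the parent takes info.mutex before pthread_create() and sits in pthread_cond_wait(), so the child's broadcast, made while holding the same mutex, cannot be lost. A minimal sketch of just that pattern follows; unlike the code above it joins rather than detaches the thread, and like the code above it omits the usual predicate recheck against spurious wakeups.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned long tid;

static void *child(void *arg)
{
    pthread_mutex_lock(&mutex);
    tid = (unsigned long)pthread_self();    /* stand-in for sys_gettid() */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&mutex);
    return NULL;
}

int main(void)
{
    pthread_t thr;

    pthread_mutex_lock(&mutex);        /* taken before create, as above */
    pthread_create(&thr, NULL, child, NULL);
    pthread_cond_wait(&cond, &mutex);  /* atomically releases the mutex */
    printf("child tid: %#lx\n", tid);  /* safe: child has published it */
    pthread_mutex_unlock(&mutex);
    pthread_join(thr, NULL);
    return 0;
}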
6275 /* warning: doesn't handle Linux-specific flags... */
6276 static int target_to_host_fcntl_cmd(int cmd)
6278 int ret;
6280 switch(cmd) {
6281 case TARGET_F_DUPFD:
6282 case TARGET_F_GETFD:
6283 case TARGET_F_SETFD:
6284 case TARGET_F_GETFL:
6285 case TARGET_F_SETFL:
6286 case TARGET_F_OFD_GETLK:
6287 case TARGET_F_OFD_SETLK:
6288 case TARGET_F_OFD_SETLKW:
6289 ret = cmd;
6290 break;
6291 case TARGET_F_GETLK:
6292 ret = F_GETLK64;
6293 break;
6294 case TARGET_F_SETLK:
6295 ret = F_SETLK64;
6296 break;
6297 case TARGET_F_SETLKW:
6298 ret = F_SETLKW64;
6299 break;
6300 case TARGET_F_GETOWN:
6301 ret = F_GETOWN;
6302 break;
6303 case TARGET_F_SETOWN:
6304 ret = F_SETOWN;
6305 break;
6306 case TARGET_F_GETSIG:
6307 ret = F_GETSIG;
6308 break;
6309 case TARGET_F_SETSIG:
6310 ret = F_SETSIG;
6311 break;
6312 #if TARGET_ABI_BITS == 32
6313 case TARGET_F_GETLK64:
6314 ret = F_GETLK64;
6315 break;
6316 case TARGET_F_SETLK64:
6317 ret = F_SETLK64;
6318 break;
6319 case TARGET_F_SETLKW64:
6320 ret = F_SETLKW64;
6321 break;
6322 #endif
6323 case TARGET_F_SETLEASE:
6324 ret = F_SETLEASE;
6325 break;
6326 case TARGET_F_GETLEASE:
6327 ret = F_GETLEASE;
6328 break;
6329 #ifdef F_DUPFD_CLOEXEC
6330 case TARGET_F_DUPFD_CLOEXEC:
6331 ret = F_DUPFD_CLOEXEC;
6332 break;
6333 #endif
6334 case TARGET_F_NOTIFY:
6335 ret = F_NOTIFY;
6336 break;
6337 #ifdef F_GETOWN_EX
6338 case TARGET_F_GETOWN_EX:
6339 ret = F_GETOWN_EX;
6340 break;
6341 #endif
6342 #ifdef F_SETOWN_EX
6343 case TARGET_F_SETOWN_EX:
6344 ret = F_SETOWN_EX;
6345 break;
6346 #endif
6347 #ifdef F_SETPIPE_SZ
6348 case TARGET_F_SETPIPE_SZ:
6349 ret = F_SETPIPE_SZ;
6350 break;
6351 case TARGET_F_GETPIPE_SZ:
6352 ret = F_GETPIPE_SZ;
6353 break;
6354 #endif
6355 default:
6356 ret = -TARGET_EINVAL;
6357 break;
6360 #if defined(__powerpc64__)
6361 /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6362 * which are not supported by the kernel. The glibc fcntl call actually
6363 * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6364 * the syscall directly, adjust to what the kernel supports.
6366 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6367 ret -= F_GETLK64 - 5;
6369 #endif
6371 return ret;
6374 #define FLOCK_TRANSTBL \
6375 switch (type) { \
6376 TRANSTBL_CONVERT(F_RDLCK); \
6377 TRANSTBL_CONVERT(F_WRLCK); \
6378 TRANSTBL_CONVERT(F_UNLCK); \
6379 TRANSTBL_CONVERT(F_EXLCK); \
6380 TRANSTBL_CONVERT(F_SHLCK); \
6383 static int target_to_host_flock(int type)
6385 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6386 FLOCK_TRANSTBL
6387 #undef TRANSTBL_CONVERT
6388 return -TARGET_EINVAL;
6391 static int host_to_target_flock(int type)
6393 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6394 FLOCK_TRANSTBL
6395 #undef TRANSTBL_CONVERT
6396 /* if we don't know how to convert the value coming
6397 * from the host we copy to the target field as-is
6399 return type;
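FLOCK_TRANSTBL above is an X-macro: the same case list is expanded twice with different definitions of TRANSTBL_CONVERT, yielding both conversion directions from a single table. Here is the idiom in isolation, with made-up constants standing in for the real F_*LCK values:

#include <stdio.h>

/* Hypothetical constants, for illustration only. */
enum { HOST_A = 10, HOST_B = 11, GUEST_A = 20, GUEST_B = 21 };

#define TRANSTBL                 \
    switch (v) {                 \
    CONVERT(A);                  \
    CONVERT(B);                  \
    }

static int guest_to_host(int v)
{
#define CONVERT(x) case GUEST_##x: return HOST_##x
    TRANSTBL
#undef CONVERT
    return -1;
}

static int host_to_guest(int v)
{
#define CONVERT(x) case HOST_##x: return GUEST_##x
    TRANSTBL
#undef CONVERT
    return v;    /* unknown host values pass through, as above */
}

int main(void)
{
    printf("%d %d\n", guest_to_host(GUEST_B), host_to_guest(HOST_A));
    /* prints: 11 20 */
    return 0;
}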
6402 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6403 abi_ulong target_flock_addr)
6405 struct target_flock *target_fl;
6406 int l_type;
6408 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6409 return -TARGET_EFAULT;
6412 __get_user(l_type, &target_fl->l_type);
6413 l_type = target_to_host_flock(l_type);
6414 if (l_type < 0) {
6415 return l_type;
6417 fl->l_type = l_type;
6418 __get_user(fl->l_whence, &target_fl->l_whence);
6419 __get_user(fl->l_start, &target_fl->l_start);
6420 __get_user(fl->l_len, &target_fl->l_len);
6421 __get_user(fl->l_pid, &target_fl->l_pid);
6422 unlock_user_struct(target_fl, target_flock_addr, 0);
6423 return 0;
6426 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6427 const struct flock64 *fl)
6429 struct target_flock *target_fl;
6430 short l_type;
6432 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6433 return -TARGET_EFAULT;
6436 l_type = host_to_target_flock(fl->l_type);
6437 __put_user(l_type, &target_fl->l_type);
6438 __put_user(fl->l_whence, &target_fl->l_whence);
6439 __put_user(fl->l_start, &target_fl->l_start);
6440 __put_user(fl->l_len, &target_fl->l_len);
6441 __put_user(fl->l_pid, &target_fl->l_pid);
6442 unlock_user_struct(target_fl, target_flock_addr, 1);
6443 return 0;
6446 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6447 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6449 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6450 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6451 abi_ulong target_flock_addr)
6453 struct target_oabi_flock64 *target_fl;
6454 int l_type;
6456 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6457 return -TARGET_EFAULT;
6460 __get_user(l_type, &target_fl->l_type);
6461 l_type = target_to_host_flock(l_type);
6462 if (l_type < 0) {
6463 return l_type;
6465 fl->l_type = l_type;
6466 __get_user(fl->l_whence, &target_fl->l_whence);
6467 __get_user(fl->l_start, &target_fl->l_start);
6468 __get_user(fl->l_len, &target_fl->l_len);
6469 __get_user(fl->l_pid, &target_fl->l_pid);
6470 unlock_user_struct(target_fl, target_flock_addr, 0);
6471 return 0;
6474 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6475 const struct flock64 *fl)
6477 struct target_oabi_flock64 *target_fl;
6478 short l_type;
6480 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6481 return -TARGET_EFAULT;
6484 l_type = host_to_target_flock(fl->l_type);
6485 __put_user(l_type, &target_fl->l_type);
6486 __put_user(fl->l_whence, &target_fl->l_whence);
6487 __put_user(fl->l_start, &target_fl->l_start);
6488 __put_user(fl->l_len, &target_fl->l_len);
6489 __put_user(fl->l_pid, &target_fl->l_pid);
6490 unlock_user_struct(target_fl, target_flock_addr, 1);
6491 return 0;
6493 #endif
6495 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6496 abi_ulong target_flock_addr)
6498 struct target_flock64 *target_fl;
6499 int l_type;
6501 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6502 return -TARGET_EFAULT;
6505 __get_user(l_type, &target_fl->l_type);
6506 l_type = target_to_host_flock(l_type);
6507 if (l_type < 0) {
6508 return l_type;
6510 fl->l_type = l_type;
6511 __get_user(fl->l_whence, &target_fl->l_whence);
6512 __get_user(fl->l_start, &target_fl->l_start);
6513 __get_user(fl->l_len, &target_fl->l_len);
6514 __get_user(fl->l_pid, &target_fl->l_pid);
6515 unlock_user_struct(target_fl, target_flock_addr, 0);
6516 return 0;
6519 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6520 const struct flock64 *fl)
6522 struct target_flock64 *target_fl;
6523 short l_type;
6525 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6526 return -TARGET_EFAULT;
6529 l_type = host_to_target_flock(fl->l_type);
6530 __put_user(l_type, &target_fl->l_type);
6531 __put_user(fl->l_whence, &target_fl->l_whence);
6532 __put_user(fl->l_start, &target_fl->l_start);
6533 __put_user(fl->l_len, &target_fl->l_len);
6534 __put_user(fl->l_pid, &target_fl->l_pid);
6535 unlock_user_struct(target_fl, target_flock_addr, 1);
6536 return 0;
6539 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6541 struct flock64 fl64;
6542 #ifdef F_GETOWN_EX
6543 struct f_owner_ex fox;
6544 struct target_f_owner_ex *target_fox;
6545 #endif
6546 abi_long ret;
6547 int host_cmd = target_to_host_fcntl_cmd(cmd);
6549 if (host_cmd == -TARGET_EINVAL)
6550 return host_cmd;
6552 switch(cmd) {
6553 case TARGET_F_GETLK:
6554 ret = copy_from_user_flock(&fl64, arg);
6555 if (ret) {
6556 return ret;
6558 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6559 if (ret == 0) {
6560 ret = copy_to_user_flock(arg, &fl64);
6562 break;
6564 case TARGET_F_SETLK:
6565 case TARGET_F_SETLKW:
6566 ret = copy_from_user_flock(&fl64, arg);
6567 if (ret) {
6568 return ret;
6570 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6571 break;
6573 case TARGET_F_GETLK64:
6574 case TARGET_F_OFD_GETLK:
6575 ret = copy_from_user_flock64(&fl64, arg);
6576 if (ret) {
6577 return ret;
6579 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6580 if (ret == 0) {
6581 ret = copy_to_user_flock64(arg, &fl64);
6583 break;
6584 case TARGET_F_SETLK64:
6585 case TARGET_F_SETLKW64:
6586 case TARGET_F_OFD_SETLK:
6587 case TARGET_F_OFD_SETLKW:
6588 ret = copy_from_user_flock64(&fl64, arg);
6589 if (ret) {
6590 return ret;
6592 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6593 break;
6595 case TARGET_F_GETFL:
6596 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6597 if (ret >= 0) {
6598 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6600 break;
6602 case TARGET_F_SETFL:
6603 ret = get_errno(safe_fcntl(fd, host_cmd,
6604 target_to_host_bitmask(arg,
6605 fcntl_flags_tbl)));
6606 break;
6608 #ifdef F_GETOWN_EX
6609 case TARGET_F_GETOWN_EX:
6610 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6611 if (ret >= 0) {
6612 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6613 return -TARGET_EFAULT;
6614 target_fox->type = tswap32(fox.type);
6615 target_fox->pid = tswap32(fox.pid);
6616 unlock_user_struct(target_fox, arg, 1);
6618 break;
6619 #endif
6621 #ifdef F_SETOWN_EX
6622 case TARGET_F_SETOWN_EX:
6623 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6624 return -TARGET_EFAULT;
6625 fox.type = tswap32(target_fox->type);
6626 fox.pid = tswap32(target_fox->pid);
6627 unlock_user_struct(target_fox, arg, 0);
6628 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6629 break;
6630 #endif
6632 case TARGET_F_SETOWN:
6633 case TARGET_F_GETOWN:
6634 case TARGET_F_SETSIG:
6635 case TARGET_F_GETSIG:
6636 case TARGET_F_SETLEASE:
6637 case TARGET_F_GETLEASE:
6638 case TARGET_F_SETPIPE_SZ:
6639 case TARGET_F_GETPIPE_SZ:
6640 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6641 break;
6643 default:
6644 ret = get_errno(safe_fcntl(fd, cmd, arg));
6645 break;
6647 return ret;
6650 #ifdef USE_UID16
6652 static inline int high2lowuid(int uid)
6654 if (uid > 65535)
6655 return 65534;
6656 else
6657 return uid;
6660 static inline int high2lowgid(int gid)
6662 if (gid > 65535)
6663 return 65534;
6664 else
6665 return gid;
6668 static inline int low2highuid(int uid)
6670 if ((int16_t)uid == -1)
6671 return -1;
6672 else
6673 return uid;
6676 static inline int low2highgid(int gid)
6678 if ((int16_t)gid == -1)
6679 return -1;
6680 else
6681 return gid;
6683 static inline int tswapid(int id)
6685 return tswap16(id);
6688 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6690 #else /* !USE_UID16 */
6691 static inline int high2lowuid(int uid)
6693 return uid;
6695 static inline int high2lowgid(int gid)
6697 return gid;
6699 static inline int low2highuid(int uid)
6701 return uid;
6703 static inline int low2highgid(int gid)
6705 return gid;
6707 static inline int tswapid(int id)
6709 return tswap32(id);
6712 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6714 #endif /* USE_UID16 */
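The 16-bit path above mirrors the kernel's overflowuid convention: IDs above 65535 are reported as 65534, and the 16-bit value 0xffff must be widened back to -1, since -1 means "leave unchanged" in calls such as setresuid(). The sign-extension trick used by low2highuid(), as a one-line check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t wire = 0xffff;              /* 16-bit "no change" sentinel */
    int widened = ((int16_t)wire == -1) ? -1 : wire;

    printf("%d\n", widened);             /* prints -1, not 65535 */
    return 0;
}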
6716 /* We must do direct syscalls for setting UID/GID, because we want to
6717 * implement the Linux system call semantics of "change only for this thread",
6718 * not the libc/POSIX semantics of "change for all threads in process".
6719 * (See http://ewontfix.com/17/ for more details.)
6720 * We use the 32-bit version of the syscalls if present; if it is not
6721 * then either the host architecture supports 32-bit UIDs natively with
6722 * the standard syscall, or the 16-bit UID is the best we can do.
6724 #ifdef __NR_setuid32
6725 #define __NR_sys_setuid __NR_setuid32
6726 #else
6727 #define __NR_sys_setuid __NR_setuid
6728 #endif
6729 #ifdef __NR_setgid32
6730 #define __NR_sys_setgid __NR_setgid32
6731 #else
6732 #define __NR_sys_setgid __NR_setgid
6733 #endif
6734 #ifdef __NR_setresuid32
6735 #define __NR_sys_setresuid __NR_setresuid32
6736 #else
6737 #define __NR_sys_setresuid __NR_setresuid
6738 #endif
6739 #ifdef __NR_setresgid32
6740 #define __NR_sys_setresgid __NR_setresgid32
6741 #else
6742 #define __NR_sys_setresgid __NR_setresgid
6743 #endif
6745 _syscall1(int, sys_setuid, uid_t, uid)
6746 _syscall1(int, sys_setgid, gid_t, gid)
6747 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6748 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
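For contrast with the direct syscalls declared above: glibc's setuid() wrapper broadcasts the credential change to every thread in the process (its setxid mechanism), which is exactly what the comment above says the emulation must avoid. Calling the syscall directly affects only the current thread. A hedged sketch using the generic syscall(2) wrapper; on a 32-bit host the __NR_setuid32 variant selected above would be used instead:

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    /* The raw syscall changes the UID of the calling thread only;
     * glibc's setuid() would signal every thread in the process.
     * Setting our own UID keeps this a harmless no-op. */
    long ret = syscall(SYS_setuid, getuid());
    printf("sys_setuid -> %ld\n", ret);
    return 0;
}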
6750 void syscall_init(void)
6752 IOCTLEntry *ie;
6753 const argtype *arg_type;
6754 int size;
6755 int i;
6757 thunk_init(STRUCT_MAX);
6759 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6760 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6761 #include "syscall_types.h"
6762 #undef STRUCT
6763 #undef STRUCT_SPECIAL
6765 /* Build the target_to_host_errno_table[] from
6766 * host_to_target_errno_table[]. */
6767 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6768 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6771 /* We patch the ioctl size if necessary. We rely on the fact that
6772 no ioctl has all the bits set to '1' in its size field */
6773 ie = ioctl_entries;
6774 while (ie->target_cmd != 0) {
6775 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6776 TARGET_IOC_SIZEMASK) {
6777 arg_type = ie->arg_type;
6778 if (arg_type[0] != TYPE_PTR) {
6779 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6780 ie->target_cmd);
6781 exit(1);
6783 arg_type++;
6784 size = thunk_type_size(arg_type, 0);
6785 ie->target_cmd = (ie->target_cmd &
6786 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6787 (size << TARGET_IOC_SIZESHIFT);
6790 /* automatic consistency check if same arch */
6791 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6792 (defined(__x86_64__) && defined(TARGET_X86_64))
6793 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6794 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6795 ie->name, ie->target_cmd, ie->host_cmd);
6797 #endif
6798 ie++;
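The size patching above relies on the kernel's _IOC() request encoding: an ioctl number packs direction, size, type and command fields, with the size field occupying bits 16..29 on Linux. A standalone sketch that rewrites the size field the same way, using the host's <linux/ioctl.h> macros (the TARGET_IOC_* constants mirror these on the target side); the command and size values are invented for illustration:

#include <stdio.h>
#include <linux/ioctl.h>   /* _IOC(), _IOC_SIZESHIFT, _IOC_SIZEMASK, ... */

int main(void)
{
    /* A request encoded with an all-ones size field, then patched to
     * the real payload size, mirroring the loop in syscall_init(). */
    unsigned int cmd = _IOC(_IOC_READ, 'q', 1, _IOC_SIZEMASK);
    unsigned int size = 64;    /* pretend thunk_type_size() returned 64 */

    cmd = (cmd & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
          | (size << _IOC_SIZESHIFT);
    printf("patched cmd: %#x (size %u)\n", cmd, _IOC_SIZE(cmd));
    return 0;
}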
6802 #ifdef TARGET_NR_truncate64
6803 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6804 abi_long arg2,
6805 abi_long arg3,
6806 abi_long arg4)
6808 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6809 arg2 = arg3;
6810 arg3 = arg4;
6812 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6814 #endif
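On 32-bit targets the 64-bit offset arrives split across two registers, and regpairs_aligned() accounts for ABIs that require such pairs to start on an even-numbered register, hence the arg2/arg3 shuffle above. A sketch of the recombination, assuming the low word comes first; QEMU's actual target_offset64() picks the order from the target's endianness:

#include <stdint.h>
#include <stdio.h>

/* Recombine a 64-bit value passed as two 32-bit syscall arguments.
 * Assumption: low word in the first register; a big-endian target
 * would swap the two halves. */
static uint64_t offset64(uint32_t word0, uint32_t word1)
{
    return ((uint64_t)word1 << 32) | word0;
}

int main(void)
{
    printf("%#llx\n",
           (unsigned long long)offset64(0x89abcdefu, 0x01234567u));
    /* prints 0x123456789abcdef */
    return 0;
}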
6816 #ifdef TARGET_NR_ftruncate64
6817 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6818 abi_long arg2,
6819 abi_long arg3,
6820 abi_long arg4)
6822 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6823 arg2 = arg3;
6824 arg3 = arg4;
6826 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6828 #endif
6830 #if defined(TARGET_NR_timer_settime) || \
6831 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6832 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6833 abi_ulong target_addr)
6835 if (target_to_host_timespec(&host_its->it_interval, target_addr +
6836 offsetof(struct target_itimerspec,
6837 it_interval)) ||
6838 target_to_host_timespec(&host_its->it_value, target_addr +
6839 offsetof(struct target_itimerspec,
6840 it_value))) {
6841 return -TARGET_EFAULT;
6844 return 0;
6846 #endif
6848 #if defined(TARGET_NR_timer_settime64) || \
6849 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6850 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6851 abi_ulong target_addr)
6853 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6854 offsetof(struct target__kernel_itimerspec,
6855 it_interval)) ||
6856 target_to_host_timespec64(&host_its->it_value, target_addr +
6857 offsetof(struct target__kernel_itimerspec,
6858 it_value))) {
6859 return -TARGET_EFAULT;
6862 return 0;
6864 #endif
6866 #if ((defined(TARGET_NR_timerfd_gettime) || \
6867 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6868 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6869 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6870 struct itimerspec *host_its)
6872 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6873 it_interval),
6874 &host_its->it_interval) ||
6875 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6876 it_value),
6877 &host_its->it_value)) {
6878 return -TARGET_EFAULT;
6880 return 0;
6882 #endif
6884 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6885 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6886 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6887 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6888 struct itimerspec *host_its)
6890 if (host_to_target_timespec64(target_addr +
6891 offsetof(struct target__kernel_itimerspec,
6892 it_interval),
6893 &host_its->it_interval) ||
6894 host_to_target_timespec64(target_addr +
6895 offsetof(struct target__kernel_itimerspec,
6896 it_value),
6897 &host_its->it_value)) {
6898 return -TARGET_EFAULT;
6900 return 0;
6902 #endif
6904 #if defined(TARGET_NR_adjtimex) || \
6905 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6906 static inline abi_long target_to_host_timex(struct timex *host_tx,
6907 abi_long target_addr)
6909 struct target_timex *target_tx;
6911 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6912 return -TARGET_EFAULT;
6915 __get_user(host_tx->modes, &target_tx->modes);
6916 __get_user(host_tx->offset, &target_tx->offset);
6917 __get_user(host_tx->freq, &target_tx->freq);
6918 __get_user(host_tx->maxerror, &target_tx->maxerror);
6919 __get_user(host_tx->esterror, &target_tx->esterror);
6920 __get_user(host_tx->status, &target_tx->status);
6921 __get_user(host_tx->constant, &target_tx->constant);
6922 __get_user(host_tx->precision, &target_tx->precision);
6923 __get_user(host_tx->tolerance, &target_tx->tolerance);
6924 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6925 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6926 __get_user(host_tx->tick, &target_tx->tick);
6927 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6928 __get_user(host_tx->jitter, &target_tx->jitter);
6929 __get_user(host_tx->shift, &target_tx->shift);
6930 __get_user(host_tx->stabil, &target_tx->stabil);
6931 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6932 __get_user(host_tx->calcnt, &target_tx->calcnt);
6933 __get_user(host_tx->errcnt, &target_tx->errcnt);
6934 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6935 __get_user(host_tx->tai, &target_tx->tai);
6937 unlock_user_struct(target_tx, target_addr, 0);
6938 return 0;
6941 static inline abi_long host_to_target_timex(abi_long target_addr,
6942 struct timex *host_tx)
6944 struct target_timex *target_tx;
6946 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6947 return -TARGET_EFAULT;
6950 __put_user(host_tx->modes, &target_tx->modes);
6951 __put_user(host_tx->offset, &target_tx->offset);
6952 __put_user(host_tx->freq, &target_tx->freq);
6953 __put_user(host_tx->maxerror, &target_tx->maxerror);
6954 __put_user(host_tx->esterror, &target_tx->esterror);
6955 __put_user(host_tx->status, &target_tx->status);
6956 __put_user(host_tx->constant, &target_tx->constant);
6957 __put_user(host_tx->precision, &target_tx->precision);
6958 __put_user(host_tx->tolerance, &target_tx->tolerance);
6959 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6960 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6961 __put_user(host_tx->tick, &target_tx->tick);
6962 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6963 __put_user(host_tx->jitter, &target_tx->jitter);
6964 __put_user(host_tx->shift, &target_tx->shift);
6965 __put_user(host_tx->stabil, &target_tx->stabil);
6966 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6967 __put_user(host_tx->calcnt, &target_tx->calcnt);
6968 __put_user(host_tx->errcnt, &target_tx->errcnt);
6969 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6970 __put_user(host_tx->tai, &target_tx->tai);
6972 unlock_user_struct(target_tx, target_addr, 1);
6973 return 0;
6975 #endif
6978 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
6979 static inline abi_long target_to_host_timex64(struct timex *host_tx,
6980 abi_long target_addr)
6982 struct target__kernel_timex *target_tx;
6984 if (copy_from_user_timeval64(&host_tx->time, target_addr +
6985 offsetof(struct target__kernel_timex,
6986 time))) {
6987 return -TARGET_EFAULT;
6990 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6991 return -TARGET_EFAULT;
6994 __get_user(host_tx->modes, &target_tx->modes);
6995 __get_user(host_tx->offset, &target_tx->offset);
6996 __get_user(host_tx->freq, &target_tx->freq);
6997 __get_user(host_tx->maxerror, &target_tx->maxerror);
6998 __get_user(host_tx->esterror, &target_tx->esterror);
6999 __get_user(host_tx->status, &target_tx->status);
7000 __get_user(host_tx->constant, &target_tx->constant);
7001 __get_user(host_tx->precision, &target_tx->precision);
7002 __get_user(host_tx->tolerance, &target_tx->tolerance);
7003 __get_user(host_tx->tick, &target_tx->tick);
7004 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7005 __get_user(host_tx->jitter, &target_tx->jitter);
7006 __get_user(host_tx->shift, &target_tx->shift);
7007 __get_user(host_tx->stabil, &target_tx->stabil);
7008 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7009 __get_user(host_tx->calcnt, &target_tx->calcnt);
7010 __get_user(host_tx->errcnt, &target_tx->errcnt);
7011 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7012 __get_user(host_tx->tai, &target_tx->tai);
7014 unlock_user_struct(target_tx, target_addr, 0);
7015 return 0;
7018 static inline abi_long host_to_target_timex64(abi_long target_addr,
7019 struct timex *host_tx)
7021 struct target__kernel_timex *target_tx;
7023 if (copy_to_user_timeval64(target_addr +
7024 offsetof(struct target__kernel_timex, time),
7025 &host_tx->time)) {
7026 return -TARGET_EFAULT;
7029 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7030 return -TARGET_EFAULT;
7033 __put_user(host_tx->modes, &target_tx->modes);
7034 __put_user(host_tx->offset, &target_tx->offset);
7035 __put_user(host_tx->freq, &target_tx->freq);
7036 __put_user(host_tx->maxerror, &target_tx->maxerror);
7037 __put_user(host_tx->esterror, &target_tx->esterror);
7038 __put_user(host_tx->status, &target_tx->status);
7039 __put_user(host_tx->constant, &target_tx->constant);
7040 __put_user(host_tx->precision, &target_tx->precision);
7041 __put_user(host_tx->tolerance, &target_tx->tolerance);
7042 __put_user(host_tx->tick, &target_tx->tick);
7043 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7044 __put_user(host_tx->jitter, &target_tx->jitter);
7045 __put_user(host_tx->shift, &target_tx->shift);
7046 __put_user(host_tx->stabil, &target_tx->stabil);
7047 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7048 __put_user(host_tx->calcnt, &target_tx->calcnt);
7049 __put_user(host_tx->errcnt, &target_tx->errcnt);
7050 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7051 __put_user(host_tx->tai, &target_tx->tai);
7053 unlock_user_struct(target_tx, target_addr, 1);
7054 return 0;
7056 #endif
7058 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7059 abi_ulong target_addr)
7061 struct target_sigevent *target_sevp;
7063 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7064 return -TARGET_EFAULT;
7067 /* This union is awkward on 64 bit systems because it has a 32 bit
7068 * integer and a pointer in it; we follow the conversion approach
7069 * used for handling sigval types in signal.c so the guest should get
7070 * the correct value back even if we did a 64 bit byteswap and it's
7071 * using the 32 bit integer.
7073 host_sevp->sigev_value.sival_ptr =
7074 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7075 host_sevp->sigev_signo =
7076 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7077 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7078 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7080 unlock_user_struct(target_sevp, target_addr, 1);
7081 return 0;
7084 #if defined(TARGET_NR_mlockall)
7085 static inline int target_to_host_mlockall_arg(int arg)
7087 int result = 0;
7089 if (arg & TARGET_MCL_CURRENT) {
7090 result |= MCL_CURRENT;
7092 if (arg & TARGET_MCL_FUTURE) {
7093 result |= MCL_FUTURE;
7095 #ifdef MCL_ONFAULT
7096 if (arg & TARGET_MCL_ONFAULT) {
7097 result |= MCL_ONFAULT;
7099 #endif
7101 return result;
7103 #endif
7105 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7106 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7107 defined(TARGET_NR_newfstatat))
7108 static inline abi_long host_to_target_stat64(void *cpu_env,
7109 abi_ulong target_addr,
7110 struct stat *host_st)
7112 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7113 if (((CPUARMState *)cpu_env)->eabi) {
7114 struct target_eabi_stat64 *target_st;
7116 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7117 return -TARGET_EFAULT;
7118 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7119 __put_user(host_st->st_dev, &target_st->st_dev);
7120 __put_user(host_st->st_ino, &target_st->st_ino);
7121 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7122 __put_user(host_st->st_ino, &target_st->__st_ino);
7123 #endif
7124 __put_user(host_st->st_mode, &target_st->st_mode);
7125 __put_user(host_st->st_nlink, &target_st->st_nlink);
7126 __put_user(host_st->st_uid, &target_st->st_uid);
7127 __put_user(host_st->st_gid, &target_st->st_gid);
7128 __put_user(host_st->st_rdev, &target_st->st_rdev);
7129 __put_user(host_st->st_size, &target_st->st_size);
7130 __put_user(host_st->st_blksize, &target_st->st_blksize);
7131 __put_user(host_st->st_blocks, &target_st->st_blocks);
7132 __put_user(host_st->st_atime, &target_st->target_st_atime);
7133 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7134 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7135 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7136 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7137 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7138 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7139 #endif
7140 unlock_user_struct(target_st, target_addr, 1);
7141 } else
7142 #endif
7144 #if defined(TARGET_HAS_STRUCT_STAT64)
7145 struct target_stat64 *target_st;
7146 #else
7147 struct target_stat *target_st;
7148 #endif
7150 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7151 return -TARGET_EFAULT;
7152 memset(target_st, 0, sizeof(*target_st));
7153 __put_user(host_st->st_dev, &target_st->st_dev);
7154 __put_user(host_st->st_ino, &target_st->st_ino);
7155 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7156 __put_user(host_st->st_ino, &target_st->__st_ino);
7157 #endif
7158 __put_user(host_st->st_mode, &target_st->st_mode);
7159 __put_user(host_st->st_nlink, &target_st->st_nlink);
7160 __put_user(host_st->st_uid, &target_st->st_uid);
7161 __put_user(host_st->st_gid, &target_st->st_gid);
7162 __put_user(host_st->st_rdev, &target_st->st_rdev);
7163 /* XXX: better use of kernel struct */
7164 __put_user(host_st->st_size, &target_st->st_size);
7165 __put_user(host_st->st_blksize, &target_st->st_blksize);
7166 __put_user(host_st->st_blocks, &target_st->st_blocks);
7167 __put_user(host_st->st_atime, &target_st->target_st_atime);
7168 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7169 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7170 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7171 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7172 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7173 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7174 #endif
7175 unlock_user_struct(target_st, target_addr, 1);
7178 return 0;
7180 #endif
7182 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7183 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7184 abi_ulong target_addr)
7186 struct target_statx *target_stx;
7188 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7189 return -TARGET_EFAULT;
7191 memset(target_stx, 0, sizeof(*target_stx));
7193 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7194 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7195 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7196 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7197 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7198 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7199 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7200 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7201 __put_user(host_stx->stx_size, &target_stx->stx_size);
7202 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7203 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7204 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7205 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7206 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7207 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7208 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7209 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7210 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7211 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7212 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7213 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7214 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7215 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7217 unlock_user_struct(target_stx, target_addr, 1);
7219 return 0;
7221 #endif
7223 static int do_sys_futex(int *uaddr, int op, int val,
7224 const struct timespec *timeout, int *uaddr2,
7225 int val3)
7227 #if HOST_LONG_BITS == 64
7228 #if defined(__NR_futex)
7229 /* always a 64-bit time_t; no _time64 variant is defined */
7230 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7232 #endif
7233 #else /* HOST_LONG_BITS == 64 */
7234 #if defined(__NR_futex_time64)
7235 if (sizeof(timeout->tv_sec) == 8) {
7236 /* _time64 function on 32bit arch */
7237 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7239 #endif
7240 #if defined(__NR_futex)
7241 /* old function on 32bit arch */
7242 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7243 #endif
7244 #endif /* HOST_LONG_BITS == 64 */
7245 g_assert_not_reached();
7248 static int do_safe_futex(int *uaddr, int op, int val,
7249 const struct timespec *timeout, int *uaddr2,
7250 int val3)
7252 #if HOST_LONG_BITS == 64
7253 #if defined(__NR_futex)
7254 /* always a 64-bit time_t; no _time64 variant is defined */
7255 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7256 #endif
7257 #else /* HOST_LONG_BITS == 64 */
7258 #if defined(__NR_futex_time64)
7259 if (sizeof(timeout->tv_sec) == 8) {
7260 /* _time64 function on 32bit arch */
7261 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7262 val3));
7264 #endif
7265 #if defined(__NR_futex)
7266 /* old function on 32bit arch */
7267 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7268 #endif
7269 #endif /* HOST_LONG_BITS == 64 */
7270 return -TARGET_ENOSYS;
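Both helpers above resolve to the raw futex syscall: plain __NR_futex where the host timespec already carries a 64-bit time_t (or as the legacy call on 32-bit hosts), and __NR_futex_time64 where a 32-bit host provides the wide variant. For reference, a minimal FUTEX_WAKE through the generic syscall(2) wrapper, assuming the host defines SYS_futex (some newer 32-bit ports only have the _time64 number):

#define _GNU_SOURCE
#include <linux/futex.h>   /* FUTEX_WAKE */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    int futex_word = 0;

    /* Wake at most one waiter (val == 1) on the futex word; with no
     * waiters this simply returns 0.  The timeout, uaddr2 and val3
     * arguments are ignored for FUTEX_WAKE. */
    long woken = syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1,
                         NULL, NULL, 0);
    printf("woken: %ld\n", woken);
    return 0;
}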
7273 /* ??? Using host futex calls even when target atomic operations
7274 are not really atomic probably breaks things. However implementing
7275 futexes locally would make futexes shared between multiple processes
7276 tricky. Then again, they're probably useless because guest atomic
7277 operations won't work either. */
7278 #if defined(TARGET_NR_futex)
7279 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7280 target_ulong uaddr2, int val3)
7282 struct timespec ts, *pts;
7283 int base_op;
7285 /* ??? We assume FUTEX_* constants are the same on both host
7286 and target. */
7287 #ifdef FUTEX_CMD_MASK
7288 base_op = op & FUTEX_CMD_MASK;
7289 #else
7290 base_op = op;
7291 #endif
7292 switch (base_op) {
7293 case FUTEX_WAIT:
7294 case FUTEX_WAIT_BITSET:
7295 if (timeout) {
7296 pts = &ts;
7297 target_to_host_timespec(pts, timeout);
7298 } else {
7299 pts = NULL;
7301 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7302 case FUTEX_WAKE:
7303 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7304 case FUTEX_FD:
7305 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7306 case FUTEX_REQUEUE:
7307 case FUTEX_CMP_REQUEUE:
7308 case FUTEX_WAKE_OP:
7309 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7310 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7311 But the prototype takes a `struct timespec *'; insert casts
7312 to satisfy the compiler. We do not need to tswap TIMEOUT
7313 since it's not compared to guest memory. */
7314 pts = (struct timespec *)(uintptr_t) timeout;
7315 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7316 (base_op == FUTEX_CMP_REQUEUE
7317 ? tswap32(val3)
7318 : val3));
7319 default:
7320 return -TARGET_ENOSYS;
7323 #endif
7325 #if defined(TARGET_NR_futex_time64)
7326 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7327 target_ulong uaddr2, int val3)
7329 struct timespec ts, *pts;
7330 int base_op;
7332 /* ??? We assume FUTEX_* constants are the same on both host
7333 and target. */
7334 #ifdef FUTEX_CMD_MASK
7335 base_op = op & FUTEX_CMD_MASK;
7336 #else
7337 base_op = op;
7338 #endif
7339 switch (base_op) {
7340 case FUTEX_WAIT:
7341 case FUTEX_WAIT_BITSET:
7342 if (timeout) {
7343 pts = &ts;
7344 target_to_host_timespec64(pts, timeout);
7345 } else {
7346 pts = NULL;
7348 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7349 case FUTEX_WAKE:
7350 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7351 case FUTEX_FD:
7352 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7353 case FUTEX_REQUEUE:
7354 case FUTEX_CMP_REQUEUE:
7355 case FUTEX_WAKE_OP:
7356 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7357 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7358 But the prototype takes a `struct timespec *'; insert casts
7359 to satisfy the compiler. We do not need to tswap TIMEOUT
7360 since it's not compared to guest memory. */
7361 pts = (struct timespec *)(uintptr_t) timeout;
7362 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7363 (base_op == FUTEX_CMP_REQUEUE
7364 ? tswap32(val3)
7365 : val3));
7366 default:
7367 return -TARGET_ENOSYS;
7370 #endif
7372 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7373 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7374 abi_long handle, abi_long mount_id,
7375 abi_long flags)
7377 struct file_handle *target_fh;
7378 struct file_handle *fh;
7379 int mid = 0;
7380 abi_long ret;
7381 char *name;
7382 unsigned int size, total_size;
7384 if (get_user_s32(size, handle)) {
7385 return -TARGET_EFAULT;
7388 name = lock_user_string(pathname);
7389 if (!name) {
7390 return -TARGET_EFAULT;
7393 total_size = sizeof(struct file_handle) + size;
7394 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7395 if (!target_fh) {
7396 unlock_user(name, pathname, 0);
7397 return -TARGET_EFAULT;
7400 fh = g_malloc0(total_size);
7401 fh->handle_bytes = size;
7403 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7404 unlock_user(name, pathname, 0);
7406 /* man name_to_handle_at(2):
7407 * Other than the use of the handle_bytes field, the caller should treat
7408 * the file_handle structure as an opaque data type
7411 memcpy(target_fh, fh, total_size);
7412 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7413 target_fh->handle_type = tswap32(fh->handle_type);
7414 g_free(fh);
7415 unlock_user(target_fh, handle, total_size);
7417 if (put_user_s32(mid, mount_id)) {
7418 return -TARGET_EFAULT;
7421 return ret;
7424 #endif
7426 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7427 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7428 abi_long flags)
7430 struct file_handle *target_fh;
7431 struct file_handle *fh;
7432 unsigned int size, total_size;
7433 abi_long ret;
7435 if (get_user_s32(size, handle)) {
7436 return -TARGET_EFAULT;
7439 total_size = sizeof(struct file_handle) + size;
7440 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7441 if (!target_fh) {
7442 return -TARGET_EFAULT;
7445 fh = g_memdup(target_fh, total_size);
7446 fh->handle_bytes = size;
7447 fh->handle_type = tswap32(target_fh->handle_type);
7449 ret = get_errno(open_by_handle_at(mount_fd, fh,
7450 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7452 g_free(fh);
7454 unlock_user(target_fh, handle, total_size);
7456 return ret;
7458 #endif
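name_to_handle_at(2) and open_by_handle_at(2) are real kernel interfaces, and the usual calling pattern, which the two wrappers above reproduce across the guest/host boundary, probes once with handle_bytes == 0 to learn the required buffer size. A sketch following the man page; the path is arbitrary, and open_by_handle_at() itself would additionally need CAP_DAC_READ_SEARCH:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>      /* name_to_handle_at, AT_FDCWD, struct file_handle */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    struct file_handle *fh = malloc(sizeof(*fh));
    int mount_id;

    if (!fh) {
        return 1;
    }
    /* Probe with handle_bytes == 0: fails with EOVERFLOW and fills in
     * the size the kernel actually needs. */
    fh->handle_bytes = 0;
    if (name_to_handle_at(AT_FDCWD, "/etc/hostname", fh, &mount_id, 0) != -1
        || errno != EOVERFLOW) {
        perror("name_to_handle_at probe");
        return 1;
    }
    fh = realloc(fh, sizeof(*fh) + fh->handle_bytes);
    if (name_to_handle_at(AT_FDCWD, "/etc/hostname", fh, &mount_id, 0) == -1) {
        perror("name_to_handle_at");
        return 1;
    }
    printf("handle: %u bytes, type %d, mount_id %d\n",
           fh->handle_bytes, fh->handle_type, mount_id);
    free(fh);
    return 0;
}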
7460 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7462 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7464 int host_flags;
7465 target_sigset_t *target_mask;
7466 sigset_t host_mask;
7467 abi_long ret;
7469 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7470 return -TARGET_EINVAL;
7472 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7473 return -TARGET_EFAULT;
7476 target_to_host_sigset(&host_mask, target_mask);
7478 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7480 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7481 if (ret >= 0) {
7482 fd_trans_register(ret, &target_signalfd_trans);
7485 unlock_user_struct(target_mask, mask, 0);
7487 return ret;
7489 #endif
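Host-side, the faked descriptor above is a real signalfd; the guest-visible work is only the sigset and flag translation plus the fd_trans hook for byte-swapping the returned signalfd_siginfo. Typical native use of the underlying API, for reference:

#include <signal.h>
#include <sys/signalfd.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    sigset_t mask;
    struct signalfd_siginfo si;
    int fd;

    sigemptyset(&mask);
    sigaddset(&mask, SIGUSR1);
    /* The signal must be blocked, or it is delivered the normal way
     * instead of being queued on the descriptor. */
    sigprocmask(SIG_BLOCK, &mask, NULL);

    fd = signalfd(-1, &mask, SFD_CLOEXEC);
    kill(getpid(), SIGUSR1);

    if (read(fd, &si, sizeof(si)) == sizeof(si)) {
        printf("got signal %u\n", si.ssi_signo);
    }
    close(fd);
    return 0;
}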
7491 /* Map host to target signal numbers for the wait family of syscalls.
7492 Assume all other status bits are the same. */
7493 int host_to_target_waitstatus(int status)
7495 if (WIFSIGNALED(status)) {
7496 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7498 if (WIFSTOPPED(status)) {
7499 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7500 | (status & 0xff);
7502 return status;
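The rewrite above relies on the kernel's wait-status layout: the termination signal sits in the low 7 bits, the core-dump flag in bit 7, and the stop signal (or exit code) in bits 8..15. A small decoder using the standard macros, with hand-built status words as inputs:

#include <stdio.h>
#include <sys/wait.h>

static void describe(int status)
{
    if (WIFEXITED(status)) {
        printf("exited, code %d\n", WEXITSTATUS(status));
    } else if (WIFSIGNALED(status)) {
        /* Termination signal lives in the low 7 bits, the field that
         * host_to_target_waitstatus() rewrites. */
        printf("killed by signal %d\n", WTERMSIG(status));
    } else if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        printf("stopped by signal %d\n", WSTOPSIG(status));
    }
}

int main(void)
{
    describe(0x0009);   /* killed by SIGKILL (9) */
    describe(0x137f);   /* stopped by SIGSTOP (19 == 0x13) */
    describe(0x0100);   /* exited with code 1 */
    return 0;
}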
7505 static int open_self_cmdline(void *cpu_env, int fd)
7507 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7508 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7509 int i;
7511 for (i = 0; i < bprm->argc; i++) {
7512 size_t len = strlen(bprm->argv[i]) + 1;
7514 if (write(fd, bprm->argv[i], len) != len) {
7515 return -1;
7519 return 0;
7522 static int open_self_maps(void *cpu_env, int fd)
7524 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7525 TaskState *ts = cpu->opaque;
7526 GSList *map_info = read_self_maps();
7527 GSList *s;
7528 int count;
7530 for (s = map_info; s; s = g_slist_next(s)) {
7531 MapInfo *e = (MapInfo *) s->data;
7533 if (h2g_valid(e->start)) {
7534 unsigned long min = e->start;
7535 unsigned long max = e->end;
7536 int flags = page_get_flags(h2g(min));
7537 const char *path;
7539 max = h2g_valid(max - 1) ?
7540 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7542 if (page_check_range(h2g(min), max - min, flags) == -1) {
7543 continue;
7546 if (h2g(min) == ts->info->stack_limit) {
7547 path = "[stack]";
7548 } else {
7549 path = e->path;
7552 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7553 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7554 h2g(min), h2g(max - 1) + 1,
7555 e->is_read ? 'r' : '-',
7556 e->is_write ? 'w' : '-',
7557 e->is_exec ? 'x' : '-',
7558 e->is_priv ? 'p' : '-',
7559 (uint64_t) e->offset, e->dev, e->inode);
7560 if (path) {
7561 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7562 } else {
7563 dprintf(fd, "\n");
7568 free_self_maps(map_info);
7570 #ifdef TARGET_VSYSCALL_PAGE
7572 * We only support execution from the vsyscall page.
7573 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7575 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7576 " --xp 00000000 00:00 0",
7577 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7578 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
7579 #endif
7581 return 0;
7584 static int open_self_stat(void *cpu_env, int fd)
7586 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7587 TaskState *ts = cpu->opaque;
7588 g_autoptr(GString) buf = g_string_new(NULL);
7589 int i;
7591 for (i = 0; i < 44; i++) {
7592 if (i == 0) {
7593 /* pid */
7594 g_string_printf(buf, FMT_pid " ", getpid());
7595 } else if (i == 1) {
7596 /* app name */
7597 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7598 bin = bin ? bin + 1 : ts->bprm->argv[0];
7599 g_string_printf(buf, "(%.15s) ", bin);
7600 } else if (i == 27) {
7601 /* stack bottom */
7602 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7603 } else {
7604 /* for the rest, there is MasterCard */
7605 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7608 if (write(fd, buf->str, buf->len) != buf->len) {
7609 return -1;
7613 return 0;
7616 static int open_self_auxv(void *cpu_env, int fd)
7618 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7619 TaskState *ts = cpu->opaque;
7620 abi_ulong auxv = ts->info->saved_auxv;
7621 abi_ulong len = ts->info->auxv_len;
7622 char *ptr;
7625 * The auxiliary vector is stored on the target process stack;
7626 * read the whole auxv vector in and copy it to the file.
7628 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7629 if (ptr != NULL) {
7630 while (len > 0) {
7631 ssize_t r;
7632 r = write(fd, ptr, len);
7633 if (r <= 0) {
7634 break;
7636 len -= r;
7637 ptr += r;
7639 lseek(fd, 0, SEEK_SET);
7640 unlock_user(ptr, auxv, len);
7643 return 0;
7646 static int is_proc_myself(const char *filename, const char *entry)
7648 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7649 filename += strlen("/proc/");
7650 if (!strncmp(filename, "self/", strlen("self/"))) {
7651 filename += strlen("self/");
7652 } else if (*filename >= '1' && *filename <= '9') {
7653 char myself[80];
7654 snprintf(myself, sizeof(myself), "%d/", getpid());
7655 if (!strncmp(filename, myself, strlen(myself))) {
7656 filename += strlen(myself);
7657 } else {
7658 return 0;
7660 } else {
7661 return 0;
7663 if (!strcmp(filename, entry)) {
7664 return 1;
7667 return 0;
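As an illustration (the PID value here is made up): with QEMU running as PID 4242, both is_proc_myself("/proc/self/maps", "maps") and is_proc_myself("/proc/4242/maps", "maps") return 1, while "/proc/1/maps" does not match and falls through to a normal host open.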
7670 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7671 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7672 static int is_proc(const char *filename, const char *entry)
7674 return strcmp(filename, entry) == 0;
7676 #endif
7678 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7679 static int open_net_route(void *cpu_env, int fd)
7681 FILE *fp;
7682 char *line = NULL;
7683 size_t len = 0;
7684 ssize_t read;
7686 fp = fopen("/proc/net/route", "r");
7687 if (fp == NULL) {
7688 return -1;
7691 /* read header */
7693 read = getline(&line, &len, fp);
7694 dprintf(fd, "%s", line);
7696 /* read routes */
7698 while ((read = getline(&line, &len, fp)) != -1) {
7699 char iface[16];
7700 uint32_t dest, gw, mask;
7701 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7702 int fields;
7704 fields = sscanf(line,
7705 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7706 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7707 &mask, &mtu, &window, &irtt);
7708 if (fields != 11) {
7709 continue;
7711 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7712 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7713 metric, tswap32(mask), mtu, window, irtt);
7716 free(line);
7717 fclose(fp);
7719 return 0;
7721 #endif
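/*
 * Worked example of the swapping above (a sketch): on a little-endian
 * host, /proc/net/route encodes 127.0.0.1 as the hex word 0100007F.
 * A big-endian guest expects 7F000001, and tswap32() performs exactly
 * that byte reversal; when host and guest endianness match, this code
 * is compiled out entirely.
 */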
7723 #if defined(TARGET_SPARC)
7724 static int open_cpuinfo(void *cpu_env, int fd)
7726 dprintf(fd, "type\t\t: sun4u\n");
7727 return 0;
7729 #endif
7731 #if defined(TARGET_HPPA)
7732 static int open_cpuinfo(void *cpu_env, int fd)
7734 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7735 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7736 dprintf(fd, "capabilities\t: os32\n");
7737 dprintf(fd, "model\t\t: 9000/778/B160L\n");
7738 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7739 return 0;
7741 #endif
7743 #if defined(TARGET_M68K)
7744 static int open_hardware(void *cpu_env, int fd)
7746 dprintf(fd, "Model:\t\tqemu-m68k\n");
7747 return 0;
7749 #endif
7751 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7753 struct fake_open {
7754 const char *filename;
7755 int (*fill)(void *cpu_env, int fd);
7756 int (*cmp)(const char *s1, const char *s2);
7758 const struct fake_open *fake_open;
7759 static const struct fake_open fakes[] = {
7760 { "maps", open_self_maps, is_proc_myself },
7761 { "stat", open_self_stat, is_proc_myself },
7762 { "auxv", open_self_auxv, is_proc_myself },
7763 { "cmdline", open_self_cmdline, is_proc_myself },
7764 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7765 { "/proc/net/route", open_net_route, is_proc },
7766 #endif
7767 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7768 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7769 #endif
7770 #if defined(TARGET_M68K)
7771 { "/proc/hardware", open_hardware, is_proc },
7772 #endif
7773 { NULL, NULL, NULL }
7776 if (is_proc_myself(pathname, "exe")) {
7777 int execfd = qemu_getauxval(AT_EXECFD);
7778 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7781 for (fake_open = fakes; fake_open->filename; fake_open++) {
7782 if (fake_open->cmp(pathname, fake_open->filename)) {
7783 break;
7787 if (fake_open->filename) {
7788 const char *tmpdir;
7789 char filename[PATH_MAX];
7790 int fd, r;
7792 /* create a temporary file to hold the faked entry's contents */
7793 tmpdir = getenv("TMPDIR");
7794 if (!tmpdir)
7795 tmpdir = "/tmp";
7796 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7797 fd = mkstemp(filename);
7798 if (fd < 0) {
7799 return fd;
7801 unlink(filename);
7803 if ((r = fake_open->fill(cpu_env, fd))) {
7804 int e = errno;
7805 close(fd);
7806 errno = e;
7807 return r;
7809 lseek(fd, 0, SEEK_SET);
7811 return fd;
7814 return safe_openat(dirfd, path(pathname), flags, mode);
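/*
 * Illustrative flow (not additional code): a guest open of
 * "/proc/self/maps" matches the fakes[] table, so an anonymous
 * temporary file is created, unlinked, filled by open_self_maps()
 * and rewound; its descriptor is returned in place of the host file.
 * Paths that match no entry fall through to safe_openat() above.
 */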
7817 #define TIMER_MAGIC 0x0caf0000
7818 #define TIMER_MAGIC_MASK 0xffff0000
7820 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7821 static target_timer_t get_timer_id(abi_long arg)
7823 target_timer_t timerid = arg;
7825 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7826 return -TARGET_EINVAL;
7829 timerid &= 0xffff;
7831 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7832 return -TARGET_EINVAL;
7835 return timerid;
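/*
 * Round-trip sketch (make_timer_id() is hypothetical, shown only to
 * illustrate the encoding): the guest-visible ID for internal slot 3
 * is TIMER_MAGIC | 3 == 0x0caf0003, so get_timer_id(0x0caf0003)
 * returns 3, while get_timer_id(0x12340003) fails the magic check
 * and yields -TARGET_EINVAL.
 *
 *     static target_timer_t make_timer_id(int slot)
 *     {
 *         return TIMER_MAGIC | (slot & 0xffff);
 *     }
 */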
7838 static int target_to_host_cpu_mask(unsigned long *host_mask,
7839 size_t host_size,
7840 abi_ulong target_addr,
7841 size_t target_size)
7843 unsigned target_bits = sizeof(abi_ulong) * 8;
7844 unsigned host_bits = sizeof(*host_mask) * 8;
7845 abi_ulong *target_mask;
7846 unsigned i, j;
7848 assert(host_size >= target_size);
7850 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7851 if (!target_mask) {
7852 return -TARGET_EFAULT;
7854 memset(host_mask, 0, host_size);
7856 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7857 unsigned bit = i * target_bits;
7858 abi_ulong val;
7860 __get_user(val, &target_mask[i]);
7861 for (j = 0; j < target_bits; j++, bit++) {
7862 if (val & (1UL << j)) {
7863 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7868 unlock_user(target_mask, target_addr, 0);
7869 return 0;
7872 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7873 size_t host_size,
7874 abi_ulong target_addr,
7875 size_t target_size)
7877 unsigned target_bits = sizeof(abi_ulong) * 8;
7878 unsigned host_bits = sizeof(*host_mask) * 8;
7879 abi_ulong *target_mask;
7880 unsigned i, j;
7882 assert(host_size >= target_size);
7884 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7885 if (!target_mask) {
7886 return -TARGET_EFAULT;
7889 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7890 unsigned bit = i * target_bits;
7891 abi_ulong val = 0;
7893 for (j = 0; j < target_bits; j++, bit++) {
7894 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7895 val |= 1UL << j;
7898 __put_user(val, &target_mask[i]);
7901 unlock_user(target_mask, target_addr, target_size);
7902 return 0;
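/*
 * Worked example of the bit remapping (assuming a 32-bit guest on a
 * 64-bit host): target words { 0x1, 0x2 } mark CPUs 0 and 33; the two
 * conversions above pack them into the single host word
 * 0x0000000200000001 and unpack it back to the same pair, preserving
 * CPU numbering across the differing word sizes.
 */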
7905 /* This is an internal helper for do_syscall; funnelling everything
7906 * through a single return point makes it possible to perform actions,
7907 * such as logging of syscall results, in one place.
7908 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7910 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7911 abi_long arg2, abi_long arg3, abi_long arg4,
7912 abi_long arg5, abi_long arg6, abi_long arg7,
7913 abi_long arg8)
7915 CPUState *cpu = env_cpu(cpu_env);
7916 abi_long ret;
7917 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7918 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7919 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7920 || defined(TARGET_NR_statx)
7921 struct stat st;
7922 #endif
7923 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7924 || defined(TARGET_NR_fstatfs)
7925 struct statfs stfs;
7926 #endif
7927 void *p;
7929 switch(num) {
7930 case TARGET_NR_exit:
7931 /* In old applications this may be used to implement _exit(2).
7932 However in threaded applications it is used for thread termination,
7933 and _exit_group is used for application termination.
7934 Do thread termination if we have more than one thread. */
7936 if (block_signals()) {
7937 return -TARGET_ERESTARTSYS;
7940 pthread_mutex_lock(&clone_lock);
7942 if (CPU_NEXT(first_cpu)) {
7943 TaskState *ts = cpu->opaque;
7945 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7946 object_unref(OBJECT(cpu));
7948 * At this point the CPU should be unrealized and removed
7949 * from cpu lists. We can clean up the rest of the thread
7950 * data without the lock held.
7953 pthread_mutex_unlock(&clone_lock);
7955 if (ts->child_tidptr) {
7956 put_user_u32(0, ts->child_tidptr);
7957 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7958 NULL, NULL, 0);
7960 thread_cpu = NULL;
7961 g_free(ts);
7962 rcu_unregister_thread();
7963 pthread_exit(NULL);
7966 pthread_mutex_unlock(&clone_lock);
7967 preexit_cleanup(cpu_env, arg1);
7968 _exit(arg1);
7969 return 0; /* avoid warning */
7970 case TARGET_NR_read:
7971 if (arg2 == 0 && arg3 == 0) {
7972 return get_errno(safe_read(arg1, 0, 0));
7973 } else {
7974 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7975 return -TARGET_EFAULT;
7976 ret = get_errno(safe_read(arg1, p, arg3));
7977 if (ret >= 0 &&
7978 fd_trans_host_to_target_data(arg1)) {
7979 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7981 unlock_user(p, arg2, ret);
7983 return ret;
7984 case TARGET_NR_write:
7985 if (arg2 == 0 && arg3 == 0) {
7986 return get_errno(safe_write(arg1, 0, 0));
7988 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7989 return -TARGET_EFAULT;
7990 if (fd_trans_target_to_host_data(arg1)) {
7991 void *copy = g_malloc(arg3);
7992 memcpy(copy, p, arg3);
7993 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7994 if (ret >= 0) {
7995 ret = get_errno(safe_write(arg1, copy, ret));
7997 g_free(copy);
7998 } else {
7999 ret = get_errno(safe_write(arg1, p, arg3));
8001 unlock_user(p, arg2, 0);
8002 return ret;
8004 #ifdef TARGET_NR_open
8005 case TARGET_NR_open:
8006 if (!(p = lock_user_string(arg1)))
8007 return -TARGET_EFAULT;
8008 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8009 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8010 arg3));
8011 fd_trans_unregister(ret);
8012 unlock_user(p, arg1, 0);
8013 return ret;
8014 #endif
8015 case TARGET_NR_openat:
8016 if (!(p = lock_user_string(arg2)))
8017 return -TARGET_EFAULT;
8018 ret = get_errno(do_openat(cpu_env, arg1, p,
8019 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8020 arg4));
8021 fd_trans_unregister(ret);
8022 unlock_user(p, arg2, 0);
8023 return ret;
8024 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8025 case TARGET_NR_name_to_handle_at:
8026 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8027 return ret;
8028 #endif
8029 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8030 case TARGET_NR_open_by_handle_at:
8031 ret = do_open_by_handle_at(arg1, arg2, arg3);
8032 fd_trans_unregister(ret);
8033 return ret;
8034 #endif
8035 case TARGET_NR_close:
8036 fd_trans_unregister(arg1);
8037 return get_errno(close(arg1));
8039 case TARGET_NR_brk:
8040 return do_brk(arg1);
8041 #ifdef TARGET_NR_fork
8042 case TARGET_NR_fork:
8043 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8044 #endif
8045 #ifdef TARGET_NR_waitpid
8046 case TARGET_NR_waitpid:
8048 int status;
8049 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8050 if (!is_error(ret) && arg2 && ret
8051 && put_user_s32(host_to_target_waitstatus(status), arg2))
8052 return -TARGET_EFAULT;
8054 return ret;
8055 #endif
8056 #ifdef TARGET_NR_waitid
8057 case TARGET_NR_waitid:
8059 siginfo_t info;
8060 info.si_pid = 0;
8061 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8062 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8063 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8064 return -TARGET_EFAULT;
8065 host_to_target_siginfo(p, &info);
8066 unlock_user(p, arg3, sizeof(target_siginfo_t));
8069 return ret;
8070 #endif
8071 #ifdef TARGET_NR_creat /* not on alpha */
8072 case TARGET_NR_creat:
8073 if (!(p = lock_user_string(arg1)))
8074 return -TARGET_EFAULT;
8075 ret = get_errno(creat(p, arg2));
8076 fd_trans_unregister(ret);
8077 unlock_user(p, arg1, 0);
8078 return ret;
8079 #endif
8080 #ifdef TARGET_NR_link
8081 case TARGET_NR_link:
8083 void * p2;
8084 p = lock_user_string(arg1);
8085 p2 = lock_user_string(arg2);
8086 if (!p || !p2)
8087 ret = -TARGET_EFAULT;
8088 else
8089 ret = get_errno(link(p, p2));
8090 unlock_user(p2, arg2, 0);
8091 unlock_user(p, arg1, 0);
8093 return ret;
8094 #endif
8095 #if defined(TARGET_NR_linkat)
8096 case TARGET_NR_linkat:
8098 void * p2 = NULL;
8099 if (!arg2 || !arg4)
8100 return -TARGET_EFAULT;
8101 p = lock_user_string(arg2);
8102 p2 = lock_user_string(arg4);
8103 if (!p || !p2)
8104 ret = -TARGET_EFAULT;
8105 else
8106 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8107 unlock_user(p, arg2, 0);
8108 unlock_user(p2, arg4, 0);
8110 return ret;
8111 #endif
8112 #ifdef TARGET_NR_unlink
8113 case TARGET_NR_unlink:
8114 if (!(p = lock_user_string(arg1)))
8115 return -TARGET_EFAULT;
8116 ret = get_errno(unlink(p));
8117 unlock_user(p, arg1, 0);
8118 return ret;
8119 #endif
8120 #if defined(TARGET_NR_unlinkat)
8121 case TARGET_NR_unlinkat:
8122 if (!(p = lock_user_string(arg2)))
8123 return -TARGET_EFAULT;
8124 ret = get_errno(unlinkat(arg1, p, arg3));
8125 unlock_user(p, arg2, 0);
8126 return ret;
8127 #endif
8128 case TARGET_NR_execve:
8130 char **argp, **envp;
8131 int argc, envc;
8132 abi_ulong gp;
8133 abi_ulong guest_argp;
8134 abi_ulong guest_envp;
8135 abi_ulong addr;
8136 char **q;
8137 int total_size = 0;
8139 argc = 0;
8140 guest_argp = arg2;
8141 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8142 if (get_user_ual(addr, gp))
8143 return -TARGET_EFAULT;
8144 if (!addr)
8145 break;
8146 argc++;
8148 envc = 0;
8149 guest_envp = arg3;
8150 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8151 if (get_user_ual(addr, gp))
8152 return -TARGET_EFAULT;
8153 if (!addr)
8154 break;
8155 envc++;
8158 argp = g_new0(char *, argc + 1);
8159 envp = g_new0(char *, envc + 1);
8161 for (gp = guest_argp, q = argp; gp;
8162 gp += sizeof(abi_ulong), q++) {
8163 if (get_user_ual(addr, gp))
8164 goto execve_efault;
8165 if (!addr)
8166 break;
8167 if (!(*q = lock_user_string(addr)))
8168 goto execve_efault;
8169 total_size += strlen(*q) + 1;
8171 *q = NULL;
8173 for (gp = guest_envp, q = envp; gp;
8174 gp += sizeof(abi_ulong), q++) {
8175 if (get_user_ual(addr, gp))
8176 goto execve_efault;
8177 if (!addr)
8178 break;
8179 if (!(*q = lock_user_string(addr)))
8180 goto execve_efault;
8181 total_size += strlen(*q) + 1;
8183 *q = NULL;
8185 if (!(p = lock_user_string(arg1)))
8186 goto execve_efault;
8187 /* Although execve() is not an interruptible syscall it is
8188 * a special case where we must use the safe_syscall wrapper:
8189 * if we allow a signal to happen before we make the host
8190 * syscall then we will 'lose' it, because at the point of
8191 * execve the process leaves QEMU's control. So we use the
8192 * safe syscall wrapper to ensure that we either take the
8193 * signal as a guest signal, or else it does not happen
8194 * before the execve completes and makes it the other
8195 * program's problem.
8197 ret = get_errno(safe_execve(p, argp, envp));
8198 unlock_user(p, arg1, 0);
8200 goto execve_end;
8202 execve_efault:
8203 ret = -TARGET_EFAULT;
8205 execve_end:
8206 for (gp = guest_argp, q = argp; *q;
8207 gp += sizeof(abi_ulong), q++) {
8208 if (get_user_ual(addr, gp)
8209 || !addr)
8210 break;
8211 unlock_user(*q, addr, 0);
8213 for (gp = guest_envp, q = envp; *q;
8214 gp += sizeof(abi_ulong), q++) {
8215 if (get_user_ual(addr, gp)
8216 || !addr)
8217 break;
8218 unlock_user(*q, addr, 0);
8221 g_free(argp);
8222 g_free(envp);
8224 return ret;
8225 case TARGET_NR_chdir:
8226 if (!(p = lock_user_string(arg1)))
8227 return -TARGET_EFAULT;
8228 ret = get_errno(chdir(p));
8229 unlock_user(p, arg1, 0);
8230 return ret;
8231 #ifdef TARGET_NR_time
8232 case TARGET_NR_time:
8234 time_t host_time;
8235 ret = get_errno(time(&host_time));
8236 if (!is_error(ret)
8237 && arg1
8238 && put_user_sal(host_time, arg1))
8239 return -TARGET_EFAULT;
8241 return ret;
8242 #endif
8243 #ifdef TARGET_NR_mknod
8244 case TARGET_NR_mknod:
8245 if (!(p = lock_user_string(arg1)))
8246 return -TARGET_EFAULT;
8247 ret = get_errno(mknod(p, arg2, arg3));
8248 unlock_user(p, arg1, 0);
8249 return ret;
8250 #endif
8251 #if defined(TARGET_NR_mknodat)
8252 case TARGET_NR_mknodat:
8253 if (!(p = lock_user_string(arg2)))
8254 return -TARGET_EFAULT;
8255 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8256 unlock_user(p, arg2, 0);
8257 return ret;
8258 #endif
8259 #ifdef TARGET_NR_chmod
8260 case TARGET_NR_chmod:
8261 if (!(p = lock_user_string(arg1)))
8262 return -TARGET_EFAULT;
8263 ret = get_errno(chmod(p, arg2));
8264 unlock_user(p, arg1, 0);
8265 return ret;
8266 #endif
8267 #ifdef TARGET_NR_lseek
8268 case TARGET_NR_lseek:
8269 return get_errno(lseek(arg1, arg2, arg3));
8270 #endif
8271 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8272 /* Alpha specific */
8273 case TARGET_NR_getxpid:
8274 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8275 return get_errno(getpid());
8276 #endif
8277 #ifdef TARGET_NR_getpid
8278 case TARGET_NR_getpid:
8279 return get_errno(getpid());
8280 #endif
8281 case TARGET_NR_mount:
8283 /* need to look at the data field */
8284 void *p2, *p3;
8286 if (arg1) {
8287 p = lock_user_string(arg1);
8288 if (!p) {
8289 return -TARGET_EFAULT;
8291 } else {
8292 p = NULL;
8295 p2 = lock_user_string(arg2);
8296 if (!p2) {
8297 if (arg1) {
8298 unlock_user(p, arg1, 0);
8300 return -TARGET_EFAULT;
8303 if (arg3) {
8304 p3 = lock_user_string(arg3);
8305 if (!p3) {
8306 if (arg1) {
8307 unlock_user(p, arg1, 0);
8309 unlock_user(p2, arg2, 0);
8310 return -TARGET_EFAULT;
8312 } else {
8313 p3 = NULL;
8316 /* FIXME - arg5 should be locked, but it isn't clear how to
8317 * do that since it's not guaranteed to be a NULL-terminated
8318 * string.
8320 if (!arg5) {
8321 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8322 } else {
8323 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8325 ret = get_errno(ret);
8327 if (arg1) {
8328 unlock_user(p, arg1, 0);
8330 unlock_user(p2, arg2, 0);
8331 if (arg3) {
8332 unlock_user(p3, arg3, 0);
8335 return ret;
8336 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8337 #if defined(TARGET_NR_umount)
8338 case TARGET_NR_umount:
8339 #endif
8340 #if defined(TARGET_NR_oldumount)
8341 case TARGET_NR_oldumount:
8342 #endif
8343 if (!(p = lock_user_string(arg1)))
8344 return -TARGET_EFAULT;
8345 ret = get_errno(umount(p));
8346 unlock_user(p, arg1, 0);
8347 return ret;
8348 #endif
8349 #ifdef TARGET_NR_stime /* not on alpha */
8350 case TARGET_NR_stime:
8352 struct timespec ts;
8353 ts.tv_nsec = 0;
8354 if (get_user_sal(ts.tv_sec, arg1)) {
8355 return -TARGET_EFAULT;
8357 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8359 #endif
8360 #ifdef TARGET_NR_alarm /* not on alpha */
8361 case TARGET_NR_alarm:
8362 return alarm(arg1);
8363 #endif
8364 #ifdef TARGET_NR_pause /* not on alpha */
8365 case TARGET_NR_pause:
8366 if (!block_signals()) {
8367 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8369 return -TARGET_EINTR;
8370 #endif
8371 #ifdef TARGET_NR_utime
8372 case TARGET_NR_utime:
8374 struct utimbuf tbuf, *host_tbuf;
8375 struct target_utimbuf *target_tbuf;
8376 if (arg2) {
8377 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8378 return -TARGET_EFAULT;
8379 tbuf.actime = tswapal(target_tbuf->actime);
8380 tbuf.modtime = tswapal(target_tbuf->modtime);
8381 unlock_user_struct(target_tbuf, arg2, 0);
8382 host_tbuf = &tbuf;
8383 } else {
8384 host_tbuf = NULL;
8386 if (!(p = lock_user_string(arg1)))
8387 return -TARGET_EFAULT;
8388 ret = get_errno(utime(p, host_tbuf));
8389 unlock_user(p, arg1, 0);
8391 return ret;
8392 #endif
8393 #ifdef TARGET_NR_utimes
8394 case TARGET_NR_utimes:
8396 struct timeval *tvp, tv[2];
8397 if (arg2) {
8398 if (copy_from_user_timeval(&tv[0], arg2)
8399 || copy_from_user_timeval(&tv[1],
8400 arg2 + sizeof(struct target_timeval)))
8401 return -TARGET_EFAULT;
8402 tvp = tv;
8403 } else {
8404 tvp = NULL;
8406 if (!(p = lock_user_string(arg1)))
8407 return -TARGET_EFAULT;
8408 ret = get_errno(utimes(p, tvp));
8409 unlock_user(p, arg1, 0);
8411 return ret;
8412 #endif
8413 #if defined(TARGET_NR_futimesat)
8414 case TARGET_NR_futimesat:
8416 struct timeval *tvp, tv[2];
8417 if (arg3) {
8418 if (copy_from_user_timeval(&tv[0], arg3)
8419 || copy_from_user_timeval(&tv[1],
8420 arg3 + sizeof(struct target_timeval)))
8421 return -TARGET_EFAULT;
8422 tvp = tv;
8423 } else {
8424 tvp = NULL;
8426 if (!(p = lock_user_string(arg2))) {
8427 return -TARGET_EFAULT;
8429 ret = get_errno(futimesat(arg1, path(p), tvp));
8430 unlock_user(p, arg2, 0);
8432 return ret;
8433 #endif
8434 #ifdef TARGET_NR_access
8435 case TARGET_NR_access:
8436 if (!(p = lock_user_string(arg1))) {
8437 return -TARGET_EFAULT;
8439 ret = get_errno(access(path(p), arg2));
8440 unlock_user(p, arg1, 0);
8441 return ret;
8442 #endif
8443 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8444 case TARGET_NR_faccessat:
8445 if (!(p = lock_user_string(arg2))) {
8446 return -TARGET_EFAULT;
8448 ret = get_errno(faccessat(arg1, p, arg3, 0));
8449 unlock_user(p, arg2, 0);
8450 return ret;
8451 #endif
8452 #ifdef TARGET_NR_nice /* not on alpha */
8453 case TARGET_NR_nice:
8454 return get_errno(nice(arg1));
8455 #endif
8456 case TARGET_NR_sync:
8457 sync();
8458 return 0;
8459 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8460 case TARGET_NR_syncfs:
8461 return get_errno(syncfs(arg1));
8462 #endif
8463 case TARGET_NR_kill:
8464 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8465 #ifdef TARGET_NR_rename
8466 case TARGET_NR_rename:
8468 void *p2;
8469 p = lock_user_string(arg1);
8470 p2 = lock_user_string(arg2);
8471 if (!p || !p2)
8472 ret = -TARGET_EFAULT;
8473 else
8474 ret = get_errno(rename(p, p2));
8475 unlock_user(p2, arg2, 0);
8476 unlock_user(p, arg1, 0);
8478 return ret;
8479 #endif
8480 #if defined(TARGET_NR_renameat)
8481 case TARGET_NR_renameat:
8483 void *p2;
8484 p = lock_user_string(arg2);
8485 p2 = lock_user_string(arg4);
8486 if (!p || !p2)
8487 ret = -TARGET_EFAULT;
8488 else
8489 ret = get_errno(renameat(arg1, p, arg3, p2));
8490 unlock_user(p2, arg4, 0);
8491 unlock_user(p, arg2, 0);
8493 return ret;
8494 #endif
8495 #if defined(TARGET_NR_renameat2)
8496 case TARGET_NR_renameat2:
8498 void *p2;
8499 p = lock_user_string(arg2);
8500 p2 = lock_user_string(arg4);
8501 if (!p || !p2) {
8502 ret = -TARGET_EFAULT;
8503 } else {
8504 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8506 unlock_user(p2, arg4, 0);
8507 unlock_user(p, arg2, 0);
8509 return ret;
8510 #endif
8511 #ifdef TARGET_NR_mkdir
8512 case TARGET_NR_mkdir:
8513 if (!(p = lock_user_string(arg1)))
8514 return -TARGET_EFAULT;
8515 ret = get_errno(mkdir(p, arg2));
8516 unlock_user(p, arg1, 0);
8517 return ret;
8518 #endif
8519 #if defined(TARGET_NR_mkdirat)
8520 case TARGET_NR_mkdirat:
8521 if (!(p = lock_user_string(arg2)))
8522 return -TARGET_EFAULT;
8523 ret = get_errno(mkdirat(arg1, p, arg3));
8524 unlock_user(p, arg2, 0);
8525 return ret;
8526 #endif
8527 #ifdef TARGET_NR_rmdir
8528 case TARGET_NR_rmdir:
8529 if (!(p = lock_user_string(arg1)))
8530 return -TARGET_EFAULT;
8531 ret = get_errno(rmdir(p));
8532 unlock_user(p, arg1, 0);
8533 return ret;
8534 #endif
8535 case TARGET_NR_dup:
8536 ret = get_errno(dup(arg1));
8537 if (ret >= 0) {
8538 fd_trans_dup(arg1, ret);
8540 return ret;
8541 #ifdef TARGET_NR_pipe
8542 case TARGET_NR_pipe:
8543 return do_pipe(cpu_env, arg1, 0, 0);
8544 #endif
8545 #ifdef TARGET_NR_pipe2
8546 case TARGET_NR_pipe2:
8547 return do_pipe(cpu_env, arg1,
8548 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8549 #endif
8550 case TARGET_NR_times:
8552 struct target_tms *tmsp;
8553 struct tms tms;
8554 ret = get_errno(times(&tms));
8555 if (arg1) {
8556 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8557 if (!tmsp)
8558 return -TARGET_EFAULT;
8559 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8560 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8561 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8562 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8564 if (!is_error(ret))
8565 ret = host_to_target_clock_t(ret);
8567 return ret;
8568 case TARGET_NR_acct:
8569 if (arg1 == 0) {
8570 ret = get_errno(acct(NULL));
8571 } else {
8572 if (!(p = lock_user_string(arg1))) {
8573 return -TARGET_EFAULT;
8575 ret = get_errno(acct(path(p)));
8576 unlock_user(p, arg1, 0);
8578 return ret;
8579 #ifdef TARGET_NR_umount2
8580 case TARGET_NR_umount2:
8581 if (!(p = lock_user_string(arg1)))
8582 return -TARGET_EFAULT;
8583 ret = get_errno(umount2(p, arg2));
8584 unlock_user(p, arg1, 0);
8585 return ret;
8586 #endif
8587 case TARGET_NR_ioctl:
8588 return do_ioctl(arg1, arg2, arg3);
8589 #ifdef TARGET_NR_fcntl
8590 case TARGET_NR_fcntl:
8591 return do_fcntl(arg1, arg2, arg3);
8592 #endif
8593 case TARGET_NR_setpgid:
8594 return get_errno(setpgid(arg1, arg2));
8595 case TARGET_NR_umask:
8596 return get_errno(umask(arg1));
8597 case TARGET_NR_chroot:
8598 if (!(p = lock_user_string(arg1)))
8599 return -TARGET_EFAULT;
8600 ret = get_errno(chroot(p));
8601 unlock_user(p, arg1, 0);
8602 return ret;
8603 #ifdef TARGET_NR_dup2
8604 case TARGET_NR_dup2:
8605 ret = get_errno(dup2(arg1, arg2));
8606 if (ret >= 0) {
8607 fd_trans_dup(arg1, arg2);
8609 return ret;
8610 #endif
8611 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8612 case TARGET_NR_dup3:
8614 int host_flags;
8616 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8617 return -TARGET_EINVAL;
8619 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8620 ret = get_errno(dup3(arg1, arg2, host_flags));
8621 if (ret >= 0) {
8622 fd_trans_dup(arg1, arg2);
8624 return ret;
8626 #endif
8627 #ifdef TARGET_NR_getppid /* not on alpha */
8628 case TARGET_NR_getppid:
8629 return get_errno(getppid());
8630 #endif
8631 #ifdef TARGET_NR_getpgrp
8632 case TARGET_NR_getpgrp:
8633 return get_errno(getpgrp());
8634 #endif
8635 case TARGET_NR_setsid:
8636 return get_errno(setsid());
8637 #ifdef TARGET_NR_sigaction
8638 case TARGET_NR_sigaction:
8640 #if defined(TARGET_ALPHA)
8641 struct target_sigaction act, oact, *pact = 0;
8642 struct target_old_sigaction *old_act;
8643 if (arg2) {
8644 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8645 return -TARGET_EFAULT;
8646 act._sa_handler = old_act->_sa_handler;
8647 target_siginitset(&act.sa_mask, old_act->sa_mask);
8648 act.sa_flags = old_act->sa_flags;
8649 act.sa_restorer = 0;
8650 unlock_user_struct(old_act, arg2, 0);
8651 pact = &act;
8653 ret = get_errno(do_sigaction(arg1, pact, &oact));
8654 if (!is_error(ret) && arg3) {
8655 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8656 return -TARGET_EFAULT;
8657 old_act->_sa_handler = oact._sa_handler;
8658 old_act->sa_mask = oact.sa_mask.sig[0];
8659 old_act->sa_flags = oact.sa_flags;
8660 unlock_user_struct(old_act, arg3, 1);
8662 #elif defined(TARGET_MIPS)
8663 struct target_sigaction act, oact, *pact, *old_act;
8665 if (arg2) {
8666 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8667 return -TARGET_EFAULT;
8668 act._sa_handler = old_act->_sa_handler;
8669 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8670 act.sa_flags = old_act->sa_flags;
8671 unlock_user_struct(old_act, arg2, 0);
8672 pact = &act;
8673 } else {
8674 pact = NULL;
8677 ret = get_errno(do_sigaction(arg1, pact, &oact));
8679 if (!is_error(ret) && arg3) {
8680 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8681 return -TARGET_EFAULT;
8682 old_act->_sa_handler = oact._sa_handler;
8683 old_act->sa_flags = oact.sa_flags;
8684 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8685 old_act->sa_mask.sig[1] = 0;
8686 old_act->sa_mask.sig[2] = 0;
8687 old_act->sa_mask.sig[3] = 0;
8688 unlock_user_struct(old_act, arg3, 1);
8690 #else
8691 struct target_old_sigaction *old_act;
8692 struct target_sigaction act, oact, *pact;
8693 if (arg2) {
8694 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8695 return -TARGET_EFAULT;
8696 act._sa_handler = old_act->_sa_handler;
8697 target_siginitset(&act.sa_mask, old_act->sa_mask);
8698 act.sa_flags = old_act->sa_flags;
8699 act.sa_restorer = old_act->sa_restorer;
8700 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8701 act.ka_restorer = 0;
8702 #endif
8703 unlock_user_struct(old_act, arg2, 0);
8704 pact = &act;
8705 } else {
8706 pact = NULL;
8708 ret = get_errno(do_sigaction(arg1, pact, &oact));
8709 if (!is_error(ret) && arg3) {
8710 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8711 return -TARGET_EFAULT;
8712 old_act->_sa_handler = oact._sa_handler;
8713 old_act->sa_mask = oact.sa_mask.sig[0];
8714 old_act->sa_flags = oact.sa_flags;
8715 old_act->sa_restorer = oact.sa_restorer;
8716 unlock_user_struct(old_act, arg3, 1);
8718 #endif
8720 return ret;
8721 #endif
8722 case TARGET_NR_rt_sigaction:
8724 #if defined(TARGET_ALPHA)
8725 /* For Alpha and SPARC this is a 5-argument syscall, with
8726 * a 'restorer' parameter which must be copied into the
8727 * sa_restorer field of the sigaction struct.
8728 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8729 * and arg5 is the sigsetsize.
8730 * Alpha also has a separate rt_sigaction struct that it uses
8731 * here; SPARC uses the usual sigaction struct.
8733 struct target_rt_sigaction *rt_act;
8734 struct target_sigaction act, oact, *pact = 0;
8736 if (arg4 != sizeof(target_sigset_t)) {
8737 return -TARGET_EINVAL;
8739 if (arg2) {
8740 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8741 return -TARGET_EFAULT;
8742 act._sa_handler = rt_act->_sa_handler;
8743 act.sa_mask = rt_act->sa_mask;
8744 act.sa_flags = rt_act->sa_flags;
8745 act.sa_restorer = arg5;
8746 unlock_user_struct(rt_act, arg2, 0);
8747 pact = &act;
8749 ret = get_errno(do_sigaction(arg1, pact, &oact));
8750 if (!is_error(ret) && arg3) {
8751 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8752 return -TARGET_EFAULT;
8753 rt_act->_sa_handler = oact._sa_handler;
8754 rt_act->sa_mask = oact.sa_mask;
8755 rt_act->sa_flags = oact.sa_flags;
8756 unlock_user_struct(rt_act, arg3, 1);
8758 #else
8759 #ifdef TARGET_SPARC
8760 target_ulong restorer = arg4;
8761 target_ulong sigsetsize = arg5;
8762 #else
8763 target_ulong sigsetsize = arg4;
8764 #endif
8765 struct target_sigaction *act;
8766 struct target_sigaction *oact;
8768 if (sigsetsize != sizeof(target_sigset_t)) {
8769 return -TARGET_EINVAL;
8771 if (arg2) {
8772 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8773 return -TARGET_EFAULT;
8775 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8776 act->ka_restorer = restorer;
8777 #endif
8778 } else {
8779 act = NULL;
8781 if (arg3) {
8782 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8783 ret = -TARGET_EFAULT;
8784 goto rt_sigaction_fail;
8786 } else
8787 oact = NULL;
8788 ret = get_errno(do_sigaction(arg1, act, oact));
8789 rt_sigaction_fail:
8790 if (act)
8791 unlock_user_struct(act, arg2, 0);
8792 if (oact)
8793 unlock_user_struct(oact, arg3, 1);
8794 #endif
8796 return ret;
8797 #ifdef TARGET_NR_sgetmask /* not on alpha */
8798 case TARGET_NR_sgetmask:
8800 sigset_t cur_set;
8801 abi_ulong target_set;
8802 ret = do_sigprocmask(0, NULL, &cur_set);
8803 if (!ret) {
8804 host_to_target_old_sigset(&target_set, &cur_set);
8805 ret = target_set;
8808 return ret;
8809 #endif
8810 #ifdef TARGET_NR_ssetmask /* not on alpha */
8811 case TARGET_NR_ssetmask:
8813 sigset_t set, oset;
8814 abi_ulong target_set = arg1;
8815 target_to_host_old_sigset(&set, &target_set);
8816 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8817 if (!ret) {
8818 host_to_target_old_sigset(&target_set, &oset);
8819 ret = target_set;
8822 return ret;
8823 #endif
8824 #ifdef TARGET_NR_sigprocmask
8825 case TARGET_NR_sigprocmask:
8827 #if defined(TARGET_ALPHA)
8828 sigset_t set, oldset;
8829 abi_ulong mask;
8830 int how;
8832 switch (arg1) {
8833 case TARGET_SIG_BLOCK:
8834 how = SIG_BLOCK;
8835 break;
8836 case TARGET_SIG_UNBLOCK:
8837 how = SIG_UNBLOCK;
8838 break;
8839 case TARGET_SIG_SETMASK:
8840 how = SIG_SETMASK;
8841 break;
8842 default:
8843 return -TARGET_EINVAL;
8845 mask = arg2;
8846 target_to_host_old_sigset(&set, &mask);
8848 ret = do_sigprocmask(how, &set, &oldset);
8849 if (!is_error(ret)) {
8850 host_to_target_old_sigset(&mask, &oldset);
8851 ret = mask;
8852 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8854 #else
8855 sigset_t set, oldset, *set_ptr;
8856 int how;
8858 if (arg2) {
8859 switch (arg1) {
8860 case TARGET_SIG_BLOCK:
8861 how = SIG_BLOCK;
8862 break;
8863 case TARGET_SIG_UNBLOCK:
8864 how = SIG_UNBLOCK;
8865 break;
8866 case TARGET_SIG_SETMASK:
8867 how = SIG_SETMASK;
8868 break;
8869 default:
8870 return -TARGET_EINVAL;
8872 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8873 return -TARGET_EFAULT;
8874 target_to_host_old_sigset(&set, p);
8875 unlock_user(p, arg2, 0);
8876 set_ptr = &set;
8877 } else {
8878 how = 0;
8879 set_ptr = NULL;
8881 ret = do_sigprocmask(how, set_ptr, &oldset);
8882 if (!is_error(ret) && arg3) {
8883 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8884 return -TARGET_EFAULT;
8885 host_to_target_old_sigset(p, &oldset);
8886 unlock_user(p, arg3, sizeof(target_sigset_t));
8888 #endif
8890 return ret;
8891 #endif
8892 case TARGET_NR_rt_sigprocmask:
8894 int how = arg1;
8895 sigset_t set, oldset, *set_ptr;
8897 if (arg4 != sizeof(target_sigset_t)) {
8898 return -TARGET_EINVAL;
8901 if (arg2) {
8902 switch(how) {
8903 case TARGET_SIG_BLOCK:
8904 how = SIG_BLOCK;
8905 break;
8906 case TARGET_SIG_UNBLOCK:
8907 how = SIG_UNBLOCK;
8908 break;
8909 case TARGET_SIG_SETMASK:
8910 how = SIG_SETMASK;
8911 break;
8912 default:
8913 return -TARGET_EINVAL;
8915 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8916 return -TARGET_EFAULT;
8917 target_to_host_sigset(&set, p);
8918 unlock_user(p, arg2, 0);
8919 set_ptr = &set;
8920 } else {
8921 how = 0;
8922 set_ptr = NULL;
8924 ret = do_sigprocmask(how, set_ptr, &oldset);
8925 if (!is_error(ret) && arg3) {
8926 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8927 return -TARGET_EFAULT;
8928 host_to_target_sigset(p, &oldset);
8929 unlock_user(p, arg3, sizeof(target_sigset_t));
8932 return ret;
8933 #ifdef TARGET_NR_sigpending
8934 case TARGET_NR_sigpending:
8936 sigset_t set;
8937 ret = get_errno(sigpending(&set));
8938 if (!is_error(ret)) {
8939 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8940 return -TARGET_EFAULT;
8941 host_to_target_old_sigset(p, &set);
8942 unlock_user(p, arg1, sizeof(target_sigset_t));
8945 return ret;
8946 #endif
8947 case TARGET_NR_rt_sigpending:
8949 sigset_t set;
8951 /* Yes, this check is >, not != like most. We follow the kernel's
8952 * logic here: the kernel implements NR_sigpending through the
8953 * same code path, and in that case the old_sigset_t is smaller
8954 * in size.
8956 if (arg2 > sizeof(target_sigset_t)) {
8957 return -TARGET_EINVAL;
8960 ret = get_errno(sigpending(&set));
8961 if (!is_error(ret)) {
8962 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8963 return -TARGET_EFAULT;
8964 host_to_target_sigset(p, &set);
8965 unlock_user(p, arg1, sizeof(target_sigset_t));
8968 return ret;
8969 #ifdef TARGET_NR_sigsuspend
8970 case TARGET_NR_sigsuspend:
8972 TaskState *ts = cpu->opaque;
8973 #if defined(TARGET_ALPHA)
8974 abi_ulong mask = arg1;
8975 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8976 #else
8977 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8978 return -TARGET_EFAULT;
8979 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8980 unlock_user(p, arg1, 0);
8981 #endif
8982 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8983 SIGSET_T_SIZE));
8984 if (ret != -TARGET_ERESTARTSYS) {
8985 ts->in_sigsuspend = 1;
8988 return ret;
8989 #endif
8990 case TARGET_NR_rt_sigsuspend:
8992 TaskState *ts = cpu->opaque;
8994 if (arg2 != sizeof(target_sigset_t)) {
8995 return -TARGET_EINVAL;
8997 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8998 return -TARGET_EFAULT;
8999 target_to_host_sigset(&ts->sigsuspend_mask, p);
9000 unlock_user(p, arg1, 0);
9001 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9002 SIGSET_T_SIZE));
9003 if (ret != -TARGET_ERESTARTSYS) {
9004 ts->in_sigsuspend = 1;
9007 return ret;
9008 #ifdef TARGET_NR_rt_sigtimedwait
9009 case TARGET_NR_rt_sigtimedwait:
9011 sigset_t set;
9012 struct timespec uts, *puts;
9013 siginfo_t uinfo;
9015 if (arg4 != sizeof(target_sigset_t)) {
9016 return -TARGET_EINVAL;
9019 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9020 return -TARGET_EFAULT;
9021 target_to_host_sigset(&set, p);
9022 unlock_user(p, arg1, 0);
9023 if (arg3) {
9024 puts = &uts;
9025 if (target_to_host_timespec(puts, arg3)) {
9026 return -TARGET_EFAULT;
9028 } else {
9029 puts = NULL;
9031 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9032 SIGSET_T_SIZE));
9033 if (!is_error(ret)) {
9034 if (arg2) {
9035 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9037 if (!p) {
9038 return -TARGET_EFAULT;
9040 host_to_target_siginfo(p, &uinfo);
9041 unlock_user(p, arg2, sizeof(target_siginfo_t));
9043 ret = host_to_target_signal(ret);
9046 return ret;
9047 #endif
9048 case TARGET_NR_rt_sigqueueinfo:
9050 siginfo_t uinfo;
9052 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9053 if (!p) {
9054 return -TARGET_EFAULT;
9056 target_to_host_siginfo(&uinfo, p);
9057 unlock_user(p, arg3, 0);
9058 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9060 return ret;
9061 case TARGET_NR_rt_tgsigqueueinfo:
9063 siginfo_t uinfo;
9065 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9066 if (!p) {
9067 return -TARGET_EFAULT;
9069 target_to_host_siginfo(&uinfo, p);
9070 unlock_user(p, arg4, 0);
9071 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9073 return ret;
9074 #ifdef TARGET_NR_sigreturn
9075 case TARGET_NR_sigreturn:
9076 if (block_signals()) {
9077 return -TARGET_ERESTARTSYS;
9079 return do_sigreturn(cpu_env);
9080 #endif
9081 case TARGET_NR_rt_sigreturn:
9082 if (block_signals()) {
9083 return -TARGET_ERESTARTSYS;
9085 return do_rt_sigreturn(cpu_env);
9086 case TARGET_NR_sethostname:
9087 if (!(p = lock_user_string(arg1)))
9088 return -TARGET_EFAULT;
9089 ret = get_errno(sethostname(p, arg2));
9090 unlock_user(p, arg1, 0);
9091 return ret;
9092 #ifdef TARGET_NR_setrlimit
9093 case TARGET_NR_setrlimit:
9095 int resource = target_to_host_resource(arg1);
9096 struct target_rlimit *target_rlim;
9097 struct rlimit rlim;
9098 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9099 return -TARGET_EFAULT;
9100 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9101 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9102 unlock_user_struct(target_rlim, arg2, 0);
9104 * If we just passed through resource limit settings for memory then
9105 * they would also apply to QEMU's own allocations, and QEMU will
9106 * crash or hang or die if its allocations fail. Ideally we would
9107 * track the guest allocations in QEMU and apply the limits ourselves.
9108 * For now, just tell the guest the call succeeded but don't actually
9109 * limit anything.
9111 if (resource != RLIMIT_AS &&
9112 resource != RLIMIT_DATA &&
9113 resource != RLIMIT_STACK) {
9114 return get_errno(setrlimit(resource, &rlim));
9115 } else {
9116 return 0;
9119 #endif
9120 #ifdef TARGET_NR_getrlimit
9121 case TARGET_NR_getrlimit:
9123 int resource = target_to_host_resource(arg1);
9124 struct target_rlimit *target_rlim;
9125 struct rlimit rlim;
9127 ret = get_errno(getrlimit(resource, &rlim));
9128 if (!is_error(ret)) {
9129 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9130 return -TARGET_EFAULT;
9131 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9132 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9133 unlock_user_struct(target_rlim, arg2, 1);
9136 return ret;
9137 #endif
9138 case TARGET_NR_getrusage:
9140 struct rusage rusage;
9141 ret = get_errno(getrusage(arg1, &rusage));
9142 if (!is_error(ret)) {
9143 ret = host_to_target_rusage(arg2, &rusage);
9146 return ret;
9147 #if defined(TARGET_NR_gettimeofday)
9148 case TARGET_NR_gettimeofday:
9150 struct timeval tv;
9151 struct timezone tz;
9153 ret = get_errno(gettimeofday(&tv, &tz));
9154 if (!is_error(ret)) {
9155 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9156 return -TARGET_EFAULT;
9158 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9159 return -TARGET_EFAULT;
9163 return ret;
9164 #endif
9165 #if defined(TARGET_NR_settimeofday)
9166 case TARGET_NR_settimeofday:
9168 struct timeval tv, *ptv = NULL;
9169 struct timezone tz, *ptz = NULL;
9171 if (arg1) {
9172 if (copy_from_user_timeval(&tv, arg1)) {
9173 return -TARGET_EFAULT;
9175 ptv = &tv;
9178 if (arg2) {
9179 if (copy_from_user_timezone(&tz, arg2)) {
9180 return -TARGET_EFAULT;
9182 ptz = &tz;
9185 return get_errno(settimeofday(ptv, ptz));
9187 #endif
9188 #if defined(TARGET_NR_select)
9189 case TARGET_NR_select:
9190 #if defined(TARGET_WANT_NI_OLD_SELECT)
9191 /* some architectures used to have old_select here
9192 * but now return ENOSYS for it.
9194 ret = -TARGET_ENOSYS;
9195 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9196 ret = do_old_select(arg1);
9197 #else
9198 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9199 #endif
9200 return ret;
9201 #endif
9202 #ifdef TARGET_NR_pselect6
9203 case TARGET_NR_pselect6:
9205 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9206 fd_set rfds, wfds, efds;
9207 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9208 struct timespec ts, *ts_ptr;
9211 * The 6th arg is actually two args smashed together,
9212 * so we cannot use the C library.
9214 sigset_t set;
9215 struct {
9216 sigset_t *set;
9217 size_t size;
9218 } sig, *sig_ptr;
9220 abi_ulong arg_sigset, arg_sigsize, *arg7;
9221 target_sigset_t *target_sigset;
9223 n = arg1;
9224 rfd_addr = arg2;
9225 wfd_addr = arg3;
9226 efd_addr = arg4;
9227 ts_addr = arg5;
9229 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9230 if (ret) {
9231 return ret;
9233 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9234 if (ret) {
9235 return ret;
9237 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9238 if (ret) {
9239 return ret;
9243 * This takes a timespec, and not a timeval, so we cannot
9244 * use the do_select() helper ...
9246 if (ts_addr) {
9247 if (target_to_host_timespec(&ts, ts_addr)) {
9248 return -TARGET_EFAULT;
9250 ts_ptr = &ts;
9251 } else {
9252 ts_ptr = NULL;
9255 /* Extract the two packed args for the sigset */
9256 if (arg6) {
9257 sig_ptr = &sig;
9258 sig.size = SIGSET_T_SIZE;
9260 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9261 if (!arg7) {
9262 return -TARGET_EFAULT;
9264 arg_sigset = tswapal(arg7[0]);
9265 arg_sigsize = tswapal(arg7[1]);
9266 unlock_user(arg7, arg6, 0);
9268 if (arg_sigset) {
9269 sig.set = &set;
9270 if (arg_sigsize != sizeof(*target_sigset)) {
9271 /* Like the kernel, we enforce correct size sigsets */
9272 return -TARGET_EINVAL;
9274 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9275 sizeof(*target_sigset), 1);
9276 if (!target_sigset) {
9277 return -TARGET_EFAULT;
9279 target_to_host_sigset(&set, target_sigset);
9280 unlock_user(target_sigset, arg_sigset, 0);
9281 } else {
9282 sig.set = NULL;
9284 } else {
9285 sig_ptr = NULL;
9288 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9289 ts_ptr, sig_ptr));
9291 if (!is_error(ret)) {
9292 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9293 return -TARGET_EFAULT;
9294 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9295 return -TARGET_EFAULT;
9296 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9297 return -TARGET_EFAULT;
9299 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9300 return -TARGET_EFAULT;
9303 return ret;
9304 #endif
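/*
 * Layout of the packed 6th pselect6 argument decoded above (per the
 * kernel ABI; the struct is shown for illustration only): two
 * consecutive abi_ulongs in guest memory,
 *
 *     { abi_ulong sigset_addr;    guest pointer to a target_sigset_t
 *       abi_ulong sigset_size; }  must equal sizeof(target_sigset_t)
 *
 * which is why the code reads arg7[0] and arg7[1] rather than taking
 * a sigset pointer directly.
 */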
9305 #ifdef TARGET_NR_symlink
9306 case TARGET_NR_symlink:
9308 void *p2;
9309 p = lock_user_string(arg1);
9310 p2 = lock_user_string(arg2);
9311 if (!p || !p2)
9312 ret = -TARGET_EFAULT;
9313 else
9314 ret = get_errno(symlink(p, p2));
9315 unlock_user(p2, arg2, 0);
9316 unlock_user(p, arg1, 0);
9318 return ret;
9319 #endif
9320 #if defined(TARGET_NR_symlinkat)
9321 case TARGET_NR_symlinkat:
9323 void *p2;
9324 p = lock_user_string(arg1);
9325 p2 = lock_user_string(arg3);
9326 if (!p || !p2)
9327 ret = -TARGET_EFAULT;
9328 else
9329 ret = get_errno(symlinkat(p, arg2, p2));
9330 unlock_user(p2, arg3, 0);
9331 unlock_user(p, arg1, 0);
9333 return ret;
9334 #endif
9335 #ifdef TARGET_NR_readlink
9336 case TARGET_NR_readlink:
9338 void *p2;
9339 p = lock_user_string(arg1);
9340 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9341 if (!p || !p2) {
9342 ret = -TARGET_EFAULT;
9343 } else if (!arg3) {
9344 /* Short circuit this for the magic exe check. */
9345 ret = -TARGET_EINVAL;
9346 } else if (is_proc_myself((const char *)p, "exe")) {
9347 char real[PATH_MAX], *temp;
9348 temp = realpath(exec_path, real);
9349 /* Return value is # of bytes that we wrote to the buffer. */
9350 if (temp == NULL) {
9351 ret = get_errno(-1);
9352 } else {
9353 /* Don't worry about sign mismatch as earlier mapping
9354 * logic would have thrown a bad address error. */
9355 ret = MIN(strlen(real), arg3);
9356 /* We cannot NUL terminate the string. */
9357 memcpy(p2, real, ret);
9359 } else {
9360 ret = get_errno(readlink(path(p), p2, arg3));
9362 unlock_user(p2, arg2, ret);
9363 unlock_user(p, arg1, 0);
9365 return ret;
9366 #endif
9367 #if defined(TARGET_NR_readlinkat)
9368 case TARGET_NR_readlinkat:
9370 void *p2;
9371 p = lock_user_string(arg2);
9372 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9373 if (!p || !p2) {
9374 ret = -TARGET_EFAULT;
9375 } else if (is_proc_myself((const char *)p, "exe")) {
9376 char real[PATH_MAX], *temp;
9377 temp = realpath(exec_path, real);
9378 ret = temp == NULL ? get_errno(-1) : strlen(real);
9379 snprintf((char *)p2, arg4, "%s", real);
9380 } else {
9381 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9383 unlock_user(p2, arg3, ret);
9384 unlock_user(p, arg2, 0);
9386 return ret;
9387 #endif
9388 #ifdef TARGET_NR_swapon
9389 case TARGET_NR_swapon:
9390 if (!(p = lock_user_string(arg1)))
9391 return -TARGET_EFAULT;
9392 ret = get_errno(swapon(p, arg2));
9393 unlock_user(p, arg1, 0);
9394 return ret;
9395 #endif
9396 case TARGET_NR_reboot:
9397 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9398 /* arg4 is only used here; it must be ignored in all other cases */
9399 p = lock_user_string(arg4);
9400 if (!p) {
9401 return -TARGET_EFAULT;
9403 ret = get_errno(reboot(arg1, arg2, arg3, p));
9404 unlock_user(p, arg4, 0);
9405 } else {
9406 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9408 return ret;
9409 #ifdef TARGET_NR_mmap
9410 case TARGET_NR_mmap:
9411 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9412 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9413 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9414 || defined(TARGET_S390X)
9416 abi_ulong *v;
9417 abi_ulong v1, v2, v3, v4, v5, v6;
9418 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9419 return -TARGET_EFAULT;
9420 v1 = tswapal(v[0]);
9421 v2 = tswapal(v[1]);
9422 v3 = tswapal(v[2]);
9423 v4 = tswapal(v[3]);
9424 v5 = tswapal(v[4]);
9425 v6 = tswapal(v[5]);
9426 unlock_user(v, arg1, 0);
9427 ret = get_errno(target_mmap(v1, v2, v3,
9428 target_to_host_bitmask(v4, mmap_flags_tbl),
9429 v5, v6));
9431 #else
9432 ret = get_errno(target_mmap(arg1, arg2, arg3,
9433 target_to_host_bitmask(arg4, mmap_flags_tbl),
9434 arg5,
9435 arg6));
9436 #endif
9437 return ret;
9438 #endif
9439 #ifdef TARGET_NR_mmap2
9440 case TARGET_NR_mmap2:
9441 #ifndef MMAP_SHIFT
9442 #define MMAP_SHIFT 12
9443 #endif
9444 ret = target_mmap(arg1, arg2, arg3,
9445 target_to_host_bitmask(arg4, mmap_flags_tbl),
9446 arg5, arg6 << MMAP_SHIFT);
9447 return get_errno(ret);
9448 #endif
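/*
 * Worked example of the offset scaling above: with MMAP_SHIFT of 12,
 * a guest arg6 of 3 becomes byte offset 3 << 12 == 12288 (three 4 KiB
 * pages). Passing the offset in page units is what lets a 32-bit
 * guest mmap file offsets beyond 2^32 bytes.
 */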
9449 case TARGET_NR_munmap:
9450 return get_errno(target_munmap(arg1, arg2));
9451 case TARGET_NR_mprotect:
9453 TaskState *ts = cpu->opaque;
9454 /* Special hack to detect libc making the stack executable. */
9455 if ((arg3 & PROT_GROWSDOWN)
9456 && arg1 >= ts->info->stack_limit
9457 && arg1 <= ts->info->start_stack) {
9458 arg3 &= ~PROT_GROWSDOWN;
9459 arg2 = arg2 + arg1 - ts->info->stack_limit;
9460 arg1 = ts->info->stack_limit;
9463 return get_errno(target_mprotect(arg1, arg2, arg3));
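/*
 * Worked example of the PROT_GROWSDOWN rewrite above (addresses are
 * illustrative and assumed to lie inside the guest stack region):
 * with stack_limit == 0x40000000 and a request of arg1 == 0x40010000,
 * arg2 == 0x1000, the range is extended downward so that arg1 becomes
 * 0x40000000 and arg2 becomes 0x11000, i.e. the protection change
 * covers everything from the stack limit to the end of the request.
 */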
9464 #ifdef TARGET_NR_mremap
9465 case TARGET_NR_mremap:
9466 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9467 #endif
9468 /* ??? msync/mlock/munlock are broken for softmmu. */
9469 #ifdef TARGET_NR_msync
9470 case TARGET_NR_msync:
9471 return get_errno(msync(g2h(arg1), arg2, arg3));
9472 #endif
9473 #ifdef TARGET_NR_mlock
9474 case TARGET_NR_mlock:
9475 return get_errno(mlock(g2h(arg1), arg2));
9476 #endif
9477 #ifdef TARGET_NR_munlock
9478 case TARGET_NR_munlock:
9479 return get_errno(munlock(g2h(arg1), arg2));
9480 #endif
9481 #ifdef TARGET_NR_mlockall
9482 case TARGET_NR_mlockall:
9483 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9484 #endif
9485 #ifdef TARGET_NR_munlockall
9486 case TARGET_NR_munlockall:
9487 return get_errno(munlockall());
9488 #endif
9489 #ifdef TARGET_NR_truncate
9490 case TARGET_NR_truncate:
9491 if (!(p = lock_user_string(arg1)))
9492 return -TARGET_EFAULT;
9493 ret = get_errno(truncate(p, arg2));
9494 unlock_user(p, arg1, 0);
9495 return ret;
9496 #endif
9497 #ifdef TARGET_NR_ftruncate
9498 case TARGET_NR_ftruncate:
9499 return get_errno(ftruncate(arg1, arg2));
9500 #endif
9501 case TARGET_NR_fchmod:
9502 return get_errno(fchmod(arg1, arg2));
9503 #if defined(TARGET_NR_fchmodat)
9504 case TARGET_NR_fchmodat:
9505 if (!(p = lock_user_string(arg2)))
9506 return -TARGET_EFAULT;
9507 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9508 unlock_user(p, arg2, 0);
9509 return ret;
9510 #endif
9511 case TARGET_NR_getpriority:
9512 /* Note that negative values are valid for getpriority, so we must
9513 differentiate based on errno settings. */
9514 errno = 0;
9515 ret = getpriority(arg1, arg2);
9516 if (ret == -1 && errno != 0) {
9517 return -host_to_target_errno(errno);
9519 #ifdef TARGET_ALPHA
9520 /* Return value is the unbiased priority. Signal no error. */
9521 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9522 #else
9523 /* Return value is a biased priority to avoid negative numbers. */
9524 ret = 20 - ret;
9525 #endif
9526 return ret;
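/*
 * Worked example of the biasing above (non-Alpha targets): a host
 * nice value of -5 is returned as 20 - (-5) == 25, and 19 as 1, so
 * the guest always sees a value in 1..40, mirroring the kernel's
 * sys_getpriority() convention of avoiding negative return values.
 */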
9527 case TARGET_NR_setpriority:
9528 return get_errno(setpriority(arg1, arg2, arg3));
9529 #ifdef TARGET_NR_statfs
9530 case TARGET_NR_statfs:
9531 if (!(p = lock_user_string(arg1))) {
9532 return -TARGET_EFAULT;
9534 ret = get_errno(statfs(path(p), &stfs));
9535 unlock_user(p, arg1, 0);
9536 convert_statfs:
9537 if (!is_error(ret)) {
9538 struct target_statfs *target_stfs;
9540 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9541 return -TARGET_EFAULT;
9542 __put_user(stfs.f_type, &target_stfs->f_type);
9543 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9544 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9545 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9546 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9547 __put_user(stfs.f_files, &target_stfs->f_files);
9548 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9549 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9550 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9551 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9552 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9553 #ifdef _STATFS_F_FLAGS
9554 __put_user(stfs.f_flags, &target_stfs->f_flags);
9555 #else
9556 __put_user(0, &target_stfs->f_flags);
9557 #endif
9558 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9559 unlock_user_struct(target_stfs, arg2, 1);
9561 return ret;
9562 #endif
9563 #ifdef TARGET_NR_fstatfs
9564 case TARGET_NR_fstatfs:
9565 ret = get_errno(fstatfs(arg1, &stfs));
9566 goto convert_statfs;
9567 #endif
9568 #ifdef TARGET_NR_statfs64
9569 case TARGET_NR_statfs64:
9570 if (!(p = lock_user_string(arg1))) {
9571 return -TARGET_EFAULT;
9573 ret = get_errno(statfs(path(p), &stfs));
9574 unlock_user(p, arg1, 0);
9575 convert_statfs64:
9576 if (!is_error(ret)) {
9577 struct target_statfs64 *target_stfs;
9579 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9580 return -TARGET_EFAULT;
9581 __put_user(stfs.f_type, &target_stfs->f_type);
9582 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9583 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9584 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9585 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9586 __put_user(stfs.f_files, &target_stfs->f_files);
9587 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9588 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9589 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9590 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9591 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9592 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9593 unlock_user_struct(target_stfs, arg3, 1);
9595 return ret;
9596 case TARGET_NR_fstatfs64:
9597 ret = get_errno(fstatfs(arg1, &stfs));
9598 goto convert_statfs64;
9599 #endif
9600 #ifdef TARGET_NR_socketcall
9601 case TARGET_NR_socketcall:
9602 return do_socketcall(arg1, arg2);
9603 #endif
9604 #ifdef TARGET_NR_accept
9605 case TARGET_NR_accept:
9606 return do_accept4(arg1, arg2, arg3, 0);
9607 #endif
9608 #ifdef TARGET_NR_accept4
9609 case TARGET_NR_accept4:
9610 return do_accept4(arg1, arg2, arg3, arg4);
9611 #endif
9612 #ifdef TARGET_NR_bind
9613 case TARGET_NR_bind:
9614 return do_bind(arg1, arg2, arg3);
9615 #endif
9616 #ifdef TARGET_NR_connect
9617 case TARGET_NR_connect:
9618 return do_connect(arg1, arg2, arg3);
9619 #endif
9620 #ifdef TARGET_NR_getpeername
9621 case TARGET_NR_getpeername:
9622 return do_getpeername(arg1, arg2, arg3);
9623 #endif
9624 #ifdef TARGET_NR_getsockname
9625 case TARGET_NR_getsockname:
9626 return do_getsockname(arg1, arg2, arg3);
9627 #endif
9628 #ifdef TARGET_NR_getsockopt
9629 case TARGET_NR_getsockopt:
9630 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9631 #endif
9632 #ifdef TARGET_NR_listen
9633 case TARGET_NR_listen:
9634 return get_errno(listen(arg1, arg2));
9635 #endif
9636 #ifdef TARGET_NR_recv
9637 case TARGET_NR_recv:
9638 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9639 #endif
9640 #ifdef TARGET_NR_recvfrom
9641 case TARGET_NR_recvfrom:
9642 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9643 #endif
9644 #ifdef TARGET_NR_recvmsg
9645 case TARGET_NR_recvmsg:
9646 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9647 #endif
9648 #ifdef TARGET_NR_send
9649 case TARGET_NR_send:
9650 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9651 #endif
9652 #ifdef TARGET_NR_sendmsg
9653 case TARGET_NR_sendmsg:
9654 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9655 #endif
9656 #ifdef TARGET_NR_sendmmsg
9657 case TARGET_NR_sendmmsg:
9658 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9659 #endif
9660 #ifdef TARGET_NR_recvmmsg
9661 case TARGET_NR_recvmmsg:
9662 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9663 #endif
9664 #ifdef TARGET_NR_sendto
9665 case TARGET_NR_sendto:
9666 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9667 #endif
9668 #ifdef TARGET_NR_shutdown
9669 case TARGET_NR_shutdown:
9670 return get_errno(shutdown(arg1, arg2));
9671 #endif
9672 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9673 case TARGET_NR_getrandom:
9674 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9675 if (!p) {
9676 return -TARGET_EFAULT;
9678 ret = get_errno(getrandom(p, arg2, arg3));
9679 unlock_user(p, arg1, ret);
9680 return ret;
9681 #endif
9682 #ifdef TARGET_NR_socket
9683 case TARGET_NR_socket:
9684 return do_socket(arg1, arg2, arg3);
9685 #endif
9686 #ifdef TARGET_NR_socketpair
9687 case TARGET_NR_socketpair:
9688 return do_socketpair(arg1, arg2, arg3, arg4);
9689 #endif
9690 #ifdef TARGET_NR_setsockopt
9691 case TARGET_NR_setsockopt:
9692 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9693 #endif
9694 #if defined(TARGET_NR_syslog)
9695 case TARGET_NR_syslog:
9697 int len = arg2;
9699 switch (arg1) {
9700 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9701 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9702 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9703 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9704 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9705 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9706 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9707 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9708 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9709 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9710 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9711 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9713 if (len < 0) {
9714 return -TARGET_EINVAL;
9716 if (len == 0) {
9717 return 0;
9719 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9720 if (!p) {
9721 return -TARGET_EFAULT;
9723 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9724 unlock_user(p, arg2, arg3);
9726 return ret;
9727 default:
9728 return -TARGET_EINVAL;
9731 break;
9732 #endif
9733 case TARGET_NR_setitimer:
9735 struct itimerval value, ovalue, *pvalue;
9737 if (arg2) {
9738 pvalue = &value;
9739 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9740 || copy_from_user_timeval(&pvalue->it_value,
9741 arg2 + sizeof(struct target_timeval)))
9742 return -TARGET_EFAULT;
9743 } else {
9744 pvalue = NULL;
9746 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9747 if (!is_error(ret) && arg3) {
9748 if (copy_to_user_timeval(arg3,
9749 &ovalue.it_interval)
9750 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9751 &ovalue.it_value))
9752 return -TARGET_EFAULT;
9755 return ret;
9756 case TARGET_NR_getitimer:
9758 struct itimerval value;
9760 ret = get_errno(getitimer(arg1, &value));
9761 if (!is_error(ret) && arg2) {
9762 if (copy_to_user_timeval(arg2,
9763 &value.it_interval)
9764 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9765 &value.it_value))
9766 return -TARGET_EFAULT;
9769 return ret;
9770 #ifdef TARGET_NR_stat
9771 case TARGET_NR_stat:
9772 if (!(p = lock_user_string(arg1))) {
9773 return -TARGET_EFAULT;
9775 ret = get_errno(stat(path(p), &st));
9776 unlock_user(p, arg1, 0);
9777 goto do_stat;
9778 #endif
9779 #ifdef TARGET_NR_lstat
9780 case TARGET_NR_lstat:
9781 if (!(p = lock_user_string(arg1))) {
9782 return -TARGET_EFAULT;
9784 ret = get_errno(lstat(path(p), &st));
9785 unlock_user(p, arg1, 0);
9786 goto do_stat;
9787 #endif
9788 #ifdef TARGET_NR_fstat
9789 case TARGET_NR_fstat:
9791 ret = get_errno(fstat(arg1, &st));
9792 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9793 do_stat:
9794 #endif
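/* Common tail for stat/lstat/fstat: copy the host struct stat into
 * the target's layout one field at a time, byte-swapping via
 * __put_user() as needed. */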
9795 if (!is_error(ret)) {
9796 struct target_stat *target_st;
9798 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9799 return -TARGET_EFAULT;
9800 memset(target_st, 0, sizeof(*target_st));
9801 __put_user(st.st_dev, &target_st->st_dev);
9802 __put_user(st.st_ino, &target_st->st_ino);
9803 __put_user(st.st_mode, &target_st->st_mode);
9804 __put_user(st.st_uid, &target_st->st_uid);
9805 __put_user(st.st_gid, &target_st->st_gid);
9806 __put_user(st.st_nlink, &target_st->st_nlink);
9807 __put_user(st.st_rdev, &target_st->st_rdev);
9808 __put_user(st.st_size, &target_st->st_size);
9809 __put_user(st.st_blksize, &target_st->st_blksize);
9810 __put_user(st.st_blocks, &target_st->st_blocks);
9811 __put_user(st.st_atime, &target_st->target_st_atime);
9812 __put_user(st.st_mtime, &target_st->target_st_mtime);
9813 __put_user(st.st_ctime, &target_st->target_st_ctime);
9814 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9815 defined(TARGET_STAT_HAVE_NSEC)
9816 __put_user(st.st_atim.tv_nsec,
9817 &target_st->target_st_atime_nsec);
9818 __put_user(st.st_mtim.tv_nsec,
9819 &target_st->target_st_mtime_nsec);
9820 __put_user(st.st_ctim.tv_nsec,
9821 &target_st->target_st_ctime_nsec);
9822 #endif
9823 unlock_user_struct(target_st, arg2, 1);
9826 return ret;
9827 #endif
9828 case TARGET_NR_vhangup:
9829 return get_errno(vhangup());
9830 #ifdef TARGET_NR_syscall
9831 case TARGET_NR_syscall:
9832 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9833 arg6, arg7, arg8, 0);
9834 #endif
9835 #if defined(TARGET_NR_wait4)
9836 case TARGET_NR_wait4:
9838 int status;
9839 abi_long status_ptr = arg2;
9840 struct rusage rusage, *rusage_ptr;
9841 abi_ulong target_rusage = arg4;
9842 abi_long rusage_err;
9843 if (target_rusage)
9844 rusage_ptr = &rusage;
9845 else
9846 rusage_ptr = NULL;
9847 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9848 if (!is_error(ret)) {
9849 if (status_ptr && ret) {
9850 status = host_to_target_waitstatus(status);
9851 if (put_user_s32(status, status_ptr))
9852 return -TARGET_EFAULT;
9854 if (target_rusage) {
9855 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9856 if (rusage_err) {
9857 ret = rusage_err;
9862 return ret;
9863 #endif
9864 #ifdef TARGET_NR_swapoff
9865 case TARGET_NR_swapoff:
9866 if (!(p = lock_user_string(arg1)))
9867 return -TARGET_EFAULT;
9868 ret = get_errno(swapoff(p));
9869 unlock_user(p, arg1, 0);
9870 return ret;
9871 #endif
9872 case TARGET_NR_sysinfo:
9874 struct target_sysinfo *target_value;
9875 struct sysinfo value;
9876 ret = get_errno(sysinfo(&value));
9877 if (!is_error(ret) && arg1)
9879 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9880 return -TARGET_EFAULT;
9881 __put_user(value.uptime, &target_value->uptime);
9882 __put_user(value.loads[0], &target_value->loads[0]);
9883 __put_user(value.loads[1], &target_value->loads[1]);
9884 __put_user(value.loads[2], &target_value->loads[2]);
9885 __put_user(value.totalram, &target_value->totalram);
9886 __put_user(value.freeram, &target_value->freeram);
9887 __put_user(value.sharedram, &target_value->sharedram);
9888 __put_user(value.bufferram, &target_value->bufferram);
9889 __put_user(value.totalswap, &target_value->totalswap);
9890 __put_user(value.freeswap, &target_value->freeswap);
9891 __put_user(value.procs, &target_value->procs);
9892 __put_user(value.totalhigh, &target_value->totalhigh);
9893 __put_user(value.freehigh, &target_value->freehigh);
9894 __put_user(value.mem_unit, &target_value->mem_unit);
9895 unlock_user_struct(target_value, arg1, 1);
9898 return ret;
9899 #ifdef TARGET_NR_ipc
9900 case TARGET_NR_ipc:
9901 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9902 #endif
9903 #ifdef TARGET_NR_semget
9904 case TARGET_NR_semget:
9905 return get_errno(semget(arg1, arg2, arg3));
9906 #endif
9907 #ifdef TARGET_NR_semop
9908 case TARGET_NR_semop:
9909 return do_semtimedop(arg1, arg2, arg3, 0);
9910 #endif
9911 #ifdef TARGET_NR_semtimedop
9912 case TARGET_NR_semtimedop:
9913 return do_semtimedop(arg1, arg2, arg3, arg4);
9914 #endif
9915 #ifdef TARGET_NR_semctl
9916 case TARGET_NR_semctl:
9917 return do_semctl(arg1, arg2, arg3, arg4);
9918 #endif
9919 #ifdef TARGET_NR_msgctl
9920 case TARGET_NR_msgctl:
9921 return do_msgctl(arg1, arg2, arg3);
9922 #endif
9923 #ifdef TARGET_NR_msgget
9924 case TARGET_NR_msgget:
9925 return get_errno(msgget(arg1, arg2));
9926 #endif
9927 #ifdef TARGET_NR_msgrcv
9928 case TARGET_NR_msgrcv:
9929 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9930 #endif
9931 #ifdef TARGET_NR_msgsnd
9932 case TARGET_NR_msgsnd:
9933 return do_msgsnd(arg1, arg2, arg3, arg4);
9934 #endif
9935 #ifdef TARGET_NR_shmget
9936 case TARGET_NR_shmget:
9937 return get_errno(shmget(arg1, arg2, arg3));
9938 #endif
9939 #ifdef TARGET_NR_shmctl
9940 case TARGET_NR_shmctl:
9941 return do_shmctl(arg1, arg2, arg3);
9942 #endif
9943 #ifdef TARGET_NR_shmat
9944 case TARGET_NR_shmat:
9945 return do_shmat(cpu_env, arg1, arg2, arg3);
9946 #endif
9947 #ifdef TARGET_NR_shmdt
9948 case TARGET_NR_shmdt:
9949 return do_shmdt(arg1);
9950 #endif
9951 case TARGET_NR_fsync:
9952 return get_errno(fsync(arg1));
9953 case TARGET_NR_clone:
9954 /* Linux manages to have three different orderings for its
9955 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9956 * match the kernel's CONFIG_CLONE_* settings.
9957 * Microblaze is further special in that it uses a sixth
9958 * implicit argument to clone for the TLS pointer.
9959 */
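/* Guest argument orders, as encoded by the do_fork() calls below
 * (do_fork takes flags, newsp, parent_tidptr, newtls, child_tidptr):
 *   default:    flags, newsp, parent_tidptr, child_tidptr, tls
 *   BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
 *   BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
 */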
9960 #if defined(TARGET_MICROBLAZE)
9961 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9962 #elif defined(TARGET_CLONE_BACKWARDS)
9963 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9964 #elif defined(TARGET_CLONE_BACKWARDS2)
9965 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9966 #else
9967 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9968 #endif
9969 return ret;
9970 #ifdef __NR_exit_group
9971 /* new thread calls */
9972 case TARGET_NR_exit_group:
9973 preexit_cleanup(cpu_env, arg1);
9974 return get_errno(exit_group(arg1));
9975 #endif
9976 case TARGET_NR_setdomainname:
9977 if (!(p = lock_user_string(arg1)))
9978 return -TARGET_EFAULT;
9979 ret = get_errno(setdomainname(p, arg2));
9980 unlock_user(p, arg1, 0);
9981 return ret;
9982 case TARGET_NR_uname:
9983 /* no need to transcode because we use the linux syscall */
9985 struct new_utsname *buf;
9987 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9988 return -TARGET_EFAULT;
9989 ret = get_errno(sys_uname(buf));
9990 if (!is_error(ret)) {
9991 /* Overwrite the native machine name with whatever is being
9992 emulated. */
9993 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9994 sizeof(buf->machine));
9995 /* Allow the user to override the reported release. */
9996 if (qemu_uname_release && *qemu_uname_release) {
9997 g_strlcpy(buf->release, qemu_uname_release,
9998 sizeof(buf->release));
10001 unlock_user_struct(buf, arg1, 1);
10003 return ret;
10004 #ifdef TARGET_I386
10005 case TARGET_NR_modify_ldt:
10006 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10007 #if !defined(TARGET_X86_64)
10008 case TARGET_NR_vm86:
10009 return do_vm86(cpu_env, arg1, arg2);
10010 #endif
10011 #endif
10012 #if defined(TARGET_NR_adjtimex)
10013 case TARGET_NR_adjtimex:
10015 struct timex host_buf;
10017 if (target_to_host_timex(&host_buf, arg1) != 0) {
10018 return -TARGET_EFAULT;
10020 ret = get_errno(adjtimex(&host_buf));
10021 if (!is_error(ret)) {
10022 if (host_to_target_timex(arg1, &host_buf) != 0) {
10023 return -TARGET_EFAULT;
10027 return ret;
10028 #endif
10029 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10030 case TARGET_NR_clock_adjtime:
10032 struct timex htx, *phtx = &htx;
10034 if (target_to_host_timex(phtx, arg2) != 0) {
10035 return -TARGET_EFAULT;
10037 ret = get_errno(clock_adjtime(arg1, phtx));
10038 if (!is_error(ret) && phtx) {
10039 if (host_to_target_timex(arg2, phtx) != 0) {
10040 return -TARGET_EFAULT;
10044 return ret;
10045 #endif
10046 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10047 case TARGET_NR_clock_adjtime64:
10049 struct timex htx;
10051 if (target_to_host_timex64(&htx, arg2) != 0) {
10052 return -TARGET_EFAULT;
10054 ret = get_errno(clock_adjtime(arg1, &htx));
10055 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10056 return -TARGET_EFAULT;
10059 return ret;
10060 #endif
10061 case TARGET_NR_getpgid:
10062 return get_errno(getpgid(arg1));
10063 case TARGET_NR_fchdir:
10064 return get_errno(fchdir(arg1));
10065 case TARGET_NR_personality:
10066 return get_errno(personality(arg1));
10067 #ifdef TARGET_NR__llseek /* Not on alpha */
10068 case TARGET_NR__llseek:
10070 int64_t res;
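/* On hosts without __NR_llseek (e.g. 64-bit hosts) the two 32-bit
 * guest halves can be combined and passed to plain lseek();
 * otherwise hand them to the host's _llseek. */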
10071 #if !defined(__NR_llseek)
10072 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10073 if (res == -1) {
10074 ret = get_errno(res);
10075 } else {
10076 ret = 0;
10078 #else
10079 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10080 #endif
10081 if ((ret == 0) && put_user_s64(res, arg4)) {
10082 return -TARGET_EFAULT;
10085 return ret;
10086 #endif
10087 #ifdef TARGET_NR_getdents
10088 case TARGET_NR_getdents:
10089 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10090 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10092 struct target_dirent *target_dirp;
10093 struct linux_dirent *dirp;
10094 abi_long count = arg3;
10096 dirp = g_try_malloc(count);
10097 if (!dirp) {
10098 return -TARGET_ENOMEM;
10101 ret = get_errno(sys_getdents(arg1, dirp, count));
10102 if (!is_error(ret)) {
10103 struct linux_dirent *de;
10104 struct target_dirent *tde;
10105 int len = ret;
10106 int reclen, treclen;
10107 int count1, tnamelen;
10109 count1 = 0;
10110 de = dirp;
10111 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10112 return -TARGET_EFAULT;
10113 tde = target_dirp;
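/* Walk the host records and repack each one into the target's
 * narrower layout; each cursor advances by its own record length. */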
10114 while (len > 0) {
10115 reclen = de->d_reclen;
10116 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10117 assert(tnamelen >= 0);
10118 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10119 assert(count1 + treclen <= count);
10120 tde->d_reclen = tswap16(treclen);
10121 tde->d_ino = tswapal(de->d_ino);
10122 tde->d_off = tswapal(de->d_off);
10123 memcpy(tde->d_name, de->d_name, tnamelen);
10124 de = (struct linux_dirent *)((char *)de + reclen);
10125 len -= reclen;
10126 tde = (struct target_dirent *)((char *)tde + treclen);
10127 count1 += treclen;
10129 ret = count1;
10130 unlock_user(target_dirp, arg2, ret);
10132 g_free(dirp);
10134 #else
10136 struct linux_dirent *dirp;
10137 abi_long count = arg3;
10139 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10140 return -TARGET_EFAULT;
10141 ret = get_errno(sys_getdents(arg1, dirp, count));
10142 if (!is_error(ret)) {
10143 struct linux_dirent *de;
10144 int len = ret;
10145 int reclen;
10146 de = dirp;
10147 while (len > 0) {
10148 reclen = de->d_reclen;
10149 if (reclen > len)
10150 break;
10151 de->d_reclen = tswap16(reclen);
10152 tswapls(&de->d_ino);
10153 tswapls(&de->d_off);
10154 de = (struct linux_dirent *)((char *)de + reclen);
10155 len -= reclen;
10158 unlock_user(dirp, arg2, ret);
10160 #endif
10161 #else
10162 /* Implement getdents in terms of getdents64 */
10164 struct linux_dirent64 *dirp;
10165 abi_long count = arg3;
10167 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10168 if (!dirp) {
10169 return -TARGET_EFAULT;
10171 ret = get_errno(sys_getdents64(arg1, dirp, count));
10172 if (!is_error(ret)) {
10173 /* Convert the dirent64 structs to target dirent. We do this
10174 * in-place, since we can guarantee that a target_dirent is no
10175 * larger than a dirent64; however this means we have to be
10176 * careful to read everything before writing in the new format.
10177 */
10178 struct linux_dirent64 *de;
10179 struct target_dirent *tde;
10180 int len = ret;
10181 int tlen = 0;
10183 de = dirp;
10184 tde = (struct target_dirent *)dirp;
10185 while (len > 0) {
10186 int namelen, treclen;
10187 int reclen = de->d_reclen;
10188 uint64_t ino = de->d_ino;
10189 int64_t off = de->d_off;
10190 uint8_t type = de->d_type;
10192 namelen = strlen(de->d_name);
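/* The +2 covers the trailing NUL plus the d_type byte stored in
 * the last byte of the record (see below). */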
10193 treclen = offsetof(struct target_dirent, d_name)
10194 + namelen + 2;
10195 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10197 memmove(tde->d_name, de->d_name, namelen + 1);
10198 tde->d_ino = tswapal(ino);
10199 tde->d_off = tswapal(off);
10200 tde->d_reclen = tswap16(treclen);
10201 /* The target_dirent type is in what was formerly a padding
10202 * byte at the end of the structure:
10203 */
10204 *(((char *)tde) + treclen - 1) = type;
10206 de = (struct linux_dirent64 *)((char *)de + reclen);
10207 tde = (struct target_dirent *)((char *)tde + treclen);
10208 len -= reclen;
10209 tlen += treclen;
10211 ret = tlen;
10213 unlock_user(dirp, arg2, ret);
10215 #endif
10216 return ret;
10217 #endif /* TARGET_NR_getdents */
10218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10219 case TARGET_NR_getdents64:
10221 struct linux_dirent64 *dirp;
10222 abi_long count = arg3;
10223 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10224 return -TARGET_EFAULT;
10225 ret = get_errno(sys_getdents64(arg1, dirp, count));
10226 if (!is_error(ret)) {
10227 struct linux_dirent64 *de;
10228 int len = ret;
10229 int reclen;
10230 de = dirp;
10231 while (len > 0) {
10232 reclen = de->d_reclen;
10233 if (reclen > len)
10234 break;
10235 de->d_reclen = tswap16(reclen);
10236 tswap64s((uint64_t *)&de->d_ino);
10237 tswap64s((uint64_t *)&de->d_off);
10238 de = (struct linux_dirent64 *)((char *)de + reclen);
10239 len -= reclen;
10242 unlock_user(dirp, arg2, ret);
10244 return ret;
10245 #endif /* TARGET_NR_getdents64 */
10246 #if defined(TARGET_NR__newselect)
10247 case TARGET_NR__newselect:
10248 return do_select(arg1, arg2, arg3, arg4, arg5);
10249 #endif
10250 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10251 # ifdef TARGET_NR_poll
10252 case TARGET_NR_poll:
10253 # endif
10254 # ifdef TARGET_NR_ppoll
10255 case TARGET_NR_ppoll:
10256 # endif
10258 struct target_pollfd *target_pfd;
10259 unsigned int nfds = arg2;
10260 struct pollfd *pfd;
10261 unsigned int i;
10263 pfd = NULL;
10264 target_pfd = NULL;
10265 if (nfds) {
10266 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10267 return -TARGET_EINVAL;
10270 target_pfd = lock_user(VERIFY_WRITE, arg1,
10271 sizeof(struct target_pollfd) * nfds, 1);
10272 if (!target_pfd) {
10273 return -TARGET_EFAULT;
10276 pfd = alloca(sizeof(struct pollfd) * nfds);
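/* Convert each target pollfd (fd and requested events) to host
 * byte order; revents is filled in on the way back out. */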
10277 for (i = 0; i < nfds; i++) {
10278 pfd[i].fd = tswap32(target_pfd[i].fd);
10279 pfd[i].events = tswap16(target_pfd[i].events);
10283 switch (num) {
10284 # ifdef TARGET_NR_ppoll
10285 case TARGET_NR_ppoll:
10287 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10288 target_sigset_t *target_set;
10289 sigset_t _set, *set = &_set;
10291 if (arg3) {
10292 if (target_to_host_timespec(timeout_ts, arg3)) {
10293 unlock_user(target_pfd, arg1, 0);
10294 return -TARGET_EFAULT;
10296 } else {
10297 timeout_ts = NULL;
10300 if (arg4) {
10301 if (arg5 != sizeof(target_sigset_t)) {
10302 unlock_user(target_pfd, arg1, 0);
10303 return -TARGET_EINVAL;
10306 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10307 if (!target_set) {
10308 unlock_user(target_pfd, arg1, 0);
10309 return -TARGET_EFAULT;
10311 target_to_host_sigset(set, target_set);
10312 } else {
10313 set = NULL;
10316 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10317 set, SIGSET_T_SIZE));
10319 if (!is_error(ret) && arg3) {
10320 host_to_target_timespec(arg3, timeout_ts);
10322 if (arg4) {
10323 unlock_user(target_set, arg4, 0);
10325 break;
10327 # endif
10328 # ifdef TARGET_NR_poll
10329 case TARGET_NR_poll:
10331 struct timespec ts, *pts;
10333 if (arg3 >= 0) {
10334 /* Convert ms to secs, ns */
10335 ts.tv_sec = arg3 / 1000;
10336 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10337 pts = &ts;
10338 } else {
10339 /* A negative poll() timeout means "infinite" */
10340 pts = NULL;
10342 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10343 break;
10345 # endif
10346 default:
10347 g_assert_not_reached();
10350 if (!is_error(ret)) {
10351 for (i = 0; i < nfds; i++) {
10352 target_pfd[i].revents = tswap16(pfd[i].revents);
10355 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10357 return ret;
10358 #endif
10359 case TARGET_NR_flock:
10360 /* NOTE: the flock constant seems to be the same for every
10361 Linux platform */
10362 return get_errno(safe_flock(arg1, arg2));
10363 case TARGET_NR_readv:
10365 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10366 if (vec != NULL) {
10367 ret = get_errno(safe_readv(arg1, vec, arg3));
10368 unlock_iovec(vec, arg2, arg3, 1);
10369 } else {
10370 ret = -host_to_target_errno(errno);
10373 return ret;
10374 case TARGET_NR_writev:
10376 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10377 if (vec != NULL) {
10378 ret = get_errno(safe_writev(arg1, vec, arg3));
10379 unlock_iovec(vec, arg2, arg3, 0);
10380 } else {
10381 ret = -host_to_target_errno(errno);
10384 return ret;
10385 #if defined(TARGET_NR_preadv)
10386 case TARGET_NR_preadv:
10388 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10389 if (vec != NULL) {
10390 unsigned long low, high;
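/* Split the guest's 64-bit offset pair into the low/high words
 * the host safe_preadv() expects. */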
10392 target_to_host_low_high(arg4, arg5, &low, &high);
10393 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10394 unlock_iovec(vec, arg2, arg3, 1);
10395 } else {
10396 ret = -host_to_target_errno(errno);
10399 return ret;
10400 #endif
10401 #if defined(TARGET_NR_pwritev)
10402 case TARGET_NR_pwritev:
10404 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10405 if (vec != NULL) {
10406 unsigned long low, high;
10408 target_to_host_low_high(arg4, arg5, &low, &high);
10409 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10410 unlock_iovec(vec, arg2, arg3, 0);
10411 } else {
10412 ret = -host_to_target_errno(errno);
10415 return ret;
10416 #endif
10417 case TARGET_NR_getsid:
10418 return get_errno(getsid(arg1));
10419 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10420 case TARGET_NR_fdatasync:
10421 return get_errno(fdatasync(arg1));
10422 #endif
10423 #ifdef TARGET_NR__sysctl
10424 case TARGET_NR__sysctl:
10425 /* We don't implement this, but ENOTDIR is always a safe
10426 return value. */
10427 return -TARGET_ENOTDIR;
10428 #endif
10429 case TARGET_NR_sched_getaffinity:
10431 unsigned int mask_size;
10432 unsigned long *mask;
10434 /*
10435 * sched_getaffinity needs multiples of ulong, so we need to take
10436 * care of mismatches between target ulong and host ulong sizes.
10437 */
10438 if (arg2 & (sizeof(abi_ulong) - 1)) {
10439 return -TARGET_EINVAL;
10441 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10443 mask = alloca(mask_size);
10444 memset(mask, 0, mask_size);
10445 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10447 if (!is_error(ret)) {
10448 if (ret > arg2) {
10449 /* More data returned than the caller's buffer will fit.
10450 * This only happens if sizeof(abi_long) < sizeof(long)
10451 * and the caller passed us a buffer holding an odd number
10452 * of abi_longs. If the host kernel is actually using the
10453 * extra 4 bytes then fail EINVAL; otherwise we can just
10454 * ignore them and only copy the interesting part.
10455 */
10456 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10457 if (numcpus > arg2 * 8) {
10458 return -TARGET_EINVAL;
10460 ret = arg2;
10463 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10464 return -TARGET_EFAULT;
10468 return ret;
10469 case TARGET_NR_sched_setaffinity:
10471 unsigned int mask_size;
10472 unsigned long *mask;
10474 /*
10475 * sched_setaffinity needs multiples of ulong, so we need to take
10476 * care of mismatches between target ulong and host ulong sizes.
10477 */
10478 if (arg2 & (sizeof(abi_ulong) - 1)) {
10479 return -TARGET_EINVAL;
10481 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10482 mask = alloca(mask_size);
10484 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10485 if (ret) {
10486 return ret;
10489 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10491 case TARGET_NR_getcpu:
10493 unsigned cpu, node;
10494 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10495 arg2 ? &node : NULL,
10496 NULL));
10497 if (is_error(ret)) {
10498 return ret;
10500 if (arg1 && put_user_u32(cpu, arg1)) {
10501 return -TARGET_EFAULT;
10503 if (arg2 && put_user_u32(node, arg2)) {
10504 return -TARGET_EFAULT;
10507 return ret;
10508 case TARGET_NR_sched_setparam:
10510 struct sched_param *target_schp;
10511 struct sched_param schp;
10513 if (arg2 == 0) {
10514 return -TARGET_EINVAL;
10516 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10517 return -TARGET_EFAULT;
10518 schp.sched_priority = tswap32(target_schp->sched_priority);
10519 unlock_user_struct(target_schp, arg2, 0);
10520 return get_errno(sched_setparam(arg1, &schp));
10522 case TARGET_NR_sched_getparam:
10524 struct sched_param *target_schp;
10525 struct sched_param schp;
10527 if (arg2 == 0) {
10528 return -TARGET_EINVAL;
10530 ret = get_errno(sched_getparam(arg1, &schp));
10531 if (!is_error(ret)) {
10532 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10533 return -TARGET_EFAULT;
10534 target_schp->sched_priority = tswap32(schp.sched_priority);
10535 unlock_user_struct(target_schp, arg2, 1);
10538 return ret;
10539 case TARGET_NR_sched_setscheduler:
10541 struct sched_param *target_schp;
10542 struct sched_param schp;
10543 if (arg3 == 0) {
10544 return -TARGET_EINVAL;
10546 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10547 return -TARGET_EFAULT;
10548 schp.sched_priority = tswap32(target_schp->sched_priority);
10549 unlock_user_struct(target_schp, arg3, 0);
10550 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10552 case TARGET_NR_sched_getscheduler:
10553 return get_errno(sched_getscheduler(arg1));
10554 case TARGET_NR_sched_yield:
10555 return get_errno(sched_yield());
10556 case TARGET_NR_sched_get_priority_max:
10557 return get_errno(sched_get_priority_max(arg1));
10558 case TARGET_NR_sched_get_priority_min:
10559 return get_errno(sched_get_priority_min(arg1));
10560 #ifdef TARGET_NR_sched_rr_get_interval
10561 case TARGET_NR_sched_rr_get_interval:
10563 struct timespec ts;
10564 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10565 if (!is_error(ret)) {
10566 ret = host_to_target_timespec(arg2, &ts);
10569 return ret;
10570 #endif
10571 #if defined(TARGET_NR_nanosleep)
10572 case TARGET_NR_nanosleep:
10574 struct timespec req, rem;
10575 if (target_to_host_timespec(&req, arg1)) return -TARGET_EFAULT;
10576 ret = get_errno(safe_nanosleep(&req, &rem));
10577 if (is_error(ret) && arg2 &&
10578 host_to_target_timespec(arg2, &rem)) return -TARGET_EFAULT;
10581 return ret;
10582 #endif
10583 case TARGET_NR_prctl:
10584 switch (arg1) {
10585 case PR_GET_PDEATHSIG:
10587 int deathsig;
10588 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10589 if (!is_error(ret) && arg2
10590 && put_user_ual(deathsig, arg2)) {
10591 return -TARGET_EFAULT;
10593 return ret;
10595 #ifdef PR_GET_NAME
10596 case PR_GET_NAME:
10598 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10599 if (!name) {
10600 return -TARGET_EFAULT;
10602 ret = get_errno(prctl(arg1, (unsigned long)name,
10603 arg3, arg4, arg5));
10604 unlock_user(name, arg2, 16);
10605 return ret;
10607 case PR_SET_NAME:
10609 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10610 if (!name) {
10611 return -TARGET_EFAULT;
10613 ret = get_errno(prctl(arg1, (unsigned long)name,
10614 arg3, arg4, arg5));
10615 unlock_user(name, arg2, 0);
10616 return ret;
10618 #endif
10619 #ifdef TARGET_MIPS
10620 case TARGET_PR_GET_FP_MODE:
10622 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10623 ret = 0;
10624 if (env->CP0_Status & (1 << CP0St_FR)) {
10625 ret |= TARGET_PR_FP_MODE_FR;
10627 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10628 ret |= TARGET_PR_FP_MODE_FRE;
10630 return ret;
10632 case TARGET_PR_SET_FP_MODE:
10634 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10635 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10636 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10637 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10638 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10640 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10641 TARGET_PR_FP_MODE_FRE;
10643 /* If nothing to change, return right away, successfully. */
10644 if (old_fr == new_fr && old_fre == new_fre) {
10645 return 0;
10647 /* Check the value is valid */
10648 if (arg2 & ~known_bits) {
10649 return -TARGET_EOPNOTSUPP;
10651 /* Setting FRE without FR is not supported. */
10652 if (new_fre && !new_fr) {
10653 return -TARGET_EOPNOTSUPP;
10655 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10656 /* FR1 is not supported */
10657 return -TARGET_EOPNOTSUPP;
10659 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10660 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10661 /* cannot set FR=0 */
10662 return -TARGET_EOPNOTSUPP;
10664 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10665 /* Cannot set FRE=1 */
10666 return -TARGET_EOPNOTSUPP;
10669 int i;
10670 fpr_t *fpr = env->active_fpu.fpr;
10671 for (i = 0; i < 32 ; i += 2) {
10672 if (!old_fr && new_fr) {
10673 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10674 } else if (old_fr && !new_fr) {
10675 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10679 if (new_fr) {
10680 env->CP0_Status |= (1 << CP0St_FR);
10681 env->hflags |= MIPS_HFLAG_F64;
10682 } else {
10683 env->CP0_Status &= ~(1 << CP0St_FR);
10684 env->hflags &= ~MIPS_HFLAG_F64;
10686 if (new_fre) {
10687 env->CP0_Config5 |= (1 << CP0C5_FRE);
10688 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10689 env->hflags |= MIPS_HFLAG_FRE;
10691 } else {
10692 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10693 env->hflags &= ~MIPS_HFLAG_FRE;
10696 return 0;
10698 #endif /* MIPS */
10699 #ifdef TARGET_AARCH64
10700 case TARGET_PR_SVE_SET_VL:
10701 /*
10702 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10703 * PR_SVE_VL_INHERIT. Note the kernel definition
10704 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10705 * even though the current architectural maximum is VQ=16.
10706 */
10707 ret = -TARGET_EINVAL;
10708 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10709 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10710 CPUARMState *env = cpu_env;
10711 ARMCPU *cpu = env_archcpu(env);
10712 uint32_t vq, old_vq;
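/* ZCR_EL1.LEN holds (VQ - 1); the prctl value is the vector
 * length in bytes, i.e. VQ * 16. */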
10714 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10715 vq = MAX(arg2 / 16, 1);
10716 vq = MIN(vq, cpu->sve_max_vq);
10718 if (vq < old_vq) {
10719 aarch64_sve_narrow_vq(env, vq);
10721 env->vfp.zcr_el[1] = vq - 1;
10722 arm_rebuild_hflags(env);
10723 ret = vq * 16;
10725 return ret;
10726 case TARGET_PR_SVE_GET_VL:
10727 ret = -TARGET_EINVAL;
10729 ARMCPU *cpu = env_archcpu(cpu_env);
10730 if (cpu_isar_feature(aa64_sve, cpu)) {
10731 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10734 return ret;
10735 case TARGET_PR_PAC_RESET_KEYS:
10737 CPUARMState *env = cpu_env;
10738 ARMCPU *cpu = env_archcpu(env);
10740 if (arg3 || arg4 || arg5) {
10741 return -TARGET_EINVAL;
10743 if (cpu_isar_feature(aa64_pauth, cpu)) {
10744 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10745 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10746 TARGET_PR_PAC_APGAKEY);
10747 int ret = 0;
10748 Error *err = NULL;
10750 if (arg2 == 0) {
10751 arg2 = all;
10752 } else if (arg2 & ~all) {
10753 return -TARGET_EINVAL;
10755 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10756 ret |= qemu_guest_getrandom(&env->keys.apia,
10757 sizeof(ARMPACKey), &err);
10759 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10760 ret |= qemu_guest_getrandom(&env->keys.apib,
10761 sizeof(ARMPACKey), &err);
10763 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10764 ret |= qemu_guest_getrandom(&env->keys.apda,
10765 sizeof(ARMPACKey), &err);
10767 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10768 ret |= qemu_guest_getrandom(&env->keys.apdb,
10769 sizeof(ARMPACKey), &err);
10771 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10772 ret |= qemu_guest_getrandom(&env->keys.apga,
10773 sizeof(ARMPACKey), &err);
10775 if (ret != 0) {
10776 /*
10777 * Some unknown failure in the crypto. The best
10778 * we can do is log it and fail the syscall.
10779 * The real syscall cannot fail this way.
10780 */
10781 qemu_log_mask(LOG_UNIMP,
10782 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10783 error_get_pretty(err));
10784 error_free(err);
10785 return -TARGET_EIO;
10787 return 0;
10790 return -TARGET_EINVAL;
10791 #endif /* AARCH64 */
10792 case PR_GET_SECCOMP:
10793 case PR_SET_SECCOMP:
10794 /* Disable seccomp to prevent the target disabling syscalls we
10795 * need. */
10796 return -TARGET_EINVAL;
10797 default:
10798 /* Most prctl options have no pointer arguments */
10799 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10801 break;
10802 #ifdef TARGET_NR_arch_prctl
10803 case TARGET_NR_arch_prctl:
10804 return do_arch_prctl(cpu_env, arg1, arg2);
10805 #endif
10806 #ifdef TARGET_NR_pread64
10807 case TARGET_NR_pread64:
10808 if (regpairs_aligned(cpu_env, num)) {
10809 arg4 = arg5;
10810 arg5 = arg6;
10812 if (arg2 == 0 && arg3 == 0) {
10813 /* Special-case NULL buffer and zero length, which should succeed */
10814 p = 0;
10815 } else {
10816 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10817 if (!p) {
10818 return -TARGET_EFAULT;
10821 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10822 unlock_user(p, arg2, ret);
10823 return ret;
10824 case TARGET_NR_pwrite64:
10825 if (regpairs_aligned(cpu_env, num)) {
10826 arg4 = arg5;
10827 arg5 = arg6;
10829 if (arg2 == 0 && arg3 == 0) {
10830 /* Special-case NULL buffer and zero length, which should succeed */
10831 p = 0;
10832 } else {
10833 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10834 if (!p) {
10835 return -TARGET_EFAULT;
10838 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10839 unlock_user(p, arg2, 0);
10840 return ret;
10841 #endif
10842 case TARGET_NR_getcwd:
10843 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10844 return -TARGET_EFAULT;
10845 ret = get_errno(sys_getcwd1(p, arg2));
10846 unlock_user(p, arg1, ret);
10847 return ret;
10848 case TARGET_NR_capget:
10849 case TARGET_NR_capset:
10851 struct target_user_cap_header *target_header;
10852 struct target_user_cap_data *target_data = NULL;
10853 struct __user_cap_header_struct header;
10854 struct __user_cap_data_struct data[2];
10855 struct __user_cap_data_struct *dataptr = NULL;
10856 int i, target_datalen;
10857 int data_items = 1;
10859 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10860 return -TARGET_EFAULT;
10862 header.version = tswap32(target_header->version);
10863 header.pid = tswap32(target_header->pid);
10865 if (header.version != _LINUX_CAPABILITY_VERSION) {
10866 /* Versions 2 and up take a pointer to two user_data structs */
10867 data_items = 2;
10870 target_datalen = sizeof(*target_data) * data_items;
10872 if (arg2) {
10873 if (num == TARGET_NR_capget) {
10874 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10875 } else {
10876 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10878 if (!target_data) {
10879 unlock_user_struct(target_header, arg1, 0);
10880 return -TARGET_EFAULT;
10883 if (num == TARGET_NR_capset) {
10884 for (i = 0; i < data_items; i++) {
10885 data[i].effective = tswap32(target_data[i].effective);
10886 data[i].permitted = tswap32(target_data[i].permitted);
10887 data[i].inheritable = tswap32(target_data[i].inheritable);
10891 dataptr = data;
10894 if (num == TARGET_NR_capget) {
10895 ret = get_errno(capget(&header, dataptr));
10896 } else {
10897 ret = get_errno(capset(&header, dataptr));
10900 /* The kernel always updates version for both capget and capset */
10901 target_header->version = tswap32(header.version);
10902 unlock_user_struct(target_header, arg1, 1);
10904 if (arg2) {
10905 if (num == TARGET_NR_capget) {
10906 for (i = 0; i < data_items; i++) {
10907 target_data[i].effective = tswap32(data[i].effective);
10908 target_data[i].permitted = tswap32(data[i].permitted);
10909 target_data[i].inheritable = tswap32(data[i].inheritable);
10911 unlock_user(target_data, arg2, target_datalen);
10912 } else {
10913 unlock_user(target_data, arg2, 0);
10916 return ret;
10918 case TARGET_NR_sigaltstack:
10919 return do_sigaltstack(arg1, arg2,
10920 get_sp_from_cpustate((CPUArchState *)cpu_env));
10922 #ifdef CONFIG_SENDFILE
10923 #ifdef TARGET_NR_sendfile
10924 case TARGET_NR_sendfile:
10926 off_t *offp = NULL;
10927 off_t off;
10928 if (arg3) {
10929 ret = get_user_sal(off, arg3);
10930 if (is_error(ret)) {
10931 return ret;
10933 offp = &off;
10935 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10936 if (!is_error(ret) && arg3) {
10937 abi_long ret2 = put_user_sal(off, arg3);
10938 if (is_error(ret2)) {
10939 ret = ret2;
10942 return ret;
10944 #endif
10945 #ifdef TARGET_NR_sendfile64
10946 case TARGET_NR_sendfile64:
10948 off_t *offp = NULL;
10949 off_t off;
10950 if (arg3) {
10951 ret = get_user_s64(off, arg3);
10952 if (is_error(ret)) {
10953 return ret;
10955 offp = &off;
10957 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10958 if (!is_error(ret) && arg3) {
10959 abi_long ret2 = put_user_s64(off, arg3);
10960 if (is_error(ret2)) {
10961 ret = ret2;
10964 return ret;
10966 #endif
10967 #endif
10968 #ifdef TARGET_NR_vfork
10969 case TARGET_NR_vfork:
10970 return get_errno(do_fork(cpu_env,
10971 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10972 0, 0, 0, 0));
10973 #endif
10974 #ifdef TARGET_NR_ugetrlimit
10975 case TARGET_NR_ugetrlimit:
10977 struct rlimit rlim;
10978 int resource = target_to_host_resource(arg1);
10979 ret = get_errno(getrlimit(resource, &rlim));
10980 if (!is_error(ret)) {
10981 struct target_rlimit *target_rlim;
10982 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10983 return -TARGET_EFAULT;
10984 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10985 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10986 unlock_user_struct(target_rlim, arg2, 1);
10988 return ret;
10990 #endif
10991 #ifdef TARGET_NR_truncate64
10992 case TARGET_NR_truncate64:
10993 if (!(p = lock_user_string(arg1)))
10994 return -TARGET_EFAULT;
10995 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10996 unlock_user(p, arg1, 0);
10997 return ret;
10998 #endif
10999 #ifdef TARGET_NR_ftruncate64
11000 case TARGET_NR_ftruncate64:
11001 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11002 #endif
11003 #ifdef TARGET_NR_stat64
11004 case TARGET_NR_stat64:
11005 if (!(p = lock_user_string(arg1))) {
11006 return -TARGET_EFAULT;
11008 ret = get_errno(stat(path(p), &st));
11009 unlock_user(p, arg1, 0);
11010 if (!is_error(ret))
11011 ret = host_to_target_stat64(cpu_env, arg2, &st);
11012 return ret;
11013 #endif
11014 #ifdef TARGET_NR_lstat64
11015 case TARGET_NR_lstat64:
11016 if (!(p = lock_user_string(arg1))) {
11017 return -TARGET_EFAULT;
11019 ret = get_errno(lstat(path(p), &st));
11020 unlock_user(p, arg1, 0);
11021 if (!is_error(ret))
11022 ret = host_to_target_stat64(cpu_env, arg2, &st);
11023 return ret;
11024 #endif
11025 #ifdef TARGET_NR_fstat64
11026 case TARGET_NR_fstat64:
11027 ret = get_errno(fstat(arg1, &st));
11028 if (!is_error(ret))
11029 ret = host_to_target_stat64(cpu_env, arg2, &st);
11030 return ret;
11031 #endif
11032 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11033 #ifdef TARGET_NR_fstatat64
11034 case TARGET_NR_fstatat64:
11035 #endif
11036 #ifdef TARGET_NR_newfstatat
11037 case TARGET_NR_newfstatat:
11038 #endif
11039 if (!(p = lock_user_string(arg2))) {
11040 return -TARGET_EFAULT;
11042 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11043 unlock_user(p, arg2, 0);
11044 if (!is_error(ret))
11045 ret = host_to_target_stat64(cpu_env, arg3, &st);
11046 return ret;
11047 #endif
11048 #if defined(TARGET_NR_statx)
11049 case TARGET_NR_statx:
11051 struct target_statx *target_stx;
11052 int dirfd = arg1;
11053 int flags = arg3;
11055 p = lock_user_string(arg2);
11056 if (p == NULL) {
11057 return -TARGET_EFAULT;
11059 #if defined(__NR_statx)
11061 /*
11062 * It is assumed that struct statx is architecture independent.
11063 */
11064 struct target_statx host_stx;
11065 int mask = arg4;
11067 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11068 if (!is_error(ret)) {
11069 if (host_to_target_statx(&host_stx, arg5) != 0) {
11070 unlock_user(p, arg2, 0);
11071 return -TARGET_EFAULT;
11075 if (ret != -TARGET_ENOSYS) {
11076 unlock_user(p, arg2, 0);
11077 return ret;
11080 #endif
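/* No host statx() (or it returned ENOSYS): emulate with fstatat()
 * and fill in the statx fields we can recover. */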
11081 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11082 unlock_user(p, arg2, 0);
11084 if (!is_error(ret)) {
11085 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11086 return -TARGET_EFAULT;
11088 memset(target_stx, 0, sizeof(*target_stx));
11089 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11090 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11091 __put_user(st.st_ino, &target_stx->stx_ino);
11092 __put_user(st.st_mode, &target_stx->stx_mode);
11093 __put_user(st.st_uid, &target_stx->stx_uid);
11094 __put_user(st.st_gid, &target_stx->stx_gid);
11095 __put_user(st.st_nlink, &target_stx->stx_nlink);
11096 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11097 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11098 __put_user(st.st_size, &target_stx->stx_size);
11099 __put_user(st.st_blksize, &target_stx->stx_blksize);
11100 __put_user(st.st_blocks, &target_stx->stx_blocks);
11101 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11102 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11103 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11104 unlock_user_struct(target_stx, arg5, 1);
11107 return ret;
11108 #endif
11109 #ifdef TARGET_NR_lchown
11110 case TARGET_NR_lchown:
11111 if (!(p = lock_user_string(arg1)))
11112 return -TARGET_EFAULT;
11113 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11114 unlock_user(p, arg1, 0);
11115 return ret;
11116 #endif
11117 #ifdef TARGET_NR_getuid
11118 case TARGET_NR_getuid:
11119 return get_errno(high2lowuid(getuid()));
11120 #endif
11121 #ifdef TARGET_NR_getgid
11122 case TARGET_NR_getgid:
11123 return get_errno(high2lowgid(getgid()));
11124 #endif
11125 #ifdef TARGET_NR_geteuid
11126 case TARGET_NR_geteuid:
11127 return get_errno(high2lowuid(geteuid()));
11128 #endif
11129 #ifdef TARGET_NR_getegid
11130 case TARGET_NR_getegid:
11131 return get_errno(high2lowgid(getegid()));
11132 #endif
11133 case TARGET_NR_setreuid:
11134 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11135 case TARGET_NR_setregid:
11136 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11137 case TARGET_NR_getgroups:
11139 int gidsetsize = arg1;
11140 target_id *target_grouplist;
11141 gid_t *grouplist;
11142 int i;
11144 grouplist = alloca(gidsetsize * sizeof(gid_t));
11145 ret = get_errno(getgroups(gidsetsize, grouplist));
11146 if (gidsetsize == 0)
11147 return ret;
11148 if (!is_error(ret)) {
11149 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11150 if (!target_grouplist)
11151 return -TARGET_EFAULT;
11152 for (i = 0; i < ret; i++)
11153 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11154 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11157 return ret;
11158 case TARGET_NR_setgroups:
11160 int gidsetsize = arg1;
11161 target_id *target_grouplist;
11162 gid_t *grouplist = NULL;
11163 int i;
11164 if (gidsetsize) {
11165 grouplist = alloca(gidsetsize * sizeof(gid_t));
11166 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11167 if (!target_grouplist) {
11168 return -TARGET_EFAULT;
11170 for (i = 0; i < gidsetsize; i++) {
11171 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11173 unlock_user(target_grouplist, arg2, 0);
11175 return get_errno(setgroups(gidsetsize, grouplist));
11177 case TARGET_NR_fchown:
11178 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11179 #if defined(TARGET_NR_fchownat)
11180 case TARGET_NR_fchownat:
11181 if (!(p = lock_user_string(arg2)))
11182 return -TARGET_EFAULT;
11183 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11184 low2highgid(arg4), arg5));
11185 unlock_user(p, arg2, 0);
11186 return ret;
11187 #endif
11188 #ifdef TARGET_NR_setresuid
11189 case TARGET_NR_setresuid:
11190 return get_errno(sys_setresuid(low2highuid(arg1),
11191 low2highuid(arg2),
11192 low2highuid(arg3)));
11193 #endif
11194 #ifdef TARGET_NR_getresuid
11195 case TARGET_NR_getresuid:
11197 uid_t ruid, euid, suid;
11198 ret = get_errno(getresuid(&ruid, &euid, &suid));
11199 if (!is_error(ret)) {
11200 if (put_user_id(high2lowuid(ruid), arg1)
11201 || put_user_id(high2lowuid(euid), arg2)
11202 || put_user_id(high2lowuid(suid), arg3))
11203 return -TARGET_EFAULT;
11206 return ret;
11207 #endif
11208 #ifdef TARGET_NR_setresgid
11209 case TARGET_NR_setresgid:
11210 return get_errno(sys_setresgid(low2highgid(arg1),
11211 low2highgid(arg2),
11212 low2highgid(arg3)));
11213 #endif
11214 #ifdef TARGET_NR_getresgid
11215 case TARGET_NR_getresgid:
11217 gid_t rgid, egid, sgid;
11218 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11219 if (!is_error(ret)) {
11220 if (put_user_id(high2lowgid(rgid), arg1)
11221 || put_user_id(high2lowgid(egid), arg2)
11222 || put_user_id(high2lowgid(sgid), arg3))
11223 return -TARGET_EFAULT;
11226 return ret;
11227 #endif
11228 #ifdef TARGET_NR_chown
11229 case TARGET_NR_chown:
11230 if (!(p = lock_user_string(arg1)))
11231 return -TARGET_EFAULT;
11232 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11233 unlock_user(p, arg1, 0);
11234 return ret;
11235 #endif
11236 case TARGET_NR_setuid:
11237 return get_errno(sys_setuid(low2highuid(arg1)));
11238 case TARGET_NR_setgid:
11239 return get_errno(sys_setgid(low2highgid(arg1)));
11240 case TARGET_NR_setfsuid:
11241 return get_errno(setfsuid(arg1));
11242 case TARGET_NR_setfsgid:
11243 return get_errno(setfsgid(arg1));
11245 #ifdef TARGET_NR_lchown32
11246 case TARGET_NR_lchown32:
11247 if (!(p = lock_user_string(arg1)))
11248 return -TARGET_EFAULT;
11249 ret = get_errno(lchown(p, arg2, arg3));
11250 unlock_user(p, arg1, 0);
11251 return ret;
11252 #endif
11253 #ifdef TARGET_NR_getuid32
11254 case TARGET_NR_getuid32:
11255 return get_errno(getuid());
11256 #endif
11258 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11259 /* Alpha specific */
11260 case TARGET_NR_getxuid:
11262 uid_t euid;
11263 euid = geteuid();
11264 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11266 return get_errno(getuid());
11267 #endif
11268 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11269 /* Alpha specific */
11270 case TARGET_NR_getxgid:
11272 gid_t egid;
11273 egid = getegid();
11274 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11276 return get_errno(getgid());
11277 #endif
11278 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11279 /* Alpha specific */
11280 case TARGET_NR_osf_getsysinfo:
11281 ret = -TARGET_EOPNOTSUPP;
11282 switch (arg1) {
11283 case TARGET_GSI_IEEE_FP_CONTROL:
11285 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11286 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11288 swcr &= ~SWCR_STATUS_MASK;
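/* The IEEE status bits sit 35 bits higher in the hardware FPCR
 * than in the software completion (SWCR) view. */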
11289 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11291 if (put_user_u64(swcr, arg2))
11292 return -TARGET_EFAULT;
11293 ret = 0;
11295 break;
11297 /* case GSI_IEEE_STATE_AT_SIGNAL:
11298 -- Not implemented in linux kernel.
11299 case GSI_UACPROC:
11300 -- Retrieves current unaligned access state; not much used.
11301 case GSI_PROC_TYPE:
11302 -- Retrieves implver information; surely not used.
11303 case GSI_GET_HWRPB:
11304 -- Grabs a copy of the HWRPB; surely not used.
11305 */
11307 return ret;
11308 #endif
11309 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11310 /* Alpha specific */
11311 case TARGET_NR_osf_setsysinfo:
11312 ret = -TARGET_EOPNOTSUPP;
11313 switch (arg1) {
11314 case TARGET_SSI_IEEE_FP_CONTROL:
11316 uint64_t swcr, fpcr;
11318 if (get_user_u64(swcr, arg2)) {
11319 return -TARGET_EFAULT;
11322 /*
11323 * The kernel calls swcr_update_status to update the
11324 * status bits from the fpcr at every point that it
11325 * could be queried. Therefore, we store the status
11326 * bits only in FPCR.
11327 */
11328 ((CPUAlphaState *)cpu_env)->swcr
11329 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11331 fpcr = cpu_alpha_load_fpcr(cpu_env);
11332 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11333 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11334 cpu_alpha_store_fpcr(cpu_env, fpcr);
11335 ret = 0;
11337 break;
11339 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11341 uint64_t exc, fpcr, fex;
11343 if (get_user_u64(exc, arg2)) {
11344 return -TARGET_EFAULT;
11346 exc &= SWCR_STATUS_MASK;
11347 fpcr = cpu_alpha_load_fpcr(cpu_env);
11349 /* Old exceptions are not signaled. */
11350 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11351 fex = exc & ~fex;
11352 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11353 fex &= ((CPUArchState *)cpu_env)->swcr;
11355 /* Update the hardware fpcr. */
11356 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11357 cpu_alpha_store_fpcr(cpu_env, fpcr);
11359 if (fex) {
11360 int si_code = TARGET_FPE_FLTUNK;
11361 target_siginfo_t info;
11363 if (fex & SWCR_TRAP_ENABLE_DNO) {
11364 si_code = TARGET_FPE_FLTUND;
11366 if (fex & SWCR_TRAP_ENABLE_INE) {
11367 si_code = TARGET_FPE_FLTRES;
11369 if (fex & SWCR_TRAP_ENABLE_UNF) {
11370 si_code = TARGET_FPE_FLTUND;
11372 if (fex & SWCR_TRAP_ENABLE_OVF) {
11373 si_code = TARGET_FPE_FLTOVF;
11375 if (fex & SWCR_TRAP_ENABLE_DZE) {
11376 si_code = TARGET_FPE_FLTDIV;
11378 if (fex & SWCR_TRAP_ENABLE_INV) {
11379 si_code = TARGET_FPE_FLTINV;
11382 info.si_signo = SIGFPE;
11383 info.si_errno = 0;
11384 info.si_code = si_code;
11385 info._sifields._sigfault._addr
11386 = ((CPUArchState *)cpu_env)->pc;
11387 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11388 QEMU_SI_FAULT, &info);
11390 ret = 0;
11392 break;
11394 /* case SSI_NVPAIRS:
11395 -- Used with SSIN_UACPROC to enable unaligned accesses.
11396 case SSI_IEEE_STATE_AT_SIGNAL:
11397 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11398 -- Not implemented in linux kernel
11399 */
11401 return ret;
11402 #endif
11403 #ifdef TARGET_NR_osf_sigprocmask
11404 /* Alpha specific. */
11405 case TARGET_NR_osf_sigprocmask:
11407 abi_ulong mask;
11408 int how;
11409 sigset_t set, oldset;
11411 switch (arg1) {
11412 case TARGET_SIG_BLOCK:
11413 how = SIG_BLOCK;
11414 break;
11415 case TARGET_SIG_UNBLOCK:
11416 how = SIG_UNBLOCK;
11417 break;
11418 case TARGET_SIG_SETMASK:
11419 how = SIG_SETMASK;
11420 break;
11421 default:
11422 return -TARGET_EINVAL;
11424 mask = arg2;
11425 target_to_host_old_sigset(&set, &mask);
11426 ret = do_sigprocmask(how, &set, &oldset);
11427 if (!ret) {
11428 host_to_target_old_sigset(&mask, &oldset);
11429 ret = mask;
11432 return ret;
11433 #endif
11435 #ifdef TARGET_NR_getgid32
11436 case TARGET_NR_getgid32:
11437 return get_errno(getgid());
11438 #endif
11439 #ifdef TARGET_NR_geteuid32
11440 case TARGET_NR_geteuid32:
11441 return get_errno(geteuid());
11442 #endif
11443 #ifdef TARGET_NR_getegid32
11444 case TARGET_NR_getegid32:
11445 return get_errno(getegid());
11446 #endif
11447 #ifdef TARGET_NR_setreuid32
11448 case TARGET_NR_setreuid32:
11449 return get_errno(setreuid(arg1, arg2));
11450 #endif
11451 #ifdef TARGET_NR_setregid32
11452 case TARGET_NR_setregid32:
11453 return get_errno(setregid(arg1, arg2));
11454 #endif
11455 #ifdef TARGET_NR_getgroups32
11456 case TARGET_NR_getgroups32:
11458 int gidsetsize = arg1;
11459 uint32_t *target_grouplist;
11460 gid_t *grouplist;
11461 int i;
11463 grouplist = alloca(gidsetsize * sizeof(gid_t));
11464 ret = get_errno(getgroups(gidsetsize, grouplist));
11465 if (gidsetsize == 0)
11466 return ret;
11467 if (!is_error(ret)) {
11468 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11469 if (!target_grouplist) {
11470 return -TARGET_EFAULT;
11472 for (i = 0; i < ret; i++)
11473 target_grouplist[i] = tswap32(grouplist[i]);
11474 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11477 return ret;
11478 #endif
11479 #ifdef TARGET_NR_setgroups32
11480 case TARGET_NR_setgroups32:
11482 int gidsetsize = arg1;
11483 uint32_t *target_grouplist;
11484 gid_t *grouplist;
11485 int i;
11487 grouplist = alloca(gidsetsize * sizeof(gid_t));
11488 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11489 if (!target_grouplist) {
11490 return -TARGET_EFAULT;
11492 for (i = 0; i < gidsetsize; i++)
11493 grouplist[i] = tswap32(target_grouplist[i]);
11494 unlock_user(target_grouplist, arg2, 0);
11495 return get_errno(setgroups(gidsetsize, grouplist));
11497 #endif
11498 #ifdef TARGET_NR_fchown32
11499 case TARGET_NR_fchown32:
11500 return get_errno(fchown(arg1, arg2, arg3));
11501 #endif
11502 #ifdef TARGET_NR_setresuid32
11503 case TARGET_NR_setresuid32:
11504 return get_errno(sys_setresuid(arg1, arg2, arg3));
11505 #endif
11506 #ifdef TARGET_NR_getresuid32
11507 case TARGET_NR_getresuid32:
11509 uid_t ruid, euid, suid;
11510 ret = get_errno(getresuid(&ruid, &euid, &suid));
11511 if (!is_error(ret)) {
11512 if (put_user_u32(ruid, arg1)
11513 || put_user_u32(euid, arg2)
11514 || put_user_u32(suid, arg3))
11515 return -TARGET_EFAULT;
11518 return ret;
11519 #endif
11520 #ifdef TARGET_NR_setresgid32
11521 case TARGET_NR_setresgid32:
11522 return get_errno(sys_setresgid(arg1, arg2, arg3));
11523 #endif
11524 #ifdef TARGET_NR_getresgid32
11525 case TARGET_NR_getresgid32:
11527 gid_t rgid, egid, sgid;
11528 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11529 if (!is_error(ret)) {
11530 if (put_user_u32(rgid, arg1)
11531 || put_user_u32(egid, arg2)
11532 || put_user_u32(sgid, arg3))
11533 return -TARGET_EFAULT;
11536 return ret;
11537 #endif
11538 #ifdef TARGET_NR_chown32
11539 case TARGET_NR_chown32:
11540 if (!(p = lock_user_string(arg1)))
11541 return -TARGET_EFAULT;
11542 ret = get_errno(chown(p, arg2, arg3));
11543 unlock_user(p, arg1, 0);
11544 return ret;
11545 #endif
11546 #ifdef TARGET_NR_setuid32
11547 case TARGET_NR_setuid32:
11548 return get_errno(sys_setuid(arg1));
11549 #endif
11550 #ifdef TARGET_NR_setgid32
11551 case TARGET_NR_setgid32:
11552 return get_errno(sys_setgid(arg1));
11553 #endif
11554 #ifdef TARGET_NR_setfsuid32
11555 case TARGET_NR_setfsuid32:
11556 return get_errno(setfsuid(arg1));
11557 #endif
11558 #ifdef TARGET_NR_setfsgid32
11559 case TARGET_NR_setfsgid32:
11560 return get_errno(setfsgid(arg1));
11561 #endif
11562 #ifdef TARGET_NR_mincore
11563 case TARGET_NR_mincore:
11565 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11566 if (!a) {
11567 return -TARGET_ENOMEM;
11569 p = lock_user(VERIFY_WRITE, arg3, DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE), 0);
11570 if (!p) {
11571 ret = -TARGET_EFAULT;
11572 } else {
11573 ret = get_errno(mincore(a, arg2, p));
11574 unlock_user(p, arg3, DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE));
11576 unlock_user(a, arg1, 0);
11578 return ret;
11579 #endif
11580 #ifdef TARGET_NR_arm_fadvise64_64
11581 case TARGET_NR_arm_fadvise64_64:
11582 /* arm_fadvise64_64 looks like fadvise64_64 but
11583 * with different argument order: fd, advice, offset, len
11584 * rather than the usual fd, offset, len, advice.
11585 * Note that offset and len are both 64-bit so appear as
11586 * pairs of 32-bit registers.
11587 */
11588 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11589 target_offset64(arg5, arg6), arg2);
11590 return -host_to_target_errno(ret);
11591 #endif
11593 #if TARGET_ABI_BITS == 32
11595 #ifdef TARGET_NR_fadvise64_64
11596 case TARGET_NR_fadvise64_64:
11597 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11598 /* 6 args: fd, advice, offset (high, low), len (high, low) */
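/* Rotate the advice argument from slot 2 round to slot 6 so the
 * common call below sees fd, offset, len, advice; ret is only a
 * scratch variable here. */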
11599 ret = arg2;
11600 arg2 = arg3;
11601 arg3 = arg4;
11602 arg4 = arg5;
11603 arg5 = arg6;
11604 arg6 = ret;
11605 #else
11606 /* 6 args: fd, offset (high, low), len (high, low), advice */
11607 if (regpairs_aligned(cpu_env, num)) {
11608 /* offset is in (3,4), len in (5,6) and advice in 7 */
11609 arg2 = arg3;
11610 arg3 = arg4;
11611 arg4 = arg5;
11612 arg5 = arg6;
11613 arg6 = arg7;
11615 #endif
11616 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11617 target_offset64(arg4, arg5), arg6);
11618 return -host_to_target_errno(ret);
11619 #endif
11621 #ifdef TARGET_NR_fadvise64
11622 case TARGET_NR_fadvise64:
11623 /* 5 args: fd, offset (high, low), len, advice */
11624 if (regpairs_aligned(cpu_env, num)) {
11625 /* offset is in (3,4), len in 5 and advice in 6 */
11626 arg2 = arg3;
11627 arg3 = arg4;
11628 arg4 = arg5;
11629 arg5 = arg6;
11631 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11632 return -host_to_target_errno(ret);
11633 #endif
11635 #else /* not a 32-bit ABI */
11636 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11637 #ifdef TARGET_NR_fadvise64_64
11638 case TARGET_NR_fadvise64_64:
11639 #endif
11640 #ifdef TARGET_NR_fadvise64
11641 case TARGET_NR_fadvise64:
11642 #endif
11643 #ifdef TARGET_S390X
11644 switch (arg4) {
11645 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11646 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11647 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11648 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11649 default: break;
11651 #endif
11652 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11653 #endif
11654 #endif /* end of 64-bit ABI fadvise handling */
11656 #ifdef TARGET_NR_madvise
11657 case TARGET_NR_madvise:
11658 /* A straight passthrough may not be safe because qemu sometimes
11659 turns private file-backed mappings into anonymous mappings.
11660 This will break MADV_DONTNEED.
11661 This is a hint, so ignoring and returning success is ok. */
11662 return 0;
11663 #endif
11664 #ifdef TARGET_NR_fcntl64
11665 case TARGET_NR_fcntl64:
11667 int cmd;
11668 struct flock64 fl;
11669 from_flock64_fn *copyfrom = copy_from_user_flock64;
11670 to_flock64_fn *copyto = copy_to_user_flock64;
11672 #ifdef TARGET_ARM
11673 if (!((CPUARMState *)cpu_env)->eabi) {
11674 copyfrom = copy_from_user_oabi_flock64;
11675 copyto = copy_to_user_oabi_flock64;
11677 #endif
11679 cmd = target_to_host_fcntl_cmd(arg2);
11680 if (cmd == -TARGET_EINVAL) {
11681 return cmd;
11684 switch (arg2) {
11685 case TARGET_F_GETLK64:
11686 ret = copyfrom(&fl, arg3);
11687 if (ret) {
11688 break;
11690 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11691 if (ret == 0) {
11692 ret = copyto(arg3, &fl);
11694 break;
11696 case TARGET_F_SETLK64:
11697 case TARGET_F_SETLKW64:
11698 ret = copyfrom(&fl, arg3);
11699 if (ret) {
11700 break;
11702 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11703 break;
11704 default:
11705 ret = do_fcntl(arg1, arg2, arg3);
11706 break;
11708 return ret;
11710 #endif
11711 #ifdef TARGET_NR_cacheflush
11712 case TARGET_NR_cacheflush:
11713 /* self-modifying code is handled automatically, so nothing needed */
11714 return 0;
11715 #endif
11716 #ifdef TARGET_NR_getpagesize
11717 case TARGET_NR_getpagesize:
11718 return TARGET_PAGE_SIZE;
11719 #endif
11720 case TARGET_NR_gettid:
11721 return get_errno(sys_gettid());
11722 #ifdef TARGET_NR_readahead
11723 case TARGET_NR_readahead:
11724 #if TARGET_ABI_BITS == 32
11725 if (regpairs_aligned(cpu_env, num)) {
11726 arg2 = arg3;
11727 arg3 = arg4;
11728 arg4 = arg5;
11730 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11731 #else
11732 ret = get_errno(readahead(arg1, arg2, arg3));
11733 #endif
11734 return ret;
11735 #endif
11736 #ifdef CONFIG_ATTR
11737 #ifdef TARGET_NR_setxattr
11738 case TARGET_NR_listxattr:
11739 case TARGET_NR_llistxattr:
11741 void *p, *b = 0;
11742 if (arg2) {
11743 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11744 if (!b) {
11745 return -TARGET_EFAULT;
11748 p = lock_user_string(arg1);
11749 if (p) {
11750 if (num == TARGET_NR_listxattr) {
11751 ret = get_errno(listxattr(p, b, arg3));
11752 } else {
11753 ret = get_errno(llistxattr(p, b, arg3));
11755 } else {
11756 ret = -TARGET_EFAULT;
11758 unlock_user(p, arg1, 0);
11759 unlock_user(b, arg2, arg3);
11760 return ret;
11762 case TARGET_NR_flistxattr:
11763 {
11764 void *b = 0;
11765 if (arg2) {
11766 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11767 if (!b) {
11768 return -TARGET_EFAULT;
11769 }
11770 }
11771 ret = get_errno(flistxattr(arg1, b, arg3));
11772 unlock_user(b, arg2, arg3);
11773 return ret;
11774 }
11775 case TARGET_NR_setxattr:
11776 case TARGET_NR_lsetxattr:
11777 {
11778 void *p, *n, *v = 0;
11779 if (arg3) {
11780 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11781 if (!v) {
11782 return -TARGET_EFAULT;
11783 }
11784 }
11785 p = lock_user_string(arg1);
11786 n = lock_user_string(arg2);
11787 if (p && n) {
11788 if (num == TARGET_NR_setxattr) {
11789 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11790 } else {
11791 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11792 }
11793 } else {
11794 ret = -TARGET_EFAULT;
11795 }
11796 unlock_user(p, arg1, 0);
11797 unlock_user(n, arg2, 0);
11798 unlock_user(v, arg3, 0);
11799 }
11800 return ret;
11801 case TARGET_NR_fsetxattr:
11802 {
11803 void *n, *v = 0;
11804 if (arg3) {
11805 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11806 if (!v) {
11807 return -TARGET_EFAULT;
11808 }
11809 }
11810 n = lock_user_string(arg2);
11811 if (n) {
11812 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11813 } else {
11814 ret = -TARGET_EFAULT;
11815 }
11816 unlock_user(n, arg2, 0);
11817 unlock_user(v, arg3, 0);
11818 }
11819 return ret;
11820 case TARGET_NR_getxattr:
11821 case TARGET_NR_lgetxattr:
11822 {
11823 void *p, *n, *v = 0;
11824 if (arg3) {
11825 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11826 if (!v) {
11827 return -TARGET_EFAULT;
11828 }
11829 }
11830 p = lock_user_string(arg1);
11831 n = lock_user_string(arg2);
11832 if (p && n) {
11833 if (num == TARGET_NR_getxattr) {
11834 ret = get_errno(getxattr(p, n, v, arg4));
11835 } else {
11836 ret = get_errno(lgetxattr(p, n, v, arg4));
11837 }
11838 } else {
11839 ret = -TARGET_EFAULT;
11840 }
11841 unlock_user(p, arg1, 0);
11842 unlock_user(n, arg2, 0);
11843 unlock_user(v, arg3, arg4);
11844 }
11845 return ret;
11846 case TARGET_NR_fgetxattr:
11847 {
11848 void *n, *v = 0;
11849 if (arg3) {
11850 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11851 if (!v) {
11852 return -TARGET_EFAULT;
11853 }
11854 }
11855 n = lock_user_string(arg2);
11856 if (n) {
11857 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11858 } else {
11859 ret = -TARGET_EFAULT;
11860 }
11861 unlock_user(n, arg2, 0);
11862 unlock_user(v, arg3, arg4);
11863 }
11864 return ret;
11865 case TARGET_NR_removexattr:
11866 case TARGET_NR_lremovexattr:
11867 {
11868 void *p, *n;
11869 p = lock_user_string(arg1);
11870 n = lock_user_string(arg2);
11871 if (p && n) {
11872 if (num == TARGET_NR_removexattr) {
11873 ret = get_errno(removexattr(p, n));
11874 } else {
11875 ret = get_errno(lremovexattr(p, n));
11876 }
11877 } else {
11878 ret = -TARGET_EFAULT;
11879 }
11880 unlock_user(p, arg1, 0);
11881 unlock_user(n, arg2, 0);
11882 }
11883 return ret;
11884 case TARGET_NR_fremovexattr:
11885 {
11886 void *n;
11887 n = lock_user_string(arg2);
11888 if (n) {
11889 ret = get_errno(fremovexattr(arg1, n));
11890 } else {
11891 ret = -TARGET_EFAULT;
11892 }
11893 unlock_user(n, arg2, 0);
11894 }
11895 return ret;
11896 #endif
11897 #endif /* CONFIG_ATTR */
11898 #ifdef TARGET_NR_set_thread_area
11899 case TARGET_NR_set_thread_area:
11900 #if defined(TARGET_MIPS)
11901 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11902 return 0;
11903 #elif defined(TARGET_CRIS)
11904 if (arg1 & 0xff)
11905 ret = -TARGET_EINVAL;
11906 else {
11907 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11908 ret = 0;
11909 }
11910 return ret;
11911 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11912 return do_set_thread_area(cpu_env, arg1);
11913 #elif defined(TARGET_M68K)
11914 {
11915 TaskState *ts = cpu->opaque;
11916 ts->tp_value = arg1;
11917 return 0;
11918 }
11919 #else
11920 return -TARGET_ENOSYS;
11921 #endif
11922 #endif
11923 #ifdef TARGET_NR_get_thread_area
11924 case TARGET_NR_get_thread_area:
11925 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11926 return do_get_thread_area(cpu_env, arg1);
11927 #elif defined(TARGET_M68K)
11928 {
11929 TaskState *ts = cpu->opaque;
11930 return ts->tp_value;
11931 }
11932 #else
11933 return -TARGET_ENOSYS;
11934 #endif
11935 #endif
11936 #ifdef TARGET_NR_getdomainname
11937 case TARGET_NR_getdomainname:
11938 return -TARGET_ENOSYS;
11939 #endif
11941 #ifdef TARGET_NR_clock_settime
11942 case TARGET_NR_clock_settime:
11943 {
11944 struct timespec ts;
11946 ret = target_to_host_timespec(&ts, arg2);
11947 if (!is_error(ret)) {
11948 ret = get_errno(clock_settime(arg1, &ts));
11949 }
11950 return ret;
11951 }
11952 #endif
11953 #ifdef TARGET_NR_clock_settime64
11954 case TARGET_NR_clock_settime64:
11955 {
11956 struct timespec ts;
11958 ret = target_to_host_timespec64(&ts, arg2);
11959 if (!is_error(ret)) {
11960 ret = get_errno(clock_settime(arg1, &ts));
11961 }
11962 return ret;
11963 }
11964 #endif
11965 #ifdef TARGET_NR_clock_gettime
11966 case TARGET_NR_clock_gettime:
11967 {
11968 struct timespec ts;
11969 ret = get_errno(clock_gettime(arg1, &ts));
11970 if (!is_error(ret)) {
11971 ret = host_to_target_timespec(arg2, &ts);
11972 }
11973 return ret;
11974 }
11975 #endif
11976 #ifdef TARGET_NR_clock_gettime64
11977 case TARGET_NR_clock_gettime64:
11978 {
11979 struct timespec ts;
11980 ret = get_errno(clock_gettime(arg1, &ts));
11981 if (!is_error(ret)) {
11982 ret = host_to_target_timespec64(arg2, &ts);
11983 }
11984 return ret;
11985 }
11986 #endif
11987 #ifdef TARGET_NR_clock_getres
11988 case TARGET_NR_clock_getres:
11989 {
11990 struct timespec ts;
11991 ret = get_errno(clock_getres(arg1, &ts));
11992 if (!is_error(ret)) {
11993 host_to_target_timespec(arg2, &ts);
11994 }
11995 return ret;
11996 }
11997 #endif
11998 #ifdef TARGET_NR_clock_getres_time64
11999 case TARGET_NR_clock_getres_time64:
12000 {
12001 struct timespec ts;
12002 ret = get_errno(clock_getres(arg1, &ts));
12003 if (!is_error(ret)) {
12004 host_to_target_timespec64(arg2, &ts);
12005 }
12006 return ret;
12007 }
12008 #endif
12009 #ifdef TARGET_NR_clock_nanosleep
12010 case TARGET_NR_clock_nanosleep:
12011 {
12012 struct timespec ts;
12013 if (target_to_host_timespec(&ts, arg3)) {
12014 return -TARGET_EFAULT;
12015 }
12016 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12017 &ts, arg4 ? &ts : NULL));
12018 /*
12019 * If the call is interrupted by a signal handler, it fails
12020 * with error -TARGET_EINTR and, if arg4 is not NULL and arg2 is not
12021 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12022 */
12023 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12024 host_to_target_timespec(arg4, &ts)) {
12025 return -TARGET_EFAULT;
12026 }
12028 return ret;
12029 }
12030 #endif
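/*
 * Editor's illustration (not in the original source): the guest-side
 * idiom that the remaining-time write-back above supports. Note that
 * clock_nanosleep() returns the error number directly instead of
 * setting errno:
 *
 *   struct timespec req = { .tv_sec = 2, .tv_nsec = 0 }, rem;
 *   while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR) {
 *       req = rem;  // resume sleeping for the unslept remainder
 *   }
 */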
12031 #ifdef TARGET_NR_clock_nanosleep_time64
12032 case TARGET_NR_clock_nanosleep_time64:
12033 {
12034 struct timespec ts;
12036 if (target_to_host_timespec64(&ts, arg3)) {
12037 return -TARGET_EFAULT;
12038 }
12040 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12041 &ts, arg4 ? &ts : NULL));
12043 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12044 host_to_target_timespec64(arg4, &ts)) {
12045 return -TARGET_EFAULT;
12046 }
12047 return ret;
12048 }
12049 #endif
12051 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12052 case TARGET_NR_set_tid_address:
12053 return get_errno(set_tid_address((int *)g2h(arg1)));
12054 #endif
12056 case TARGET_NR_tkill:
12057 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12059 case TARGET_NR_tgkill:
12060 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12061 target_to_host_signal(arg3)));
12063 #ifdef TARGET_NR_set_robust_list
12064 case TARGET_NR_set_robust_list:
12065 case TARGET_NR_get_robust_list:
12066 /* The ABI for supporting robust futexes has userspace pass
12067 * the kernel a pointer to a linked list which is updated by
12068 * userspace after the syscall; the list is walked by the kernel
12069 * when the thread exits. Since the linked list in QEMU guest
12070 * memory isn't a valid linked list for the host and we have
12071 * no way to reliably intercept the thread-death event, we can't
12072 * support these. Silently return ENOSYS so that guest userspace
12073 * falls back to a non-robust futex implementation (which should
12074 * be OK except in the corner case of the guest crashing while
12075 * holding a mutex that is shared with another process via
12076 * shared memory).
12077 */
12078 return -TARGET_ENOSYS;
12079 #endif
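/*
 * Editor's sketch (not in the original source) of the guest-side ABI
 * being declined here, using the field names from linux/futex.h; the
 * lock type and its futex member are hypothetical:
 *
 *   struct robust_list_head head = {
 *       .list = { &head.list },   // initially an empty circular list
 *       .futex_offset = offsetof(struct my_lock, futex),
 *       .list_op_pending = NULL,
 *   };
 *   syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * The kernel walks this list in *guest* memory when the thread dies,
 * which QEMU cannot arrange for the host kernel, hence the ENOSYS.
 */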
12081 #if defined(TARGET_NR_utimensat)
12082 case TARGET_NR_utimensat:
12083 {
12084 struct timespec *tsp, ts[2];
12085 if (!arg3) {
12086 tsp = NULL;
12087 } else {
12088 if (target_to_host_timespec(ts, arg3)) {
12089 return -TARGET_EFAULT;
12090 }
12091 if (target_to_host_timespec(ts + 1, arg3 +
12092 sizeof(struct target_timespec))) {
12093 return -TARGET_EFAULT;
12094 }
12095 tsp = ts;
12096 }
12097 if (!arg2) {
12098 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12099 } else {
12100 if (!(p = lock_user_string(arg2))) {
12101 return -TARGET_EFAULT;
12102 }
12103 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12104 unlock_user(p, arg2, 0);
12105 }
12106 }
12107 return ret;
12108 #endif
12109 #ifdef TARGET_NR_futex
12110 case TARGET_NR_futex:
12111 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12112 #endif
12113 #ifdef TARGET_NR_futex_time64
12114 case TARGET_NR_futex_time64:
12115 return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12116 #endif
12117 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12118 case TARGET_NR_inotify_init:
12119 ret = get_errno(sys_inotify_init());
12120 if (ret >= 0) {
12121 fd_trans_register(ret, &target_inotify_trans);
12122 }
12123 return ret;
12124 #endif
12125 #ifdef CONFIG_INOTIFY1
12126 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12127 case TARGET_NR_inotify_init1:
12128 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12129 fcntl_flags_tbl)));
12130 if (ret >= 0) {
12131 fd_trans_register(ret, &target_inotify_trans);
12132 }
12133 return ret;
12134 #endif
12135 #endif
12136 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12137 case TARGET_NR_inotify_add_watch:
12138 p = lock_user_string(arg2);
12139 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12140 unlock_user(p, arg2, 0);
12141 return ret;
12142 #endif
12143 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12144 case TARGET_NR_inotify_rm_watch:
12145 return get_errno(sys_inotify_rm_watch(arg1, arg2));
12146 #endif
12148 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12149 case TARGET_NR_mq_open:
12150 {
12151 struct mq_attr posix_mq_attr;
12152 struct mq_attr *pposix_mq_attr;
12153 int host_flags;
12155 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12156 pposix_mq_attr = NULL;
12157 if (arg4) {
12158 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12159 return -TARGET_EFAULT;
12160 }
12161 pposix_mq_attr = &posix_mq_attr;
12162 }
12163 p = lock_user_string(arg1 - 1);
12164 if (!p) {
12165 return -TARGET_EFAULT;
12166 }
12167 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12168 unlock_user(p, arg1, 0);
12170 return ret;
12171 }
12172 case TARGET_NR_mq_unlink:
12173 p = lock_user_string(arg1 - 1);
12174 if (!p) {
12175 return -TARGET_EFAULT;
12176 }
12177 ret = get_errno(mq_unlink(p));
12178 unlock_user(p, arg1, 0);
12179 return ret;
12181 #ifdef TARGET_NR_mq_timedsend
12182 case TARGET_NR_mq_timedsend:
12183 {
12184 struct timespec ts;
12186 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12187 if (arg5 != 0) {
12188 if (target_to_host_timespec(&ts, arg5)) {
12189 return -TARGET_EFAULT;
12190 }
12191 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12192 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12193 return -TARGET_EFAULT;
12194 }
12195 } else {
12196 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12197 }
12198 unlock_user(p, arg2, arg3);
12199 }
12200 return ret;
12201 #endif
12202 #ifdef TARGET_NR_mq_timedsend_time64
12203 case TARGET_NR_mq_timedsend_time64:
12204 {
12205 struct timespec ts;
12207 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12208 if (arg5 != 0) {
12209 if (target_to_host_timespec64(&ts, arg5)) {
12210 return -TARGET_EFAULT;
12211 }
12212 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12213 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12214 return -TARGET_EFAULT;
12215 }
12216 } else {
12217 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12218 }
12219 unlock_user(p, arg2, arg3);
12220 }
12221 return ret;
12222 #endif
12224 #ifdef TARGET_NR_mq_timedreceive
12225 case TARGET_NR_mq_timedreceive:
12226 {
12227 struct timespec ts;
12228 unsigned int prio;
12230 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12231 if (arg5 != 0) {
12232 if (target_to_host_timespec(&ts, arg5)) {
12233 return -TARGET_EFAULT;
12234 }
12235 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12236 &prio, &ts));
12237 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12238 return -TARGET_EFAULT;
12239 }
12240 } else {
12241 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12242 &prio, NULL));
12243 }
12244 unlock_user(p, arg2, arg3);
12245 if (arg4 != 0)
12246 put_user_u32(prio, arg4);
12247 }
12248 return ret;
12249 #endif
12250 #ifdef TARGET_NR_mq_timedreceive_time64
12251 case TARGET_NR_mq_timedreceive_time64:
12252 {
12253 struct timespec ts;
12254 unsigned int prio;
12256 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12257 if (arg5 != 0) {
12258 if (target_to_host_timespec64(&ts, arg5)) {
12259 return -TARGET_EFAULT;
12260 }
12261 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12262 &prio, &ts));
12263 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12264 return -TARGET_EFAULT;
12265 }
12266 } else {
12267 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12268 &prio, NULL));
12269 }
12270 unlock_user(p, arg2, arg3);
12271 if (arg4 != 0) {
12272 put_user_u32(prio, arg4);
12273 }
12274 }
12275 return ret;
12276 #endif
12278 /* Not implemented for now... */
12279 /* case TARGET_NR_mq_notify: */
12280 /* break; */
12282 case TARGET_NR_mq_getsetattr:
12283 {
12284 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12285 ret = 0;
12286 if (arg2 != 0) {
12287 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12288 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12289 &posix_mq_attr_out));
12290 } else if (arg3 != 0) {
12291 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12292 }
12293 if (ret == 0 && arg3 != 0) {
12294 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12295 }
12296 }
12297 return ret;
12298 #endif
12300 #ifdef CONFIG_SPLICE
12301 #ifdef TARGET_NR_tee
12302 case TARGET_NR_tee:
12303 {
12304 ret = get_errno(tee(arg1, arg2, arg3, arg4));
12305 }
12306 return ret;
12307 #endif
12308 #ifdef TARGET_NR_splice
12309 case TARGET_NR_splice:
12310 {
12311 loff_t loff_in, loff_out;
12312 loff_t *ploff_in = NULL, *ploff_out = NULL;
12313 if (arg2) {
12314 if (get_user_u64(loff_in, arg2)) {
12315 return -TARGET_EFAULT;
12316 }
12317 ploff_in = &loff_in;
12318 }
12319 if (arg4) {
12320 if (get_user_u64(loff_out, arg4)) {
12321 return -TARGET_EFAULT;
12322 }
12323 ploff_out = &loff_out;
12324 }
12325 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12326 if (arg2) {
12327 if (put_user_u64(loff_in, arg2)) {
12328 return -TARGET_EFAULT;
12329 }
12330 }
12331 if (arg4) {
12332 if (put_user_u64(loff_out, arg4)) {
12333 return -TARGET_EFAULT;
12334 }
12335 }
12336 }
12337 return ret;
12338 #endif
12339 #ifdef TARGET_NR_vmsplice
12340 case TARGET_NR_vmsplice:
12341 {
12342 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12343 if (vec != NULL) {
12344 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12345 unlock_iovec(vec, arg2, arg3, 0);
12346 } else {
12347 ret = -host_to_target_errno(errno);
12348 }
12349 }
12350 return ret;
12351 #endif
12352 #endif /* CONFIG_SPLICE */
12353 #ifdef CONFIG_EVENTFD
12354 #if defined(TARGET_NR_eventfd)
12355 case TARGET_NR_eventfd:
12356 ret = get_errno(eventfd(arg1, 0));
12357 if (ret >= 0) {
12358 fd_trans_register(ret, &target_eventfd_trans);
12359 }
12360 return ret;
12361 #endif
12362 #if defined(TARGET_NR_eventfd2)
12363 case TARGET_NR_eventfd2:
12364 {
12365 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12366 if (arg2 & TARGET_O_NONBLOCK) {
12367 host_flags |= O_NONBLOCK;
12368 }
12369 if (arg2 & TARGET_O_CLOEXEC) {
12370 host_flags |= O_CLOEXEC;
12371 }
12372 ret = get_errno(eventfd(arg1, host_flags));
12373 if (ret >= 0) {
12374 fd_trans_register(ret, &target_eventfd_trans);
12375 }
12376 return ret;
12377 }
12378 #endif
12379 #endif /* CONFIG_EVENTFD */
12380 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12381 case TARGET_NR_fallocate:
12382 #if TARGET_ABI_BITS == 32
12383 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12384 target_offset64(arg5, arg6)));
12385 #else
12386 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12387 #endif
12388 return ret;
12389 #endif
12390 #if defined(CONFIG_SYNC_FILE_RANGE)
12391 #if defined(TARGET_NR_sync_file_range)
12392 case TARGET_NR_sync_file_range:
12393 #if TARGET_ABI_BITS == 32
12394 #if defined(TARGET_MIPS)
12395 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12396 target_offset64(arg5, arg6), arg7));
12397 #else
12398 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12399 target_offset64(arg4, arg5), arg6));
12400 #endif /* !TARGET_MIPS */
12401 #else
12402 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12403 #endif
12404 return ret;
12405 #endif
12406 #if defined(TARGET_NR_sync_file_range2) || \
12407 defined(TARGET_NR_arm_sync_file_range)
12408 #if defined(TARGET_NR_sync_file_range2)
12409 case TARGET_NR_sync_file_range2:
12410 #endif
12411 #if defined(TARGET_NR_arm_sync_file_range)
12412 case TARGET_NR_arm_sync_file_range:
12413 #endif
12414 /* This is like sync_file_range but the arguments are reordered */
12415 #if TARGET_ABI_BITS == 32
12416 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12417 target_offset64(arg5, arg6), arg2));
12418 #else
12419 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12420 #endif
12421 return ret;
12422 #endif
12423 #endif
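/*
 * Editor's note (not in the original source): sync_file_range2() exists
 * because moving 'flags' up to argument 2 lets the two 64-bit values
 * start on aligned register pairs on ABIs such as ARM EABI. The two
 * layouts side by side:
 *
 *   sync_file_range(fd, off_hi/lo, nbytes_hi/lo, flags)
 *   sync_file_range2(fd, flags, off_hi/lo, nbytes_hi/lo)
 */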
12424 #if defined(TARGET_NR_signalfd4)
12425 case TARGET_NR_signalfd4:
12426 return do_signalfd4(arg1, arg2, arg4);
12427 #endif
12428 #if defined(TARGET_NR_signalfd)
12429 case TARGET_NR_signalfd:
12430 return do_signalfd4(arg1, arg2, 0);
12431 #endif
12432 #if defined(CONFIG_EPOLL)
12433 #if defined(TARGET_NR_epoll_create)
12434 case TARGET_NR_epoll_create:
12435 return get_errno(epoll_create(arg1));
12436 #endif
12437 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12438 case TARGET_NR_epoll_create1:
12439 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12440 #endif
12441 #if defined(TARGET_NR_epoll_ctl)
12442 case TARGET_NR_epoll_ctl:
12443 {
12444 struct epoll_event ep;
12445 struct epoll_event *epp = 0;
12446 if (arg4) {
12447 struct target_epoll_event *target_ep;
12448 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12449 return -TARGET_EFAULT;
12450 }
12451 ep.events = tswap32(target_ep->events);
12452 /* The epoll_data_t union is just opaque data to the kernel,
12453 * so we transfer all 64 bits across and need not worry what
12454 * actual data type it is.
12455 */
12456 ep.data.u64 = tswap64(target_ep->data.u64);
12457 unlock_user_struct(target_ep, arg4, 0);
12458 epp = &ep;
12459 }
12460 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12461 }
12462 #endif
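/*
 * Editor's illustration (not in the original source) of why one
 * tswap64() is enough above: every member of the union aliases the same
 * 64 bits and only the guest ever interprets them (layout as declared in
 * sys/epoll.h):
 *
 *   typedef union epoll_data {
 *       void *ptr;
 *       int fd;
 *       uint32_t u32;
 *       uint64_t u64;
 *   } epoll_data_t;
 *
 * Swapping data.u64 therefore round-trips whichever member the guest
 * stored, without knowing which one it was.
 */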
12464 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12465 #if defined(TARGET_NR_epoll_wait)
12466 case TARGET_NR_epoll_wait:
12467 #endif
12468 #if defined(TARGET_NR_epoll_pwait)
12469 case TARGET_NR_epoll_pwait:
12470 #endif
12471 {
12472 struct target_epoll_event *target_ep;
12473 struct epoll_event *ep;
12474 int epfd = arg1;
12475 int maxevents = arg3;
12476 int timeout = arg4;
12478 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12479 return -TARGET_EINVAL;
12480 }
12482 target_ep = lock_user(VERIFY_WRITE, arg2,
12483 maxevents * sizeof(struct target_epoll_event), 1);
12484 if (!target_ep) {
12485 return -TARGET_EFAULT;
12486 }
12488 ep = g_try_new(struct epoll_event, maxevents);
12489 if (!ep) {
12490 unlock_user(target_ep, arg2, 0);
12491 return -TARGET_ENOMEM;
12492 }
12494 switch (num) {
12495 #if defined(TARGET_NR_epoll_pwait)
12496 case TARGET_NR_epoll_pwait:
12497 {
12498 target_sigset_t *target_set;
12499 sigset_t _set, *set = &_set;
12501 if (arg5) {
12502 if (arg6 != sizeof(target_sigset_t)) {
12503 ret = -TARGET_EINVAL;
12504 break;
12505 }
12507 target_set = lock_user(VERIFY_READ, arg5,
12508 sizeof(target_sigset_t), 1);
12509 if (!target_set) {
12510 ret = -TARGET_EFAULT;
12511 break;
12512 }
12513 target_to_host_sigset(set, target_set);
12514 unlock_user(target_set, arg5, 0);
12515 } else {
12516 set = NULL;
12517 }
12519 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12520 set, SIGSET_T_SIZE));
12521 break;
12522 }
12523 #endif
12524 #if defined(TARGET_NR_epoll_wait)
12525 case TARGET_NR_epoll_wait:
12526 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12527 NULL, 0));
12528 break;
12529 #endif
12530 default:
12531 ret = -TARGET_ENOSYS;
12532 }
12533 if (!is_error(ret)) {
12534 int i;
12535 for (i = 0; i < ret; i++) {
12536 target_ep[i].events = tswap32(ep[i].events);
12537 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12538 }
12539 unlock_user(target_ep, arg2,
12540 ret * sizeof(struct target_epoll_event));
12541 } else {
12542 unlock_user(target_ep, arg2, 0);
12543 }
12544 g_free(ep);
12545 return ret;
12546 }
12547 #endif
12548 #endif
12549 #ifdef TARGET_NR_prlimit64
12550 case TARGET_NR_prlimit64:
12551 {
12552 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12553 struct target_rlimit64 *target_rnew, *target_rold;
12554 struct host_rlimit64 rnew, rold, *rnewp = 0;
12555 int resource = target_to_host_resource(arg2);
12557 if (arg3 && (resource != RLIMIT_AS &&
12558 resource != RLIMIT_DATA &&
12559 resource != RLIMIT_STACK)) {
12560 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12561 return -TARGET_EFAULT;
12562 }
12563 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12564 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12565 unlock_user_struct(target_rnew, arg3, 0);
12566 rnewp = &rnew;
12567 }
12569 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12570 if (!is_error(ret) && arg4) {
12571 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12572 return -TARGET_EFAULT;
12573 }
12574 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12575 target_rold->rlim_max = tswap64(rold.rlim_max);
12576 unlock_user_struct(target_rold, arg4, 1);
12577 }
12578 return ret;
12579 }
12580 #endif
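/*
 * Editor's note (not in the original source): the filter above
 * intentionally drops the *new* limit for RLIMIT_AS, RLIMIT_DATA and
 * RLIMIT_STACK (rnewp stays NULL), because QEMU itself needs more host
 * address space than the guest asks for. Queries still pass through, so
 * a guest can do, e.g.:
 *
 *   struct rlimit64 old;
 *   prlimit64(0, RLIMIT_STACK, NULL, &old);  // read-only, never filtered
 */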
12581 #ifdef TARGET_NR_gethostname
12582 case TARGET_NR_gethostname:
12583 {
12584 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12585 if (name) {
12586 ret = get_errno(gethostname(name, arg2));
12587 unlock_user(name, arg1, arg2);
12588 } else {
12589 ret = -TARGET_EFAULT;
12590 }
12591 return ret;
12592 }
12593 #endif
12594 #ifdef TARGET_NR_atomic_cmpxchg_32
12595 case TARGET_NR_atomic_cmpxchg_32:
12596 {
12597 /* should use start_exclusive from main.c */
12598 abi_ulong mem_value;
12599 if (get_user_u32(mem_value, arg6)) {
12600 target_siginfo_t info;
12601 info.si_signo = SIGSEGV;
12602 info.si_errno = 0;
12603 info.si_code = TARGET_SEGV_MAPERR;
12604 info._sifields._sigfault._addr = arg6;
12605 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12606 QEMU_SI_FAULT, &info);
12607 ret = 0xdeadbeef;
12609 }
12610 if (mem_value == arg2)
12611 put_user_u32(arg1, arg6);
12612 return mem_value;
12613 }
12614 #endif
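/*
 * Editor's sketch (not in the original source) of the race the
 * "start_exclusive" comment alludes to: another guest thread can store
 * to *arg6 between the get_user_u32() read and the put_user_u32() write,
 * and that store is then lost. Stopping all other vCPUs would make the
 * sequence atomic, roughly:
 *
 *   start_exclusive();           // quiesce the other cpus
 *   if (mem_value == arg2) {
 *       put_user_u32(arg1, arg6);
 *   }
 *   end_exclusive();
 */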
12615 #ifdef TARGET_NR_atomic_barrier
12616 case TARGET_NR_atomic_barrier:
12617 /* Like the kernel implementation and the qemu arm barrier,
12618 treat this as a no-op. */
12619 return 0;
12620 #endif
12622 #ifdef TARGET_NR_timer_create
12623 case TARGET_NR_timer_create:
12624 {
12625 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12627 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12629 int clkid = arg1;
12630 int timer_index = next_free_host_timer();
12632 if (timer_index < 0) {
12633 ret = -TARGET_EAGAIN;
12634 } else {
12635 timer_t *phtimer = g_posix_timers + timer_index;
12637 if (arg2) {
12638 phost_sevp = &host_sevp;
12639 ret = target_to_host_sigevent(phost_sevp, arg2);
12640 if (ret != 0) {
12641 return ret;
12642 }
12643 }
12645 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12646 if (ret) {
12647 phtimer = NULL;
12648 } else {
12649 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12650 return -TARGET_EFAULT;
12651 }
12652 }
12653 }
12654 return ret;
12655 }
12656 #endif
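/*
 * Editor's note (not in the original source): the guest never sees a
 * host timer_t; it receives a small table index tagged with TIMER_MAGIC,
 * which get_timer_id() is expected to validate and strip on the way back
 * in. Roughly:
 *
 *   guest_timer = TIMER_MAGIC | timer_index;   // handed out here
 *   timer_index = guest_timer & 0xffff;        // recovered, range-checked
 */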
12658 #ifdef TARGET_NR_timer_settime
12659 case TARGET_NR_timer_settime:
12660 {
12661 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12662 * struct itimerspec * old_value */
12663 target_timer_t timerid = get_timer_id(arg1);
12665 if (timerid < 0) {
12666 ret = timerid;
12667 } else if (arg3 == 0) {
12668 ret = -TARGET_EINVAL;
12669 } else {
12670 timer_t htimer = g_posix_timers[timerid];
12671 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12673 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12674 return -TARGET_EFAULT;
12675 }
12676 ret = get_errno(
12677 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12678 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12679 return -TARGET_EFAULT;
12680 }
12681 }
12682 return ret;
12683 }
12684 #endif
12686 #ifdef TARGET_NR_timer_settime64
12687 case TARGET_NR_timer_settime64:
12688 {
12689 target_timer_t timerid = get_timer_id(arg1);
12691 if (timerid < 0) {
12692 ret = timerid;
12693 } else if (arg3 == 0) {
12694 ret = -TARGET_EINVAL;
12695 } else {
12696 timer_t htimer = g_posix_timers[timerid];
12697 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12699 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12700 return -TARGET_EFAULT;
12701 }
12702 ret = get_errno(
12703 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12704 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12705 return -TARGET_EFAULT;
12706 }
12707 }
12708 return ret;
12709 }
12710 #endif
12712 #ifdef TARGET_NR_timer_gettime
12713 case TARGET_NR_timer_gettime:
12714 {
12715 /* args: timer_t timerid, struct itimerspec *curr_value */
12716 target_timer_t timerid = get_timer_id(arg1);
12718 if (timerid < 0) {
12719 ret = timerid;
12720 } else if (!arg2) {
12721 ret = -TARGET_EFAULT;
12722 } else {
12723 timer_t htimer = g_posix_timers[timerid];
12724 struct itimerspec hspec;
12725 ret = get_errno(timer_gettime(htimer, &hspec));
12727 if (host_to_target_itimerspec(arg2, &hspec)) {
12728 ret = -TARGET_EFAULT;
12729 }
12730 }
12731 return ret;
12732 }
12733 #endif
12735 #ifdef TARGET_NR_timer_gettime64
12736 case TARGET_NR_timer_gettime64:
12737 {
12738 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12739 target_timer_t timerid = get_timer_id(arg1);
12741 if (timerid < 0) {
12742 ret = timerid;
12743 } else if (!arg2) {
12744 ret = -TARGET_EFAULT;
12745 } else {
12746 timer_t htimer = g_posix_timers[timerid];
12747 struct itimerspec hspec;
12748 ret = get_errno(timer_gettime(htimer, &hspec));
12750 if (host_to_target_itimerspec64(arg2, &hspec)) {
12751 ret = -TARGET_EFAULT;
12752 }
12753 }
12754 return ret;
12755 }
12756 #endif
12758 #ifdef TARGET_NR_timer_getoverrun
12759 case TARGET_NR_timer_getoverrun:
12760 {
12761 /* args: timer_t timerid */
12762 target_timer_t timerid = get_timer_id(arg1);
12764 if (timerid < 0) {
12765 ret = timerid;
12766 } else {
12767 timer_t htimer = g_posix_timers[timerid];
12768 ret = get_errno(timer_getoverrun(htimer));
12769 }
12770 return ret;
12771 }
12772 #endif
12774 #ifdef TARGET_NR_timer_delete
12775 case TARGET_NR_timer_delete:
12776 {
12777 /* args: timer_t timerid */
12778 target_timer_t timerid = get_timer_id(arg1);
12780 if (timerid < 0) {
12781 ret = timerid;
12782 } else {
12783 timer_t htimer = g_posix_timers[timerid];
12784 ret = get_errno(timer_delete(htimer));
12785 g_posix_timers[timerid] = 0;
12786 }
12787 return ret;
12788 }
12789 #endif
12791 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12792 case TARGET_NR_timerfd_create:
12793 return get_errno(timerfd_create(arg1,
12794 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12795 #endif
12797 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12798 case TARGET_NR_timerfd_gettime:
12799 {
12800 struct itimerspec its_curr;
12802 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12804 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12805 return -TARGET_EFAULT;
12806 }
12807 }
12808 return ret;
12809 #endif
12811 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12812 case TARGET_NR_timerfd_gettime64:
12813 {
12814 struct itimerspec its_curr;
12816 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12818 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12819 return -TARGET_EFAULT;
12820 }
12821 }
12822 return ret;
12823 #endif
12825 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12826 case TARGET_NR_timerfd_settime:
12827 {
12828 struct itimerspec its_new, its_old, *p_new;
12830 if (arg3) {
12831 if (target_to_host_itimerspec(&its_new, arg3)) {
12832 return -TARGET_EFAULT;
12833 }
12834 p_new = &its_new;
12835 } else {
12836 p_new = NULL;
12837 }
12839 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12841 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12842 return -TARGET_EFAULT;
12843 }
12844 }
12845 return ret;
12846 #endif
12848 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12849 case TARGET_NR_timerfd_settime64:
12850 {
12851 struct itimerspec its_new, its_old, *p_new;
12853 if (arg3) {
12854 if (target_to_host_itimerspec64(&its_new, arg3)) {
12855 return -TARGET_EFAULT;
12856 }
12857 p_new = &its_new;
12858 } else {
12859 p_new = NULL;
12860 }
12862 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12864 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12865 return -TARGET_EFAULT;
12866 }
12867 }
12868 return ret;
12869 #endif
12871 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12872 case TARGET_NR_ioprio_get:
12873 return get_errno(ioprio_get(arg1, arg2));
12874 #endif
12876 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12877 case TARGET_NR_ioprio_set:
12878 return get_errno(ioprio_set(arg1, arg2, arg3));
12879 #endif
12881 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12882 case TARGET_NR_setns:
12883 return get_errno(setns(arg1, arg2));
12884 #endif
12885 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12886 case TARGET_NR_unshare:
12887 return get_errno(unshare(arg1));
12888 #endif
12889 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12890 case TARGET_NR_kcmp:
12891 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12892 #endif
12893 #ifdef TARGET_NR_swapcontext
12894 case TARGET_NR_swapcontext:
12895 /* PowerPC specific. */
12896 return do_swapcontext(cpu_env, arg1, arg2, arg3);
12897 #endif
12898 #ifdef TARGET_NR_memfd_create
12899 case TARGET_NR_memfd_create:
12900 p = lock_user_string(arg1);
12901 if (!p) {
12902 return -TARGET_EFAULT;
12903 }
12904 ret = get_errno(memfd_create(p, arg2));
12905 fd_trans_unregister(ret);
12906 unlock_user(p, arg1, 0);
12907 return ret;
12908 #endif
12909 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12910 case TARGET_NR_membarrier:
12911 return get_errno(membarrier(arg1, arg2));
12912 #endif
12914 default:
12915 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12916 return -TARGET_ENOSYS;
12917 }
12918 return ret;
12919 }
12921 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12922 abi_long arg2, abi_long arg3, abi_long arg4,
12923 abi_long arg5, abi_long arg6, abi_long arg7,
12924 abi_long arg8)
12925 {
12926 CPUState *cpu = env_cpu(cpu_env);
12927 abi_long ret;
12929 #ifdef DEBUG_ERESTARTSYS
12930 /* Debug-only code for exercising the syscall-restart code paths
12931 * in the per-architecture cpu main loops: restart every syscall
12932 * the guest makes once before letting it through.
12933 */
12934 {
12935 static bool flag;
12936 flag = !flag;
12937 if (flag) {
12938 return -TARGET_ERESTARTSYS;
12939 }
12940 }
12941 #endif
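/*
 * Editor's note (not in the original source): -TARGET_ERESTARTSYS never
 * reaches the guest. Each architecture's cpu_loop() treats it as "undo
 * this syscall" and winds the guest PC back over the trap instruction so
 * it re-executes; a sketch of the ARM flavour:
 *
 *   if (ret == -TARGET_ERESTARTSYS) {
 *       env->regs[15] -= env->thumb ? 2 : 4;  // re-run the SVC
 *   }
 */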
12943 record_syscall_start(cpu, num, arg1,
12944 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12946 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12947 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
12948 }
12950 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12951 arg5, arg6, arg7, arg8);
12953 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12954 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
12955 arg3, arg4, arg5, arg6);
12956 }
12958 record_syscall_return(cpu, num, ret);
12959 return ret;
12960 }