vfio: Allow hotplug of containers onto existing guest IOMMU mappings
[qemu/kevin.git] / linux-user / syscall.c
blob98b5766d4adff0af5abc58246f9befeaba25029e
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef TARGET_GPROF
73 #include <sys/gmon.h>
74 #endif
75 #ifdef CONFIG_EVENTFD
76 #include <sys/eventfd.h>
77 #endif
78 #ifdef CONFIG_EPOLL
79 #include <sys/epoll.h>
80 #endif
81 #ifdef CONFIG_ATTR
82 #include "qemu/xattr.h"
83 #endif
84 #ifdef CONFIG_SENDFILE
85 #include <sys/sendfile.h>
86 #endif
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #include <linux/vt.h>
108 #include <linux/dm-ioctl.h>
109 #include <linux/reboot.h>
110 #include <linux/route.h>
111 #include <linux/filter.h>
112 #include <linux/blkpg.h>
113 #include "linux_loop.h"
114 #include "uname.h"
116 #include "qemu.h"
118 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
119 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
121 //#define DEBUG
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
/*
 * Generators for thin host-syscall wrappers: _syscallN(type, name, ...)
 * expands to a static function 'name' taking N typed arguments and
 * invoking the raw host syscall __NR_<name> via syscall(2).
 * Errors are reported the libc way: -1 return with errno set.
 */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_futex __NR_futex
193 #define __NR_sys_inotify_init __NR_inotify_init
194 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
195 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
197 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
198 defined(__s390x__)
199 #define __NR__llseek __NR_lseek
200 #endif
202 /* Newer kernel ports have llseek() instead of _llseek() */
203 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
204 #define TARGET_NR__llseek TARGET_NR_llseek
205 #endif
207 #ifdef __NR_gettid
208 _syscall0(int, gettid)
209 #else
210 /* This is a replacement for the host gettid() and must return a host
211 errno. */
212 static int gettid(void) {
213 return -ENOSYS;
215 #endif
216 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #endif
219 #if !defined(__NR_getdents) || \
220 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
221 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
222 #endif
223 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
224 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
225 loff_t *, res, uint, wh);
226 #endif
227 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
228 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
229 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
230 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
231 #endif
232 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
233 _syscall2(int,sys_tkill,int,tid,int,sig)
234 #endif
235 #ifdef __NR_exit_group
236 _syscall1(int,exit_group,int,error_code)
237 #endif
238 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
239 _syscall1(int,set_tid_address,int *,tidptr)
240 #endif
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
253 _syscall2(int, capget, struct __user_cap_header_struct *, header,
254 struct __user_cap_data_struct *, data);
255 _syscall2(int, capset, struct __user_cap_header_struct *, header,
256 struct __user_cap_data_struct *, data);
257 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
258 _syscall2(int, ioprio_get, int, which, int, who)
259 #endif
260 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
261 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
262 #endif
/*
 * Translation table for open(2)/fcntl(2) flag bits between target and
 * host encodings.  Each row is { target_mask, target_bits, host_mask,
 * host_bits }; the table is walked by the generic bitmask translator
 * and must end with an all-zero sentinel row.
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
/*
 * Host getcwd() wrapper used by the syscall dispatcher.
 *
 * On success, returns the length of the path written to 'buf'
 * including the terminating NUL (the kernel getcwd convention).
 * On failure, returns -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* errno has already been set by getcwd() */
        return -1;
    }
    return strlen(cwd) + 1;
}
306 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
309 * open(2) has extra parameter 'mode' when called with
310 * flag O_CREAT.
312 if ((flags & O_CREAT) != 0) {
313 return (openat(dirfd, pathname, flags, mode));
315 return (openat(dirfd, pathname, flags));
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc has utimensat(): a NULL pathname means "operate on dirfd
 * itself", which maps to futimens(), mirroring the kernel semantics. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper available: call the raw host syscall directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail like an unimplemented syscall. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin host wrappers so the syscall table can reference uniform sys_*
 * names; each simply forwards to the libc inotify function. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
379 #if defined(TARGET_NR_ppoll)
380 #ifndef __NR_ppoll
381 # define __NR_ppoll -1
382 #endif
383 #define __NR_sys_ppoll __NR_ppoll
384 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
385 struct timespec *, timeout, const sigset_t *, sigmask,
386 size_t, sigsetsize)
387 #endif
389 #if defined(TARGET_NR_pselect6)
390 #ifndef __NR_pselect6
391 # define __NR_pselect6 -1
392 #endif
393 #define __NR_sys_pselect6 __NR_pselect6
394 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
395 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
396 #endif
398 #if defined(TARGET_NR_prlimit64)
399 #ifndef __NR_prlimit64
400 # define __NR_prlimit64 -1
401 #endif
402 #define __NR_sys_prlimit64 __NR_prlimit64
403 /* The glibc rlimit structure may not be that used by the underlying syscall */
404 struct host_rlimit64 {
405 uint64_t rlim_cur;
406 uint64_t rlim_max;
408 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
409 const struct host_rlimit64 *, new_limit,
410 struct host_rlimit64 *, old_limit)
411 #endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
/* A slot value of 0 means free; any non-zero value means in use. */
static timer_t g_posix_timers[32] = { 0, } ;

/*
 * Claim the first free slot in g_posix_timers[] and return its index,
 * or -1 if all slots are busy.  The slot is pre-marked with the dummy
 * value 1 so a concurrent caller is less likely to pick the same slot
 * before timer_create() stores the real timer id.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* Not thread-safe: check-then-set is not atomic (see FIXME). */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* Returns non-zero when 64-bit syscall arguments must start on an
 * even-numbered register pair for the given guest CPU state. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Alignment is only required under the ARM EABI calling convention. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
/* Size of both errno translation tables; host/target errnos outside
 * this range cannot be translated. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 * Entries left at 0 translate identically (host value == target value).
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
};
568 static inline int host_to_target_errno(int err)
570 if(host_to_target_errno_table[err])
571 return host_to_target_errno_table[err];
572 return err;
575 static inline int target_to_host_errno(int err)
577 if (target_to_host_errno_table[err])
578 return target_to_host_errno_table[err];
579 return err;
582 static inline abi_long get_errno(abi_long ret)
584 if (ret == -1)
585 return -host_to_target_errno(errno);
586 else
587 return ret;
590 static inline int is_error(abi_long ret)
592 return (abi_ulong)ret >= (abi_ulong)(-4096);
595 char *target_strerror(int err)
597 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
598 return NULL;
600 return strerror(target_to_host_errno(err));
/*
 * Convert a socket type value (SOCK_STREAM/SOCK_DGRAM plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK modifier flags) from host to target
 * encoding.  Unknown base types are passed through unchanged.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    /* Low nibble carries the base socket type. */
    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
/* Guest heap state: current break, the break at program start, and the
 * highest host page already reserved for the heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the guest's initial program break (called at image load). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
/*
 * Emulate the brk(2) syscall for the guest: grow or shrink the guest
 * heap, mapping new host pages when needed.  On any failure the
 * previous break is returned unchanged (except on Alpha, which gets a
 * real errno).
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: just report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}
/*
 * Unmarshal a guest fd_set (packed abi_ulong bitmap at target_fds_addr,
 * 'n' descriptors) into the host fd_set 'fds'.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words needed to hold n bits, rounded up. */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
757 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
758 abi_ulong target_fds_addr,
759 int n)
761 if (target_fds_addr) {
762 if (copy_from_user_fdset(fds, target_fds_addr, n))
763 return -TARGET_EFAULT;
764 *fds_ptr = fds;
765 } else {
766 *fds_ptr = NULL;
768 return 0;
/*
 * Marshal the host fd_set 'fds' ('n' descriptors) back into the guest's
 * packed abi_ulong bitmap at target_fds_addr.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    /* Number of abi_ulong words needed to hold n bits, rounded up. */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
/* Host clock tick rate (ticks per second) used by times(2)-style values. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t from host ticks-per-second to the target's HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits before scaling to avoid intermediate overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/*
 * Copy a host struct rusage into the guest's struct target_rusage at
 * target_addr, byte-swapping each field for the target ABI.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
/*
 * Convert a target rlimit value (already in guest byte order) to the
 * host's rlim_t.  TARGET_RLIM_INFINITY, and any value that does not
 * survive a round-trip through rlim_t, map to RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Truncation check: if the value doesn't fit in rlim_t, saturate. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
/*
 * Convert a host rlim_t to the target's encoding (guest byte order).
 * RLIM_INFINITY, and any value too large for the target's abi_long,
 * become TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
/*
 * Map a target RLIMIT_* resource code to the host's value.
 * Unknown codes are passed through unchanged so the host syscall can
 * reject them itself.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/*
 * Read a guest struct target_timeval into a host struct timeval.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/*
 * Write a host struct timeval into a guest struct target_timeval.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/*
 * Read a guest struct target_timezone into a host struct timezone.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/*
 * Read a guest struct target_mq_attr into a host struct mq_attr.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/*
 * Write a host struct mq_attr into a guest struct target_mq_attr.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
/*
 * Emulate select(2): unmarshal the three guest fd_sets and the timeout,
 * run the host select(), then (on success) write the surviving sets and
 * remaining timeout back to guest memory.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* NULL guest timeout means "block indefinitely". */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the time not slept. */
        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
/*
 * Host pipe2() wrapper; returns -ENOSYS (a host errno, note: not a
 * target errno) when the host was built without pipe2 support.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/*
 * Emulate pipe(2)/pipe2(2).  For plain pipe() on targets with a
 * two-register return convention (Alpha/MIPS/SH4/SPARC), the second
 * descriptor is stored directly into guest CPU state and the first is
 * returned; otherwise both descriptors are written to the guest array
 * at 'pipedes'.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn into the host's struct ip_mreqn.
 *
 * The multicast and interface addresses are already in network byte
 * order and are copied verbatim; only the optional ifindex (present
 * only when the guest supplied a full ip_mreqn, i.e. len matches) needs
 * byte-swapping.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr at target_addr into the caller-supplied host
 * buffer.  The caller must allocate at least len + 1 bytes so that an
 * AF_UNIX sun_path can be NUL-terminated here if needed.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Path is non-empty and the byte just past it is not yet
             * counted: include the terminating NUL in the length.
             */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_PACKET) {
        /* sockaddr_ll carries extra multi-byte fields that need swapping. */
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1163 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1164 struct sockaddr *addr,
1165 socklen_t len)
1167 struct target_sockaddr *target_saddr;
1169 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1170 if (!target_saddr)
1171 return -TARGET_EFAULT;
1172 memcpy(target_saddr, addr, len);
1173 target_saddr->sa_family = tswap16(addr->sa_family);
1174 unlock_user(target_saddr, target_addr, len);
1176 return 0;
/* Convert the guest's control-message (cmsg) chain referenced by
 * target_msgh into the host chain pre-allocated in msgh, byte-swapping
 * headers and converting the payload types we know about (SCM_RIGHTS
 * file descriptors, SCM_CREDENTIALS).  Unknown payloads are copied
 * verbatim with a warning.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = total cmsg length minus the (aligned) header. */
        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: copy each int with byte-swap. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: best-effort byte copy, no swapping. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host control-message chain in msgh back into the guest's
 * buffer described by target_msgh, swapping headers and converting known
 * payload types.  If the guest buffer is too small the data is truncated
 * and MSG_CTRUNC is reported, mirroring the kernel's put_cmsg().
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra target
             * space so we never leak uninitialised bytes to the guest.
             */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1405 /* do_setsockopt() Must return target values and target errnos. */
1406 static abi_long do_setsockopt(int sockfd, int level, int optname,
1407 abi_ulong optval_addr, socklen_t optlen)
1409 abi_long ret;
1410 int val;
1411 struct ip_mreqn *ip_mreq;
1412 struct ip_mreq_source *ip_mreq_source;
1414 switch(level) {
1415 case SOL_TCP:
1416 /* TCP options all take an 'int' value. */
1417 if (optlen < sizeof(uint32_t))
1418 return -TARGET_EINVAL;
1420 if (get_user_u32(val, optval_addr))
1421 return -TARGET_EFAULT;
1422 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1423 break;
1424 case SOL_IP:
1425 switch(optname) {
1426 case IP_TOS:
1427 case IP_TTL:
1428 case IP_HDRINCL:
1429 case IP_ROUTER_ALERT:
1430 case IP_RECVOPTS:
1431 case IP_RETOPTS:
1432 case IP_PKTINFO:
1433 case IP_MTU_DISCOVER:
1434 case IP_RECVERR:
1435 case IP_RECVTOS:
1436 #ifdef IP_FREEBIND
1437 case IP_FREEBIND:
1438 #endif
1439 case IP_MULTICAST_TTL:
1440 case IP_MULTICAST_LOOP:
1441 val = 0;
1442 if (optlen >= sizeof(uint32_t)) {
1443 if (get_user_u32(val, optval_addr))
1444 return -TARGET_EFAULT;
1445 } else if (optlen >= 1) {
1446 if (get_user_u8(val, optval_addr))
1447 return -TARGET_EFAULT;
1449 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1450 break;
1451 case IP_ADD_MEMBERSHIP:
1452 case IP_DROP_MEMBERSHIP:
1453 if (optlen < sizeof (struct target_ip_mreq) ||
1454 optlen > sizeof (struct target_ip_mreqn))
1455 return -TARGET_EINVAL;
1457 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1458 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1459 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1460 break;
1462 case IP_BLOCK_SOURCE:
1463 case IP_UNBLOCK_SOURCE:
1464 case IP_ADD_SOURCE_MEMBERSHIP:
1465 case IP_DROP_SOURCE_MEMBERSHIP:
1466 if (optlen != sizeof (struct target_ip_mreq_source))
1467 return -TARGET_EINVAL;
1469 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1470 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1471 unlock_user (ip_mreq_source, optval_addr, 0);
1472 break;
1474 default:
1475 goto unimplemented;
1477 break;
1478 case SOL_IPV6:
1479 switch (optname) {
1480 case IPV6_MTU_DISCOVER:
1481 case IPV6_MTU:
1482 case IPV6_V6ONLY:
1483 case IPV6_RECVPKTINFO:
1484 val = 0;
1485 if (optlen < sizeof(uint32_t)) {
1486 return -TARGET_EINVAL;
1488 if (get_user_u32(val, optval_addr)) {
1489 return -TARGET_EFAULT;
1491 ret = get_errno(setsockopt(sockfd, level, optname,
1492 &val, sizeof(val)));
1493 break;
1494 default:
1495 goto unimplemented;
1497 break;
1498 case SOL_RAW:
1499 switch (optname) {
1500 case ICMP_FILTER:
1501 /* struct icmp_filter takes an u32 value */
1502 if (optlen < sizeof(uint32_t)) {
1503 return -TARGET_EINVAL;
1506 if (get_user_u32(val, optval_addr)) {
1507 return -TARGET_EFAULT;
1509 ret = get_errno(setsockopt(sockfd, level, optname,
1510 &val, sizeof(val)));
1511 break;
1513 default:
1514 goto unimplemented;
1516 break;
1517 case TARGET_SOL_SOCKET:
1518 switch (optname) {
1519 case TARGET_SO_RCVTIMEO:
1521 struct timeval tv;
1523 optname = SO_RCVTIMEO;
1525 set_timeout:
1526 if (optlen != sizeof(struct target_timeval)) {
1527 return -TARGET_EINVAL;
1530 if (copy_from_user_timeval(&tv, optval_addr)) {
1531 return -TARGET_EFAULT;
1534 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1535 &tv, sizeof(tv)));
1536 return ret;
1538 case TARGET_SO_SNDTIMEO:
1539 optname = SO_SNDTIMEO;
1540 goto set_timeout;
1541 case TARGET_SO_ATTACH_FILTER:
1543 struct target_sock_fprog *tfprog;
1544 struct target_sock_filter *tfilter;
1545 struct sock_fprog fprog;
1546 struct sock_filter *filter;
1547 int i;
1549 if (optlen != sizeof(*tfprog)) {
1550 return -TARGET_EINVAL;
1552 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1553 return -TARGET_EFAULT;
1555 if (!lock_user_struct(VERIFY_READ, tfilter,
1556 tswapal(tfprog->filter), 0)) {
1557 unlock_user_struct(tfprog, optval_addr, 1);
1558 return -TARGET_EFAULT;
1561 fprog.len = tswap16(tfprog->len);
1562 filter = malloc(fprog.len * sizeof(*filter));
1563 if (filter == NULL) {
1564 unlock_user_struct(tfilter, tfprog->filter, 1);
1565 unlock_user_struct(tfprog, optval_addr, 1);
1566 return -TARGET_ENOMEM;
1568 for (i = 0; i < fprog.len; i++) {
1569 filter[i].code = tswap16(tfilter[i].code);
1570 filter[i].jt = tfilter[i].jt;
1571 filter[i].jf = tfilter[i].jf;
1572 filter[i].k = tswap32(tfilter[i].k);
1574 fprog.filter = filter;
1576 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1577 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1578 free(filter);
1580 unlock_user_struct(tfilter, tfprog->filter, 1);
1581 unlock_user_struct(tfprog, optval_addr, 1);
1582 return ret;
1584 case TARGET_SO_BINDTODEVICE:
1586 char *dev_ifname, *addr_ifname;
1588 if (optlen > IFNAMSIZ - 1) {
1589 optlen = IFNAMSIZ - 1;
1591 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1592 if (!dev_ifname) {
1593 return -TARGET_EFAULT;
1595 optname = SO_BINDTODEVICE;
1596 addr_ifname = alloca(IFNAMSIZ);
1597 memcpy(addr_ifname, dev_ifname, optlen);
1598 addr_ifname[optlen] = 0;
1599 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1600 unlock_user (dev_ifname, optval_addr, 0);
1601 return ret;
1603 /* Options with 'int' argument. */
1604 case TARGET_SO_DEBUG:
1605 optname = SO_DEBUG;
1606 break;
1607 case TARGET_SO_REUSEADDR:
1608 optname = SO_REUSEADDR;
1609 break;
1610 case TARGET_SO_TYPE:
1611 optname = SO_TYPE;
1612 break;
1613 case TARGET_SO_ERROR:
1614 optname = SO_ERROR;
1615 break;
1616 case TARGET_SO_DONTROUTE:
1617 optname = SO_DONTROUTE;
1618 break;
1619 case TARGET_SO_BROADCAST:
1620 optname = SO_BROADCAST;
1621 break;
1622 case TARGET_SO_SNDBUF:
1623 optname = SO_SNDBUF;
1624 break;
1625 case TARGET_SO_SNDBUFFORCE:
1626 optname = SO_SNDBUFFORCE;
1627 break;
1628 case TARGET_SO_RCVBUF:
1629 optname = SO_RCVBUF;
1630 break;
1631 case TARGET_SO_RCVBUFFORCE:
1632 optname = SO_RCVBUFFORCE;
1633 break;
1634 case TARGET_SO_KEEPALIVE:
1635 optname = SO_KEEPALIVE;
1636 break;
1637 case TARGET_SO_OOBINLINE:
1638 optname = SO_OOBINLINE;
1639 break;
1640 case TARGET_SO_NO_CHECK:
1641 optname = SO_NO_CHECK;
1642 break;
1643 case TARGET_SO_PRIORITY:
1644 optname = SO_PRIORITY;
1645 break;
1646 #ifdef SO_BSDCOMPAT
1647 case TARGET_SO_BSDCOMPAT:
1648 optname = SO_BSDCOMPAT;
1649 break;
1650 #endif
1651 case TARGET_SO_PASSCRED:
1652 optname = SO_PASSCRED;
1653 break;
1654 case TARGET_SO_PASSSEC:
1655 optname = SO_PASSSEC;
1656 break;
1657 case TARGET_SO_TIMESTAMP:
1658 optname = SO_TIMESTAMP;
1659 break;
1660 case TARGET_SO_RCVLOWAT:
1661 optname = SO_RCVLOWAT;
1662 break;
1663 break;
1664 default:
1665 goto unimplemented;
1667 if (optlen < sizeof(uint32_t))
1668 return -TARGET_EINVAL;
1670 if (get_user_u32(val, optval_addr))
1671 return -TARGET_EFAULT;
1672 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1673 break;
1674 default:
1675 unimplemented:
1676 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1677 ret = -TARGET_ENOPROTOOPT;
1679 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates option level/name, calls the host getsockopt(), and writes
 * the result back in the guest's representation, honouring the guest's
 * requested option length where the kernel would.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Never report more than the kernel gave us. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) happens to equal sizeof(val) (both 4
         * bytes), which is what the kernel is actually told here.
         */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Guest asked for fewer than 4 bytes and the value fits
                 * in one byte: write a single byte, kernel-style.
                 */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert a guest iovec array at target_addr into a host iovec array,
 * locking every buffer into host memory.
 *
 * Returns a calloc()'d vector that must be released with unlock_iovec(),
 * or NULL with errno set (0 for the legitimate count==0 case, otherwise
 * EINVAL/ENOMEM/EFAULT).  type is VERIFY_READ or VERIFY_WRITE; copy is
 * forwarded to lock_user() and controls copy-in of guest data.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind the buffers locked so far (entries with len > 0 only). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    free(vec);
    errno = err;
    return NULL;
}
/* Release an iovec obtained from lock_iovec() and free the vector.
 * copy indicates whether buffer contents must be written back to guest
 * memory (i.e. the read/recv direction).
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec stopped at the first negative length. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}
1981 static inline int target_to_host_sock_type(int *type)
1983 int host_type = 0;
1984 int target_type = *type;
1986 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1987 case TARGET_SOCK_DGRAM:
1988 host_type = SOCK_DGRAM;
1989 break;
1990 case TARGET_SOCK_STREAM:
1991 host_type = SOCK_STREAM;
1992 break;
1993 default:
1994 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1995 break;
1997 if (target_type & TARGET_SOCK_CLOEXEC) {
1998 #if defined(SOCK_CLOEXEC)
1999 host_type |= SOCK_CLOEXEC;
2000 #else
2001 return -TARGET_EINVAL;
2002 #endif
2004 if (target_type & TARGET_SOCK_NONBLOCK) {
2005 #if defined(SOCK_NONBLOCK)
2006 host_type |= SOCK_NONBLOCK;
2007 #elif !defined(O_NONBLOCK)
2008 return -TARGET_EINVAL;
2009 #endif
2011 *type = host_type;
2012 return 0;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, set O_NONBLOCK via fcntl() when the
 * guest asked for a non-blocking socket.  On failure the descriptor is
 * closed and -TARGET_EINVAL returned; otherwise fd is returned.
 *
 * Fix: the result of fcntl(F_GETFL) is now checked — previously a
 * failing F_GETFL returned -1, and O_NONBLOCK was OR'ed into that,
 * passing a nonsense flag set to F_SETFL.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2030 /* do_socket() Must return target values and target errnos. */
2031 static abi_long do_socket(int domain, int type, int protocol)
2033 int target_type = type;
2034 int ret;
2036 ret = target_to_host_sock_type(&type);
2037 if (ret) {
2038 return ret;
2041 if (domain == PF_NETLINK)
2042 return -TARGET_EAFNOSUPPORT;
2043 ret = get_errno(socket(domain, type, protocol));
2044 if (ret >= 0) {
2045 ret = sock_flags_fixup(ret, target_type);
2047 return ret;
2050 /* do_bind() Must return target values and target errnos. */
2051 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2052 socklen_t addrlen)
2054 void *addr;
2055 abi_long ret;
2057 if ((int)addrlen < 0) {
2058 return -TARGET_EINVAL;
2061 addr = alloca(addrlen+1);
2063 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2064 if (ret)
2065 return ret;
2067 return get_errno(bind(sockfd, addr, addrlen));
2070 /* do_connect() Must return target values and target errnos. */
2071 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2072 socklen_t addrlen)
2074 void *addr;
2075 abi_long ret;
2077 if ((int)addrlen < 0) {
2078 return -TARGET_EINVAL;
2081 addr = alloca(addrlen+1);
2083 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2084 if (ret)
2085 return ret;
2087 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Performs one sendmsg()/recvmsg() for a guest msghdr that the caller
 * has already locked into host memory.  send selects direction: on send
 * the control data is converted guest->host beforehand; on receive the
 * name and control data are converted host->guest afterwards.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Twice the guest's control buffer: host cmsg payloads may be larger
     * than their target equivalents (see target_to_host_cmsg).
     */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* Success: report the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2157 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2158 int flags, int send)
2160 abi_long ret;
2161 struct target_msghdr *msgp;
2163 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2164 msgp,
2165 target_msg,
2166 send ? 1 : 0)) {
2167 return -TARGET_EFAULT;
2169 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2170 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2171 return ret;
#ifdef TARGET_NR_sendmmsg
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg()/recvmmsg() by looping over do_sendrecvmsg_locked().
 * Returns the number of messages processed if any succeeded, otherwise
 * the error from the first (failed) message.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Mirror the kernel's silent clamp of oversized vectors. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
#endif
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Non-zero flags would be silently dropped; catch that in debug builds. */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
/* do_accept4() Must return target values and target errnos.
 *
 * Note the asymmetric error codes below: Linux itself reports EINVAL
 * (not EFAULT) for a bad addrlen pointer or unwritable address buffer,
 * and this is deliberately replicated.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        /* Guest doesn't want the peer address at all. */
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
2273 /* do_getpeername() Must return target values and target errnos. */
2274 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2275 abi_ulong target_addrlen_addr)
2277 socklen_t addrlen;
2278 void *addr;
2279 abi_long ret;
2281 if (get_user_u32(addrlen, target_addrlen_addr))
2282 return -TARGET_EFAULT;
2284 if ((int)addrlen < 0) {
2285 return -TARGET_EINVAL;
2288 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2289 return -TARGET_EFAULT;
2291 addr = alloca(addrlen);
2293 ret = get_errno(getpeername(fd, addr, &addrlen));
2294 if (!is_error(ret)) {
2295 host_to_target_sockaddr(target_addr, addr, addrlen);
2296 if (put_user_u32(addrlen, target_addrlen_addr))
2297 ret = -TARGET_EFAULT;
2299 return ret;
2302 /* do_getsockname() Must return target values and target errnos. */
2303 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2304 abi_ulong target_addrlen_addr)
2306 socklen_t addrlen;
2307 void *addr;
2308 abi_long ret;
2310 if (get_user_u32(addrlen, target_addrlen_addr))
2311 return -TARGET_EFAULT;
2313 if ((int)addrlen < 0) {
2314 return -TARGET_EINVAL;
2317 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2318 return -TARGET_EFAULT;
2320 addr = alloca(addrlen);
2322 ret = get_errno(getsockname(fd, addr, &addrlen));
2323 if (!is_error(ret)) {
2324 host_to_target_sockaddr(target_addr, addr, addrlen);
2325 if (put_user_u32(addrlen, target_addrlen_addr))
2326 ret = -TARGET_EFAULT;
2328 return ret;
2331 /* do_socketpair() Must return target values and target errnos. */
2332 static abi_long do_socketpair(int domain, int type, int protocol,
2333 abi_ulong target_tab_addr)
2335 int tab[2];
2336 abi_long ret;
2338 target_to_host_sock_type(&type);
2340 ret = get_errno(socketpair(domain, type, protocol, tab));
2341 if (!is_error(ret)) {
2342 if (put_user_s32(tab[0], target_tab_addr)
2343 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2344 ret = -TARGET_EFAULT;
2346 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Emulates both sendto(2) (target_addr != 0) and send(2)
 * (target_addr == 0) on the given guest message buffer.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Pin the guest message for reading for the duration of the call. */
    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* +1 so a zero addrlen still gives alloca a nonzero size. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    /* Send path: nothing written back to guest memory. */
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates recvfrom(2) when target_addr != 0 (source address wanted)
 * or plain recv(2) otherwise.  The received data, and optionally the
 * sender's address and its length, are written back to guest memory.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* Pin the guest buffer for writing; length committed on unlock. */
    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Commit the received bytes to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
        /* Error paths join here: unlock without copying anything back. */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexes the legacy socketcall(2) syscall: 'num' selects the
 * socket operation and 'vptr' points at an array of abi_long arguments
 * in guest memory.  Arguments are fetched according to the per-call
 * count in ac[], then dispatched to the matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }
    /* An out-of-range num skips the fetch above and lands in the
     * default case below, returning ENOSYS. */

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
#define N_SHM_REGIONS	32

/* Bookkeeping for guest shmat() attachments so that do_shmdt() can
 * restore page protections; a slot with start == 0 is free. */
static struct shm_region {
    abi_ulong	start;
    abi_ulong	size;
} shm_regions[N_SHM_REGIONS];

/* Target-ABI layout of struct semid_ds.  The padding words are only
 * present on 32-bit-time ABIs; TARGET_PPC64 omits them. */
struct target_semid_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if !defined(TARGET_PPC64)
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if !defined(TARGET_PPC64)
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
/* Copy a struct ipc_perm embedded in a guest semid_ds (at target_addr)
 * into host form, byteswapping each field.  mode/__seq widths vary by
 * target ABI, hence the per-architecture #ifs. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse of target_to_host_ipc_perm(): write host ipc_perm fields back
 * into the guest's semid_ds at target_addr. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid_ds at target_addr into host form.
 * NOTE(review): target_addr is locked both here and again inside
 * target_to_host_ipc_perm(); on the ipc_perm error path the outer
 * lock is not released — harmless unless lock_user tracks nesting,
 * but worth confirming. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse: write a host semid_ds back into guest memory at target_addr. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Target-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO);
 * all fields are plain ints, matching the host layout field-for-field. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
2629 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2630 struct seminfo *host_seminfo)
2632 struct target_seminfo *target_seminfo;
2633 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2634 return -TARGET_EFAULT;
2635 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2636 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2637 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2638 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2639 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2640 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2641 __put_user(host_seminfo->semume, &target_seminfo->semume);
2642 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2643 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2644 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2645 unlock_user_struct(target_seminfo, target_addr, 1);
2646 return 0;
/* Host-side semctl() argument union (glibc requires callers define it). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side view of the same union: all pointer members are guest
 * addresses (abi_ulong), so the union is the guest pointer width. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
/* Read a guest GETALL/SETALL semaphore-value array into a freshly
 * malloc'd host array (*host_array).  The semaphore count is obtained
 * from the kernel via IPC_STAT rather than trusted from the guest.
 * On success the caller owns *host_array; host_to_target_semarray()
 * frees it. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        /* NOTE(review): get_errno() here passes the *host* errno
         * through untranslated — verify against the target errno map. */
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
2699 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2700 unsigned short **host_array)
2702 int nsems;
2703 unsigned short *array;
2704 union semun semun;
2705 struct semid_ds semid_ds;
2706 int i, ret;
2708 semun.buf = &semid_ds;
2710 ret = semctl(semid, 0, IPC_STAT, semun);
2711 if (ret == -1)
2712 return get_errno(ret);
2714 nsems = semid_ds.sem_nsems;
2716 array = lock_user(VERIFY_WRITE, target_addr,
2717 nsems*sizeof(unsigned short), 0);
2718 if (!array)
2719 return -TARGET_EFAULT;
2721 for(i=0; i<nsems; i++) {
2722 __put_user((*host_array)[i], &array[i]);
2724 free(*host_array);
2725 unlock_user(array, target_addr, 1);
2727 return 0;
/* Emulate semctl(2).  target_su is the guest's semun union passed by
 * value; which member is meaningful depends on cmd.  Returns target
 * errnos. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;  /* strip IPC_64 and other modifier bits */

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* target_to_host_semarray allocates 'array'; the matching
             * host_to_target_semarray call frees it. */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* Commands that take no semun argument at all. */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
/* Target-ABI layout of struct sembuf (semop operations); identical
 * field widths to the host struct, but possibly different endianness. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
2805 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2806 abi_ulong target_addr,
2807 unsigned nsops)
2809 struct target_sembuf *target_sembuf;
2810 int i;
2812 target_sembuf = lock_user(VERIFY_READ, target_addr,
2813 nsops*sizeof(struct target_sembuf), 1);
2814 if (!target_sembuf)
2815 return -TARGET_EFAULT;
2817 for(i=0; i<nsops; i++) {
2818 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2819 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2820 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2823 unlock_user(target_sembuf, target_addr, 0);
2825 return 0;
/* Emulate semop(2): convert the guest op array and issue the host call.
 * NOTE(review): sops is a VLA sized by the guest-controlled nsops, so a
 * huge nsops could overflow the host stack before the kernel gets a
 * chance to reject it — consider bounding against SEMOPM. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
/* Target-ABI layout of struct msqid_ds.  The __unusedN padding words
 * exist only on 32-bit ABIs where each 64-bit time field is split. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a guest msqid_ds at target_addr into host form (msgctl IPC_SET). */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Inverse: write a host msqid_ds back to guest memory (msgctl IPC_STAT). */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Target-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2915 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2916 struct msginfo *host_msginfo)
2918 struct target_msginfo *target_msginfo;
2919 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2920 return -TARGET_EFAULT;
2921 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2922 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2923 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2924 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2925 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2926 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2927 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2928 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2929 unlock_user_struct(target_msginfo, target_addr, 1);
2930 return 0;
/* Emulate msgctl(2), converting msqid_ds/msginfo between guest and
 * host representations as required by cmd.  Returns target errnos. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* strip IPC_64 and other modifier bits */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns msginfo through the msqid_ds pointer for
         * these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Target-ABI layout of the msgsnd/msgrcv message: an mtype word
 * followed by the message text (declared [1], used variable-length). */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};

/* Emulate msgsnd(2): copy the guest message into a host msgbuf
 * (byteswapping mtype) and issue the host call.
 * NOTE(review): uses malloc/free while the sibling do_msgrcv uses
 * g_malloc/g_free — consider unifying the allocator. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host buffer, then copy the message
 * text and byteswapped mtype back to the guest msgbuf at msgp. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext lives right after the abi_ulong mtype in the guest struct. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): executed even when msgrcv failed, copying an
     * uninitialized host mtype into guest memory — confirm intent. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* NOTE(review): target_mb is always non-NULL here (the NULL case
     * returned early above), so this guard is redundant. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Convert a guest shmid_ds at target_addr into host form (shmctl IPC_SET). */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse: write a host shmid_ds back to guest memory (shmctl IPC_STAT). */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Target-ABI layout of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
3080 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3081 struct shminfo *host_shminfo)
3083 struct target_shminfo *target_shminfo;
3084 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3085 return -TARGET_EFAULT;
3086 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3087 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3088 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3089 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3090 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3091 unlock_user_struct(target_shminfo, target_addr, 1);
3092 return 0;
/* Target-ABI layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
3104 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3105 struct shm_info *host_shm_info)
3107 struct target_shm_info *target_shm_info;
3108 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3109 return -TARGET_EFAULT;
3110 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3111 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3112 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3113 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3114 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3115 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3116 unlock_user_struct(target_shm_info, target_addr, 1);
3117 return 0;
/* Emulate shmctl(2), converting shmid_ds/shminfo/shm_info between
 * guest and host representations as required by cmd. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* strip IPC_64 and other modifier bits */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel returns shminfo through the shmid_ds pointer here. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, shm_info comes back through the same pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2).  If the guest supplied no address, a free region of
 * guest address space is found first; the attachment is then recorded
 * in shm_regions[] so do_shmdt() can undo the page protections. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Serialize against other guest address-space changes. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma may have left a reservation
             * mapping in place at this address. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attachment in the first free slot.
     * NOTE(review): if all N_SHM_REGIONS slots are in use the
     * attachment is silently untracked and shmdt won't reset flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): drop the shm_regions[] bookkeeping entry and its
 * page flags (if we tracked this attachment), then detach on the host. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexes the legacy ipc(2) syscall.  The high 16 bits of
 * 'call' carry an ABI version number; the low 16 bits select the
 * IPC operation, which is forwarded to the matching do_* helper.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third,
                        (union target_semun) atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: msgp and msgtyp are packed in a kludge
                 * struct that ptr points at. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * identifiers, one per kernel structure known to the thunk layer. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a TYPE_NULL-terminated argtype description
 * array (struct_<name>_def) for each non-special structure. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] =  { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler signature for ioctls too odd for generic thunking. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* ioctl number as seen by the guest */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;          /* for tracing/diagnostics */
    int access;                /* IOC_R / IOC_W / IOC_RW direction */
    do_ioctl_fn *do_ioctl;     /* non-NULL for special-cased ioctls */
    const argtype arg_type[5]; /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Upper bound for the generic ioctl conversion scratch buffer. */
#define MAX_STRUCT_SIZE 4096
3371 #ifdef CONFIG_FIEMAP
3372 /* So fiemap access checks don't overflow on 32 bit systems.
3373 * This is very slightly smaller than the limit imposed by
3374 * the underlying kernel.
3376 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3377 / sizeof(struct fiemap_extent))
/*
 * Custom handler for FS_IOC_FIEMAP: the ioctl argument is a struct fiemap
 * immediately followed by fm_extent_count struct fiemap_extent slots that
 * the kernel fills in.  Converts the header from target to host layout,
 * sizes an output buffer large enough for the requested extents, performs
 * the host ioctl, then converts header and extents back to target layout.
 * Returns 0 on success or a negative target errno.
 */
3379 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3380 int fd, int cmd, abi_long arg)
3382 /* The parameter for this ioctl is a struct fiemap followed
3383 * by an array of struct fiemap_extent whose size is set
3384 * in fiemap->fm_extent_count. The array is filled in by the
3385 * ioctl.
3387 int target_size_in, target_size_out;
3388 struct fiemap *fm;
3389 const argtype *arg_type = ie->arg_type;
3390 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3391 void *argptr, *p;
3392 abi_long ret;
3393 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3394 uint32_t outbufsz;
/* Set if we had to malloc() a buffer bigger than buf_temp. */
3395 int free_fm = 0;
3397 assert(arg_type[0] == TYPE_PTR);
3398 assert(ie->access == IOC_RW);
3399 arg_type++;
3400 target_size_in = thunk_type_size(arg_type, 0);
3401 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3402 if (!argptr) {
3403 return -TARGET_EFAULT;
3405 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3406 unlock_user(argptr, arg, 0);
3407 fm = (struct fiemap *)buf_temp;
/* Reject counts that would overflow the outbufsz computation below. */
3408 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3409 return -TARGET_EINVAL;
3412 outbufsz = sizeof (*fm) +
3413 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3415 if (outbufsz > MAX_STRUCT_SIZE) {
3416 /* We can't fit all the extents into the fixed size buffer.
3417 * Allocate one that is large enough and use it instead.
3419 fm = malloc(outbufsz);
3420 if (!fm) {
3421 return -TARGET_ENOMEM;
3423 memcpy(fm, buf_temp, sizeof(struct fiemap));
3424 free_fm = 1;
3426 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3427 if (!is_error(ret)) {
3428 target_size_out = target_size_in;
3429 /* An extent_count of 0 means we were only counting the extents
3430 * so there are no structs to copy
3432 if (fm->fm_extent_count != 0) {
3433 target_size_out += fm->fm_mapped_extents * extent_size;
3435 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3436 if (!argptr) {
3437 ret = -TARGET_EFAULT;
3438 } else {
3439 /* Convert the struct fiemap */
3440 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3441 if (fm->fm_extent_count != 0) {
3442 p = argptr + target_size_in;
3443 /* ...and then all the struct fiemap_extents */
3444 for (i = 0; i < fm->fm_mapped_extents; i++) {
3445 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3446 THUNK_TARGET);
3447 p += extent_size;
3450 unlock_user(argptr, arg, target_size_out);
3453 if (free_fm) {
3454 free(fm);
3456 return ret;
3458 #endif
/*
 * Custom handler for SIOCGIFCONF: struct ifconf carries a guest pointer
 * (ifc_buf) to an array of struct ifreq, so the generic thunk path cannot
 * handle it.  Converts the ifconf header, allocates a host-side ifreq
 * array (heap-allocated if it does not fit in buf_temp), runs the ioctl,
 * then rescales ifc_len from host to target ifreq size and copies both the
 * header and the ifreq array back to guest memory.
 * Returns 0 on success or a negative target errno.
 */
3460 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3461 int fd, int cmd, abi_long arg)
3463 const argtype *arg_type = ie->arg_type;
3464 int target_size;
3465 void *argptr;
3466 int ret;
3467 struct ifconf *host_ifconf;
3468 uint32_t outbufsz;
3469 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3470 int target_ifreq_size;
3471 int nb_ifreq;
/* Set if host_ifconf was malloc()ed rather than aliasing buf_temp. */
3472 int free_buf = 0;
3473 int i;
3474 int target_ifc_len;
/* Original guest-side ifc_buf pointer, saved so it can be restored
 * before copying the converted header back to the guest. */
3475 abi_long target_ifc_buf;
3476 int host_ifc_len;
3477 char *host_ifc_buf;
3479 assert(arg_type[0] == TYPE_PTR);
3480 assert(ie->access == IOC_RW);
3482 arg_type++;
3483 target_size = thunk_type_size(arg_type, 0);
3485 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3486 if (!argptr)
3487 return -TARGET_EFAULT;
3488 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3489 unlock_user(argptr, arg, 0);
3491 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3492 target_ifc_len = host_ifconf->ifc_len;
3493 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Rescale the buffer length from target-sized to host-sized ifreqs. */
3495 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3496 nb_ifreq = target_ifc_len / target_ifreq_size;
3497 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3499 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3500 if (outbufsz > MAX_STRUCT_SIZE) {
3501 /* We can't fit all the extents into the fixed size buffer.
3502 * Allocate one that is large enough and use it instead.
3504 host_ifconf = malloc(outbufsz);
3505 if (!host_ifconf) {
3506 return -TARGET_ENOMEM;
3508 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3509 free_buf = 1;
/* The host ifreq array lives immediately after the ifconf header. */
3511 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3513 host_ifconf->ifc_len = host_ifc_len;
3514 host_ifconf->ifc_buf = host_ifc_buf;
3516 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3517 if (!is_error(ret)) {
3518 /* convert host ifc_len to target ifc_len */
3520 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3521 target_ifc_len = nb_ifreq * target_ifreq_size;
3522 host_ifconf->ifc_len = target_ifc_len;
3524 /* restore target ifc_buf */
3526 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3528 /* copy struct ifconf to target user */
3530 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3531 if (!argptr)
3532 return -TARGET_EFAULT;
3533 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3534 unlock_user(argptr, arg, target_size);
3536 /* copy ifreq[] to target user */
3538 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3539 for (i = 0; i < nb_ifreq ; i++) {
3540 thunk_convert(argptr + i * target_ifreq_size,
3541 host_ifc_buf + i * sizeof(struct ifreq),
3542 ifreq_arg_type, THUNK_TARGET);
3544 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3547 if (free_buf) {
3548 free(host_ifconf);
3551 return ret;
/*
 * Custom handler for device-mapper ioctls (DM_*): the argument is a
 * struct dm_ioctl header followed by a command-specific variable-length
 * payload at offset data_start, total size data_size.  The header is
 * converted with the generic thunk; the payload needs per-command
 * marshalling in both directions, done by the two switch statements
 * below (first for guest->host input data, then for host->guest output).
 * Returns 0 on success or a negative (target) errno.
 */
3554 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3555 int cmd, abi_long arg)
3557 void *argptr;
3558 struct dm_ioctl *host_dm;
3559 abi_long guest_data;
3560 uint32_t guest_data_size;
3561 int target_size;
3562 const argtype *arg_type = ie->arg_type;
3563 abi_long ret;
3564 void *big_buf = NULL;
3565 char *host_data;
3567 arg_type++;
3568 target_size = thunk_type_size(arg_type, 0);
3569 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3570 if (!argptr) {
3571 ret = -TARGET_EFAULT;
3572 goto out;
3574 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3575 unlock_user(argptr, arg, 0);
3577 /* buf_temp is too small, so fetch things into a bigger buffer */
/* NOTE(review): the guest-controlled data_size is trusted here; the
 * factor of 2 leaves headroom for target->host size differences. */
3578 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3579 memcpy(big_buf, buf_temp, target_size);
3580 buf_temp = big_buf;
3581 host_dm = big_buf;
3583 guest_data = arg + host_dm->data_start;
3584 if ((guest_data - arg) < 0) {
3585 ret = -EINVAL;
3586 goto out;
3588 guest_data_size = host_dm->data_size - host_dm->data_start;
3589 host_data = (char*)host_dm + host_dm->data_start;
/* First switch: marshal command-specific INPUT payload guest -> host. */
3591 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3592 switch (ie->host_cmd) {
3593 case DM_REMOVE_ALL:
3594 case DM_LIST_DEVICES:
3595 case DM_DEV_CREATE:
3596 case DM_DEV_REMOVE:
3597 case DM_DEV_SUSPEND:
3598 case DM_DEV_STATUS:
3599 case DM_DEV_WAIT:
3600 case DM_TABLE_STATUS:
3601 case DM_TABLE_CLEAR:
3602 case DM_TABLE_DEPS:
3603 case DM_LIST_VERSIONS:
3604 /* no input data */
3605 break;
3606 case DM_DEV_RENAME:
3607 case DM_DEV_SET_GEOMETRY:
3608 /* data contains only strings */
3609 memcpy(host_data, argptr, guest_data_size);
3610 break;
3611 case DM_TARGET_MSG:
3612 memcpy(host_data, argptr, guest_data_size);
/* Leading uint64_t (sector number) needs byte-swapping. */
3613 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3614 break;
3615 case DM_TABLE_LOAD:
/* Payload is a chain of dm_target_spec structs, each followed by a
 * NUL-terminated parameter string; spec->next links to the next one. */
3617 void *gspec = argptr;
3618 void *cur_data = host_data;
3619 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3620 int spec_size = thunk_type_size(arg_type, 0);
3621 int i;
3623 for (i = 0; i < host_dm->target_count; i++) {
3624 struct dm_target_spec *spec = cur_data;
3625 uint32_t next;
3626 int slen;
3628 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3629 slen = strlen((char*)gspec + spec_size) + 1;
3630 next = spec->next;
3631 spec->next = sizeof(*spec) + slen;
3632 strcpy((char*)&spec[1], gspec + spec_size);
3633 gspec += next;
3634 cur_data += spec->next;
3636 break;
3638 default:
3639 ret = -TARGET_EINVAL;
3640 unlock_user(argptr, guest_data, 0);
3641 goto out;
3643 unlock_user(argptr, guest_data, 0);
3645 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3646 if (!is_error(ret)) {
/* Re-read data_start/data_size: the kernel may have updated them. */
3647 guest_data = arg + host_dm->data_start;
3648 guest_data_size = host_dm->data_size - host_dm->data_start;
3649 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
/* Second switch: marshal command-specific OUTPUT payload host -> guest. */
3650 switch (ie->host_cmd) {
3651 case DM_REMOVE_ALL:
3652 case DM_DEV_CREATE:
3653 case DM_DEV_REMOVE:
3654 case DM_DEV_RENAME:
3655 case DM_DEV_SUSPEND:
3656 case DM_DEV_STATUS:
3657 case DM_TABLE_LOAD:
3658 case DM_TABLE_CLEAR:
3659 case DM_TARGET_MSG:
3660 case DM_DEV_SET_GEOMETRY:
3661 /* no return data */
3662 break;
3663 case DM_LIST_DEVICES:
/* Output: linked list of dm_name_list records (offset-chained). */
3665 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3666 uint32_t remaining_data = guest_data_size;
3667 void *cur_data = argptr;
3668 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3669 int nl_size = 12; /* can't use thunk_size due to alignment */
3671 while (1) {
3672 uint32_t next = nl->next;
3673 if (next) {
3674 nl->next = nl_size + (strlen(nl->name) + 1);
3676 if (remaining_data < nl->next) {
3677 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3678 break;
3680 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3681 strcpy(cur_data + nl_size, nl->name);
3682 cur_data += nl->next;
3683 remaining_data -= nl->next;
3684 if (!next) {
3685 break;
3687 nl = (void*)nl + next;
3689 break;
3691 case DM_DEV_WAIT:
3692 case DM_TABLE_STATUS:
/* Output: target_count dm_target_spec records plus status strings. */
3694 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3695 void *cur_data = argptr;
3696 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3697 int spec_size = thunk_type_size(arg_type, 0);
3698 int i;
3700 for (i = 0; i < host_dm->target_count; i++) {
3701 uint32_t next = spec->next;
3702 int slen = strlen((char*)&spec[1]) + 1;
3703 spec->next = (cur_data - argptr) + spec_size + slen;
3704 if (guest_data_size < spec->next) {
3705 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3706 break;
3708 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3709 strcpy(cur_data + spec_size, (char*)&spec[1]);
3710 cur_data = argptr + spec->next;
3711 spec = (void*)host_dm + host_dm->data_start + next;
3713 break;
3715 case DM_TABLE_DEPS:
/* Output: uint32 count (at offset 0) then uint64 dev numbers at +8. */
3717 void *hdata = (void*)host_dm + host_dm->data_start;
3718 int count = *(uint32_t*)hdata;
3719 uint64_t *hdev = hdata + 8;
3720 uint64_t *gdev = argptr + 8;
3721 int i;
3723 *(uint32_t*)argptr = tswap32(count);
3724 for (i = 0; i < count; i++) {
3725 *gdev = tswap64(*hdev);
3726 gdev++;
3727 hdev++;
3729 break;
3731 case DM_LIST_VERSIONS:
/* Output: offset-chained dm_target_versions records. */
3733 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3734 uint32_t remaining_data = guest_data_size;
3735 void *cur_data = argptr;
3736 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3737 int vers_size = thunk_type_size(arg_type, 0);
3739 while (1) {
3740 uint32_t next = vers->next;
3741 if (next) {
3742 vers->next = vers_size + (strlen(vers->name) + 1);
3744 if (remaining_data < vers->next) {
3745 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3746 break;
3748 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3749 strcpy(cur_data + vers_size, vers->name);
3750 cur_data += vers->next;
3751 remaining_data -= vers->next;
3752 if (!next) {
3753 break;
3755 vers = (void*)vers + next;
3757 break;
3759 default:
3760 unlock_user(argptr, guest_data, 0);
3761 ret = -TARGET_EINVAL;
3762 goto out;
3764 unlock_user(argptr, guest_data, guest_data_size);
/* Finally convert the (possibly kernel-updated) header back to guest. */
3766 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3767 if (!argptr) {
3768 ret = -TARGET_EFAULT;
3769 goto out;
3771 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3772 unlock_user(argptr, arg, target_size);
3774 out:
3775 g_free(big_buf);
3776 return ret;
/*
 * Custom handler for BLKPG: struct blkpg_ioctl_arg embeds a guest pointer
 * (->data) to a struct blkpg_partition payload, so the generic thunk path
 * cannot follow it.  Converts the outer struct, validates the opcode,
 * converts the pointed-to partition struct into a host-side local copy,
 * redirects ->data at it and issues the host ioctl.
 * Returns 0 on success or a negative target errno.
 */
3779 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3780 int cmd, abi_long arg)
3782 void *argptr;
3783 int target_size;
3784 const argtype *arg_type = ie->arg_type;
3785 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3786 abi_long ret;
3788 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3789 struct blkpg_partition host_part;
3791 /* Read and convert blkpg */
3792 arg_type++;
3793 target_size = thunk_type_size(arg_type, 0);
3794 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3795 if (!argptr) {
3796 ret = -TARGET_EFAULT;
3797 goto out;
3799 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3800 unlock_user(argptr, arg, 0);
3802 switch (host_blkpg->op) {
3803 case BLKPG_ADD_PARTITION:
3804 case BLKPG_DEL_PARTITION:
3805 /* payload is struct blkpg_partition */
3806 break;
3807 default:
3808 /* Unknown opcode */
3809 ret = -TARGET_EINVAL;
3810 goto out;
3813 /* Read and convert blkpg->data */
/* host_blkpg->data still holds the guest address at this point. */
3814 arg = (abi_long)(uintptr_t)host_blkpg->data;
3815 target_size = thunk_type_size(part_arg_type, 0);
3816 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3817 if (!argptr) {
3818 ret = -TARGET_EFAULT;
3819 goto out;
3821 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3822 unlock_user(argptr, arg, 0);
3824 /* Swizzle the data pointer to our local copy and call! */
3825 host_blkpg->data = &host_part;
3826 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3828 out:
3829 return ret;
/*
 * Custom handler for routing-table ioctls (SIOCADDRT/SIOCDELRT): the
 * struct rtentry argument contains a guest string pointer (rt_dev) that
 * must be locked as a host string.  Walks the struct's field descriptors,
 * converting every field with the thunk except rt_dev, which is replaced
 * by a locked host pointer (or 0 if the guest passed NULL).
 * Returns 0 on success or a negative target errno.
 */
3832 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3833 int fd, int cmd, abi_long arg)
3835 const argtype *arg_type = ie->arg_type;
3836 const StructEntry *se;
3837 const argtype *field_types;
3838 const int *dst_offsets, *src_offsets;
3839 int target_size;
3840 void *argptr;
3841 abi_ulong *target_rt_dev_ptr;
/* NOTE(review): these two are only assigned inside the rt_dev branch of
 * the loop; the code relies on struct rtentry always containing an
 * rt_dev field so they are set before use after the loop — confirm. */
3842 unsigned long *host_rt_dev_ptr;
3843 abi_long ret;
3844 int i;
3846 assert(ie->access == IOC_W);
3847 assert(*arg_type == TYPE_PTR);
3848 arg_type++;
3849 assert(*arg_type == TYPE_STRUCT);
3850 target_size = thunk_type_size(arg_type, 0);
3851 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3852 if (!argptr) {
3853 return -TARGET_EFAULT;
3855 arg_type++;
3856 assert(*arg_type == (int)STRUCT_rtentry);
3857 se = struct_entries + *arg_type++;
3858 assert(se->convert[0] == NULL);
3859 /* convert struct here to be able to catch rt_dev string */
3860 field_types = se->field_types;
3861 dst_offsets = se->field_offsets[THUNK_HOST];
3862 src_offsets = se->field_offsets[THUNK_TARGET];
3863 for (i = 0; i < se->nb_fields; i++) {
3864 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3865 assert(*field_types == TYPE_PTRVOID);
3866 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3867 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3868 if (*target_rt_dev_ptr != 0) {
3869 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3870 tswapal(*target_rt_dev_ptr));
3871 if (!*host_rt_dev_ptr) {
3872 unlock_user(argptr, arg, 0);
3873 return -TARGET_EFAULT;
3875 } else {
3876 *host_rt_dev_ptr = 0;
3878 field_types++;
3879 continue;
3881 field_types = thunk_convert(buf_temp + dst_offsets[i],
3882 argptr + src_offsets[i],
3883 field_types, THUNK_HOST);
3885 unlock_user(argptr, arg, 0);
3887 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Release the device-name string locked above, if any. */
3888 if (*host_rt_dev_ptr != 0) {
3889 unlock_user((void *)*host_rt_dev_ptr,
3890 *target_rt_dev_ptr, 0);
3892 return ret;
/*
 * Custom handler for KDSIGACCEPT: the ioctl argument is a signal number,
 * which must be translated from the target's numbering to the host's
 * before being passed through.
 */
3895 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3896 int fd, int cmd, abi_long arg)
3898 int sig = target_to_host_signal(arg);
3899 return get_errno(ioctl(fd, ie->host_cmd, sig));
/*
 * The ioctl translation table, generated from "ioctls.h".  IOCTL() entries
 * use the generic thunk-based conversion in do_ioctl(); IOCTL_SPECIAL()
 * entries dispatch to a custom do_ioctl_fn (e.g. do_ioctl_dm above).
 * The table is terminated by a zero target_cmd sentinel.
 */
3902 static IOCTLEntry ioctl_entries[] = {
3903 #define IOCTL(cmd, access, ...) \
3904 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3905 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3906 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3907 #include "ioctls.h"
3908 { 0, 0, },
3911 /* ??? Implement proper locking for ioctls. */
3912 /* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher for the emulated guest.  Looks the target
 * command up in ioctl_entries; unsupported commands yield -TARGET_ENOSYS.
 * Entries with a custom handler delegate to it; otherwise the argument is
 * converted generically according to arg_type and ie->access:
 *   TYPE_NULL         - no argument
 *   TYPE_PTRVOID/INT  - passed through unchanged
 *   TYPE_PTR          - struct converted via thunk in the direction(s)
 *                       indicated by IOC_R / IOC_W / IOC_RW
 */
3913 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
3915 const IOCTLEntry *ie;
3916 const argtype *arg_type;
3917 abi_long ret;
3918 uint8_t buf_temp[MAX_STRUCT_SIZE];
3919 int target_size;
3920 void *argptr;
/* Linear search; the table ends with a target_cmd == 0 sentinel. */
3922 ie = ioctl_entries;
3923 for(;;) {
3924 if (ie->target_cmd == 0) {
3925 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3926 return -TARGET_ENOSYS;
3928 if (ie->target_cmd == cmd)
3929 break;
3930 ie++;
3932 arg_type = ie->arg_type;
3933 #if defined(DEBUG)
3934 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3935 #endif
3936 if (ie->do_ioctl) {
3937 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3940 switch(arg_type[0]) {
3941 case TYPE_NULL:
3942 /* no argument */
3943 ret = get_errno(ioctl(fd, ie->host_cmd));
3944 break;
3945 case TYPE_PTRVOID:
3946 case TYPE_INT:
3947 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3948 break;
3949 case TYPE_PTR:
3950 arg_type++;
3951 target_size = thunk_type_size(arg_type, 0);
3952 switch(ie->access) {
3953 case IOC_R:
/* Kernel fills buf_temp; convert the result back to the guest. */
3954 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3955 if (!is_error(ret)) {
3956 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3957 if (!argptr)
3958 return -TARGET_EFAULT;
3959 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3960 unlock_user(argptr, arg, target_size);
3962 break;
3963 case IOC_W:
/* Guest supplies the data; convert it to host layout first. */
3964 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3965 if (!argptr)
3966 return -TARGET_EFAULT;
3967 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3968 unlock_user(argptr, arg, 0);
3969 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3970 break;
3971 default:
3972 case IOC_RW:
/* Bidirectional: convert in, issue ioctl, convert result out. */
3973 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3974 if (!argptr)
3975 return -TARGET_EFAULT;
3976 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3977 unlock_user(argptr, arg, 0);
3978 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3979 if (!is_error(ret)) {
3980 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3981 if (!argptr)
3982 return -TARGET_EFAULT;
3983 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3984 unlock_user(argptr, arg, target_size);
3986 break;
3988 break;
3989 default:
3990 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3991 (long)cmd, arg_type[0]);
3992 ret = -TARGET_ENOSYS;
3993 break;
3995 return ret;
/* termios c_iflag translation table (target bits <-> host bits),
 * consumed by target_to_host_bitmask()/host_to_target_bitmask(). */
3998 static const bitmask_transtbl iflag_tbl[] = {
3999 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4000 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4001 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4002 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4003 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4004 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4005 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4006 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4007 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4008 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4009 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4010 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4011 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4012 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4013 { 0, 0, 0, 0 }
/* termios c_oflag translation table.  Multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) get one row per value within the field mask. */
4016 static const bitmask_transtbl oflag_tbl[] = {
4017 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4018 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4019 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4020 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4021 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4022 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4023 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4024 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4025 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4026 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4027 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4028 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4029 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4030 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4031 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4032 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4033 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4034 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4035 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4036 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4037 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4038 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4039 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4040 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4041 { 0, 0, 0, 0 }
/* termios c_cflag translation table: baud rates (values within the
 * CBAUD mask), character sizes (CSIZE mask), and single-bit flags. */
4044 static const bitmask_transtbl cflag_tbl[] = {
4045 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4046 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4047 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4048 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4049 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4050 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4051 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4052 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4053 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4054 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4055 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4056 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4057 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4058 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4059 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4060 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4061 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4062 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4063 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4064 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4065 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4066 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4067 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4068 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4069 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4070 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4071 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4072 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4073 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4074 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4075 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4076 { 0, 0, 0, 0 }
/* termios c_lflag translation table (target bits <-> host bits). */
4079 static const bitmask_transtbl lflag_tbl[] = {
4080 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4081 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4082 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4083 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4084 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4085 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4086 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4087 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4088 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4089 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4090 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4091 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4092 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4093 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4094 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4095 { 0, 0, 0, 0 }
/*
 * Convert a target_termios (guest layout/byte order) to a host_termios:
 * each flag word is byte-swapped and remapped through the tables above,
 * and the control-character array is copied index-by-index since the
 * TARGET_V* slot positions may differ from the host's V* positions.
 */
4098 static void target_to_host_termios (void *dst, const void *src)
4100 struct host_termios *host = dst;
4101 const struct target_termios *target = src;
4103 host->c_iflag =
4104 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4105 host->c_oflag =
4106 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4107 host->c_cflag =
4108 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4109 host->c_lflag =
4110 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4111 host->c_line = target->c_line;
/* Zero first: host slots with no target counterpart stay cleared. */
4113 memset(host->c_cc, 0, sizeof(host->c_cc));
4114 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4115 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4116 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4117 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4118 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4119 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4120 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4121 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4122 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4123 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4124 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4125 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4126 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4127 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4128 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4129 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4130 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios: convert a host_termios to the
 * guest's target_termios layout and byte order.  Flag words are remapped
 * through the same tables and byte-swapped for the guest; control
 * characters are copied slot-by-slot into their TARGET_V* positions.
 */
4133 static void host_to_target_termios (void *dst, const void *src)
4135 struct target_termios *target = dst;
4136 const struct host_termios *host = src;
4138 target->c_iflag =
4139 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4140 target->c_oflag =
4141 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4142 target->c_cflag =
4143 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4144 target->c_lflag =
4145 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4146 target->c_line = host->c_line;
/* Zero first: target slots with no host counterpart stay cleared. */
4148 memset(target->c_cc, 0, sizeof(target->c_cc));
4149 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4150 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4151 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4152 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4153 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4154 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4155 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4156 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4157 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4158 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4159 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4160 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4161 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4162 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4163 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4164 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4165 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* StructEntry registering termios as a STRUCT_SPECIAL type: the thunk
 * layer calls the hand-written converters above instead of generic
 * field-by-field conversion.  Index [0] is host->target, [1] target->host. */
4168 static const StructEntry struct_termios_def = {
4169 .convert = { host_to_target_termios, target_to_host_termios },
4170 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4171 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap() flags translation table (target MAP_* bits <-> host MAP_* bits). */
4174 static bitmask_transtbl mmap_flags_tbl[] = {
4175 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4176 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4177 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4178 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4179 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4180 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4181 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4182 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4183 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4184 MAP_NORESERVE },
4185 { 0, 0, 0, 0 }
4188 #if defined(TARGET_I386)
4190 /* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT, lazily allocated by write_ldt(). */
4191 static uint8_t *ldt_table;
/*
 * modify_ldt(func=0): copy up to bytecount bytes of the emulated LDT
 * into guest memory at ptr.  Returns the number of bytes copied, 0 if
 * no LDT has been created yet, or -TARGET_EFAULT on a bad pointer.
 */
4193 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4195 int size;
4196 void *p;
4198 if (!ldt_table)
4199 return 0;
4200 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4201 if (size > bytecount)
4202 size = bytecount;
4203 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4204 if (!p)
4205 return -TARGET_EFAULT;
4206 /* ??? Should this by byteswapped? */
4207 memcpy(p, ldt_table, size);
4208 unlock_user(p, ptr, size);
4209 return size;
4212 /* XXX: add locking support */
/*
 * modify_ldt(func=1 or 0x11): install one LDT entry described by a
 * target_modify_ldt_ldt_s read from guest memory at ptr.  oldmode
 * selects the legacy (func==1) semantics.  Allocates the shared LDT
 * backing store on first use and encodes the descriptor into the
 * two 32-bit words of an x86 segment descriptor, mirroring the
 * kernel's own modify_ldt logic.  Returns 0 or a negative target errno.
 */
4213 static abi_long write_ldt(CPUX86State *env,
4214 abi_ulong ptr, unsigned long bytecount, int oldmode)
4216 struct target_modify_ldt_ldt_s ldt_info;
4217 struct target_modify_ldt_ldt_s *target_ldt_info;
4218 int seg_32bit, contents, read_exec_only, limit_in_pages;
4219 int seg_not_present, useable, lm;
4220 uint32_t *lp, entry_1, entry_2;
4222 if (bytecount != sizeof(ldt_info))
4223 return -TARGET_EINVAL;
4224 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4225 return -TARGET_EFAULT;
4226 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4227 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4228 ldt_info.limit = tswap32(target_ldt_info->limit);
4229 ldt_info.flags = tswap32(target_ldt_info->flags);
4230 unlock_user_struct(target_ldt_info, ptr, 0);
4232 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4233 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's user_desc). */
4234 seg_32bit = ldt_info.flags & 1;
4235 contents = (ldt_info.flags >> 1) & 3;
4236 read_exec_only = (ldt_info.flags >> 3) & 1;
4237 limit_in_pages = (ldt_info.flags >> 4) & 1;
4238 seg_not_present = (ldt_info.flags >> 5) & 1;
4239 useable = (ldt_info.flags >> 6) & 1;
4240 #ifdef TARGET_ABI32
4241 lm = 0;
4242 #else
4243 lm = (ldt_info.flags >> 7) & 1;
4244 #endif
4245 if (contents == 3) {
4246 if (oldmode)
4247 return -TARGET_EINVAL;
4248 if (seg_not_present == 0)
4249 return -TARGET_EINVAL;
4251 /* allocate the LDT */
4252 if (!ldt_table) {
4253 env->ldt.base = target_mmap(0,
4254 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4255 PROT_READ|PROT_WRITE,
4256 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4257 if (env->ldt.base == -1)
4258 return -TARGET_ENOMEM;
4259 memset(g2h(env->ldt.base), 0,
4260 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4261 env->ldt.limit = 0xffff;
4262 ldt_table = g2h(env->ldt.base);
4265 /* NOTE: same code as Linux kernel */
4266 /* Allow LDTs to be cleared by the user. */
4267 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4268 if (oldmode ||
4269 (contents == 0 &&
4270 read_exec_only == 1 &&
4271 seg_32bit == 0 &&
4272 limit_in_pages == 0 &&
4273 seg_not_present == 1 &&
4274 useable == 0 )) {
4275 entry_1 = 0;
4276 entry_2 = 0;
4277 goto install;
/* Encode base/limit/attributes into descriptor words; 0x7000 sets the
 * DPL=3 and S (code/data) bits expected for a user segment. */
4281 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4282 (ldt_info.limit & 0x0ffff);
4283 entry_2 = (ldt_info.base_addr & 0xff000000) |
4284 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4285 (ldt_info.limit & 0xf0000) |
4286 ((read_exec_only ^ 1) << 9) |
4287 (contents << 10) |
4288 ((seg_not_present ^ 1) << 15) |
4289 (seg_32bit << 22) |
4290 (limit_in_pages << 23) |
4291 (lm << 21) |
4292 0x7000;
4293 if (!oldmode)
4294 entry_2 |= (useable << 20);
4296 /* Install the new entry ... */
4297 install:
4298 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4299 lp[0] = tswap32(entry_1);
4300 lp[1] = tswap32(entry_2);
4301 return 0;
4304 /* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes an
 * entry with legacy semantics, func 0x11 writes with the newer
 * semantics.  All other funcs return -TARGET_ENOSYS.
 */
4305 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4306 unsigned long bytecount)
4308 abi_long ret;
4310 switch (func) {
4311 case 0:
4312 ret = read_ldt(ptr, bytecount);
4313 break;
4314 case 1:
4315 ret = write_ldt(env, ptr, bytecount, 1);
4316 break;
4317 case 0x11:
4318 ret = write_ldt(env, ptr, bytecount, 0);
4319 break;
4320 default:
4321 ret = -TARGET_ENOSYS;
4322 break;
4324 return ret;
4327 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * A guest entry_number of -1 asks us to pick the first free slot in the
 * TLS range and report it back through the user struct.  The descriptor
 * encoding mirrors write_ldt() above (kernel fill_ldt logic).
 * Returns 0 or a negative target errno.
 */
4328 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4330 uint64_t *gdt_table = g2h(env->gdt.base);
4331 struct target_modify_ldt_ldt_s ldt_info;
4332 struct target_modify_ldt_ldt_s *target_ldt_info;
4333 int seg_32bit, contents, read_exec_only, limit_in_pages;
4334 int seg_not_present, useable, lm;
4335 uint32_t *lp, entry_1, entry_2;
4336 int i;
4338 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4339 if (!target_ldt_info)
4340 return -TARGET_EFAULT;
4341 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4342 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4343 ldt_info.limit = tswap32(target_ldt_info->limit);
4344 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS slot and tell the guest which one". */
4345 if (ldt_info.entry_number == -1) {
4346 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4347 if (gdt_table[i] == 0) {
4348 ldt_info.entry_number = i;
4349 target_ldt_info->entry_number = tswap32(i);
4350 break;
4354 unlock_user_struct(target_ldt_info, ptr, 1);
4356 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4357 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4358 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's user_desc). */
4359 seg_32bit = ldt_info.flags & 1;
4360 contents = (ldt_info.flags >> 1) & 3;
4361 read_exec_only = (ldt_info.flags >> 3) & 1;
4362 limit_in_pages = (ldt_info.flags >> 4) & 1;
4363 seg_not_present = (ldt_info.flags >> 5) & 1;
4364 useable = (ldt_info.flags >> 6) & 1;
4365 #ifdef TARGET_ABI32
4366 lm = 0;
4367 #else
4368 lm = (ldt_info.flags >> 7) & 1;
4369 #endif
4371 if (contents == 3) {
4372 if (seg_not_present == 0)
4373 return -TARGET_EINVAL;
4376 /* NOTE: same code as Linux kernel */
4377 /* Allow LDTs to be cleared by the user. */
4378 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4379 if ((contents == 0 &&
4380 read_exec_only == 1 &&
4381 seg_32bit == 0 &&
4382 limit_in_pages == 0 &&
4383 seg_not_present == 1 &&
4384 useable == 0 )) {
4385 entry_1 = 0;
4386 entry_2 = 0;
4387 goto install;
/* Encode base/limit/attributes into the two descriptor words. */
4391 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4392 (ldt_info.limit & 0x0ffff);
4393 entry_2 = (ldt_info.base_addr & 0xff000000) |
4394 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4395 (ldt_info.limit & 0xf0000) |
4396 ((read_exec_only ^ 1) << 9) |
4397 (contents << 10) |
4398 ((seg_not_present ^ 1) << 15) |
4399 (seg_32bit << 22) |
4400 (limit_in_pages << 23) |
4401 (useable << 20) |
4402 (lm << 21) |
4403 0x7000;
4405 /* Install the new entry ... */
4406 install:
4407 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4408 lp[0] = tswap32(entry_1);
4409 lp[1] = tswap32(entry_2);
4410 return 0;
/* Emulate get_thread_area(2) for 32-bit x86 guests: decode the GDT entry
 * selected by the guest-supplied entry_number back into a
 * modify_ldt_ldt_s structure (inverse of do_set_thread_area above).
 * Returns 0 on success, -TARGET_EFAULT / -TARGET_EINVAL on error.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only TLS slots may be read back through this interface. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack descriptor bits (mirror of the packing in do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
                (entry_2 & 0xff000000) |
                ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
4461 #ifndef TARGET_ABI32
4462 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4464 abi_long ret = 0;
4465 abi_ulong val;
4466 int idx;
4468 switch(code) {
4469 case TARGET_ARCH_SET_GS:
4470 case TARGET_ARCH_SET_FS:
4471 if (code == TARGET_ARCH_SET_GS)
4472 idx = R_GS;
4473 else
4474 idx = R_FS;
4475 cpu_x86_load_seg(env, idx, 0);
4476 env->segs[idx].base = addr;
4477 break;
4478 case TARGET_ARCH_GET_GS:
4479 case TARGET_ARCH_GET_FS:
4480 if (code == TARGET_ARCH_GET_GS)
4481 idx = R_GS;
4482 else
4483 idx = R_FS;
4484 val = env->segs[idx].base;
4485 if (put_user(val, addr, abi_ulong))
4486 ret = -TARGET_EFAULT;
4487 break;
4488 default:
4489 ret = -TARGET_EINVAL;
4490 break;
4492 return ret;
4494 #endif
4496 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Serializes thread creation so parent/child setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to the new thread's trampoline
 * (clone_func): the child fills in `tid` and signals `cond` once it is
 * initialized; the parent waits on that before returning.
 */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* protects the cond handshake */
    pthread_cond_t cond;        /* child signals readiness here */
    pthread_t thread;
    uint32_t tid;               /* host TID, filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
/* pthread trampoline for guest threads created with CLONE_VM.
 * Publishes the child's TID, restores signals, signals the parent that
 * setup is done, then enters the guest CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Honor CLONE_CHILD_SETTID / CLONE_PARENT_SETTID from the child side. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* CLONE_VM: create a new host thread sharing this address space. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        /* Keep the NPTL flags for local handling; strip them from the
           flags we forward so the host never sees them. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the attr/setstacksize return codes below are
           overwritten without being checked — only pthread_create's
           result is acted upon. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Translate a guest fcntl command constant to the host's value.
 * Returns the host command, or -TARGET_EINVAL for unknown commands.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
        /* These five happen to share values across all supported ABIs. */
	case TARGET_F_DUPFD:
	case TARGET_F_GETFD:
	case TARGET_F_SETFD:
	case TARGET_F_GETFL:
	case TARGET_F_SETFL:
            return cmd;
        case TARGET_F_GETLK:
	    return F_GETLK;
	case TARGET_F_SETLK:
	    return F_SETLK;
	case TARGET_F_SETLKW:
	    return F_SETLKW;
	case TARGET_F_GETOWN:
	    return F_GETOWN;
	case TARGET_F_SETOWN:
	    return F_SETOWN;
	case TARGET_F_GETSIG:
	    return F_GETSIG;
	case TARGET_F_SETSIG:
	    return F_SETSIG;
#if TARGET_ABI_BITS == 32
        /* 64-bit lock commands only exist as separate numbers on
           32-bit ABIs; on 64-bit ABIs F_GETLK already is 64-bit. */
        case TARGET_F_GETLK64:
	    return F_GETLK64;
	case TARGET_F_SETLK64:
	    return F_SETLK64;
	case TARGET_F_SETLKW64:
	    return F_SETLKW64;
#endif
        case TARGET_F_SETLEASE:
            return F_SETLEASE;
        case TARGET_F_GETLEASE:
            return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
        case TARGET_F_DUPFD_CLOEXEC:
            return F_DUPFD_CLOEXEC;
#endif
        case TARGET_F_NOTIFY:
            return F_NOTIFY;
#ifdef F_GETOWN_EX
        case TARGET_F_GETOWN_EX:
            return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
        case TARGET_F_SETOWN_EX:
            return F_SETOWN_EX;
#endif
        default:
            return -TARGET_EINVAL;
    }
    /* Unreachable: every path above returns. */
    return -TARGET_EINVAL;
}
/* Translation table for flock l_type values between guest and host.
 * The -1 masks make target_to_host_bitmask()/host_to_target_bitmask()
 * match each constant's full bit pattern.
 */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }   /* terminator */
};
/* Emulate fcntl(2): translate the command and any struct arguments
 * (flock, flock64, f_owner_ex) between guest and host layouts, perform
 * the host fcntl, and translate results back.
 * Returns the host fcntl result or a -TARGET_* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* GETLK writes the conflicting lock (if any) back to the caller. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                          host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the >> 1 / << 1 dance compensates for how the
           bitmask table encodes l_type values in the 64-bit path — TODO
           confirm against the flock_tbl encoding before touching it. */
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                   host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* File status flags need O_* constant translation. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Plain integer argument commands: pass through unchanged. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* On ABIs with 16-bit uid_t/gid_t, IDs above 65535 cannot be represented;
 * the kernel convention is to report them as the overflow id 65534. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* -1 (the "no change" sentinel) must survive 16-bit truncation, so
 * sign-extend it explicitly; all other values pass through. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* Byte-swap an id at the ABI's width (16-bit here, 32-bit below). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABIs: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* One-time initialization of the syscall layer: register thunk struct
 * descriptions, build the errno reverse-mapping table, and patch ioctl
 * size fields that depend on the target's struct layouts.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* A placeholder size only makes sense for pointer arguments. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit file offset,
 * honoring the target's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset in a single register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64(2): reassemble the 64-bit length from its register pair.
 * Some ABIs require 64-bit values in aligned (even/odd) register pairs,
 * which shifts the payload one argument slot to the right.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long lo_word = arg2;
    abi_long hi_word = arg3;

    if (regpairs_aligned(cpu_env)) {
        lo_word = arg3;
        hi_word = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(lo_word, hi_word)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair reassembly as target_truncate64,
 * but operating on a file descriptor rather than a path.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long lo_word = arg2;
    abi_long hi_word = arg3;

    if (regpairs_aligned(cpu_env)) {
        lo_word = arg3;
        hi_word = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(lo_word, hi_word)));
}
#endif
5033 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5034 abi_ulong target_addr)
5036 struct target_timespec *target_ts;
5038 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5039 return -TARGET_EFAULT;
5040 host_ts->tv_sec = tswapal(target_ts->tv_sec);
5041 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
5042 unlock_user_struct(target_ts, target_addr, 0);
5043 return 0;
5046 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5047 struct timespec *host_ts)
5049 struct target_timespec *target_ts;
5051 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5052 return -TARGET_EFAULT;
5053 target_ts->tv_sec = tswapal(host_ts->tv_sec);
5054 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
5055 unlock_user_struct(target_ts, target_addr, 1);
5056 return 0;
5059 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5060 abi_ulong target_addr)
5062 struct target_itimerspec *target_itspec;
5064 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5065 return -TARGET_EFAULT;
5068 host_itspec->it_interval.tv_sec =
5069 tswapal(target_itspec->it_interval.tv_sec);
5070 host_itspec->it_interval.tv_nsec =
5071 tswapal(target_itspec->it_interval.tv_nsec);
5072 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5073 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5075 unlock_user_struct(target_itspec, target_addr, 1);
5076 return 0;
5079 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5080 struct itimerspec *host_its)
5082 struct target_itimerspec *target_itspec;
5084 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5085 return -TARGET_EFAULT;
5088 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5089 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5091 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5092 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5094 unlock_user_struct(target_itspec, target_addr, 0);
5095 return 0;
5098 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5099 abi_ulong target_addr)
5101 struct target_sigevent *target_sevp;
5103 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5104 return -TARGET_EFAULT;
5107 /* This union is awkward on 64 bit systems because it has a 32 bit
5108 * integer and a pointer in it; we follow the conversion approach
5109 * used for handling sigval types in signal.c so the guest should get
5110 * the correct value back even if we did a 64 bit byteswap and it's
5111 * using the 32 bit integer.
5113 host_sevp->sigev_value.sival_ptr =
5114 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5115 host_sevp->sigev_signo =
5116 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5117 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5118 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5120 unlock_user_struct(target_sevp, target_addr, 1);
5121 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall(2) flag bits to the host's MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    host_flags |= (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;
    host_flags |= (arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0;
    return host_flags;
}
#endif
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Convert a host struct stat into the guest's stat64 layout at
 * target_addr.  ARM EABI guests use a distinct layout and are handled
 * by the first branch.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first: padding and fields we don't fill must not leak. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
5204 /* ??? Using host futex calls even when target atomic operations
5205 are not really atomic probably breaks things. However implementing
5206 futexes locally would make futexes shared between multiple processes
5207 tricky. However they're probably useless because guest atomic
5208 operations won't work either. */
5209 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5210 target_ulong uaddr2, int val3)
5212 struct timespec ts, *pts;
5213 int base_op;
5215 /* ??? We assume FUTEX_* constants are the same on both host
5216 and target. */
5217 #ifdef FUTEX_CMD_MASK
5218 base_op = op & FUTEX_CMD_MASK;
5219 #else
5220 base_op = op;
5221 #endif
5222 switch (base_op) {
5223 case FUTEX_WAIT:
5224 case FUTEX_WAIT_BITSET:
5225 if (timeout) {
5226 pts = &ts;
5227 target_to_host_timespec(pts, timeout);
5228 } else {
5229 pts = NULL;
5231 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5232 pts, NULL, val3));
5233 case FUTEX_WAKE:
5234 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5235 case FUTEX_FD:
5236 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5237 case FUTEX_REQUEUE:
5238 case FUTEX_CMP_REQUEUE:
5239 case FUTEX_WAKE_OP:
5240 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5241 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5242 But the prototype takes a `struct timespec *'; insert casts
5243 to satisfy the compiler. We do not need to tswap TIMEOUT
5244 since it's not compared to guest memory. */
5245 pts = (struct timespec *)(uintptr_t) timeout;
5246 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5247 g2h(uaddr2),
5248 (base_op == FUTEX_CMP_REQUEUE
5249 ? tswap32(val3)
5250 : val3)));
5251 default:
5252 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): bounce the variable-length file_handle
 * through a host copy sized by the guest's handle_bytes field.
 * Returns the host result or a -TARGET_* errno.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy the opaque bytes verbatim, then overwrite the two header
       fields with byte-swapped values. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's variable-length
 * file_handle to a host buffer (swapping the header fields), translate
 * the open flags, and perform the host call.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    memcpy(fh, target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int wstatus)
{
    if (WIFSIGNALED(wstatus)) {
        /* Replace the termination signal (low 7 bits), keep the rest. */
        return host_to_target_signal(WTERMSIG(wstatus)) | (wstatus & ~0x7f);
    }
    if (WIFSTOPPED(wstatus)) {
        /* Stop signal lives in bits 8..15; low byte is 0x7f. */
        return (host_to_target_signal(WSTOPSIG(wstatus)) << 8)
               | (wstatus & 0xff);
    }
    return wstatus;
}
/* Back /proc/self/cmdline for the guest: stream the host's cmdline to
 * fd, skipping the first NUL-terminated word (the qemu binary path) so
 * the guest sees its own argv.  Returns 0 on success, -1 on error.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Fix: search only the bytes actually read.  The old code
               scanned sizeof(buf) bytes, reading uninitialized data on a
               short read and potentially "finding" a stale NUL past the
               end of valid input (which could even drive nb_read
               negative). */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5404 static int open_self_maps(void *cpu_env, int fd)
5406 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5407 TaskState *ts = cpu->opaque;
5408 FILE *fp;
5409 char *line = NULL;
5410 size_t len = 0;
5411 ssize_t read;
5413 fp = fopen("/proc/self/maps", "r");
5414 if (fp == NULL) {
5415 return -EACCES;
5418 while ((read = getline(&line, &len, fp)) != -1) {
5419 int fields, dev_maj, dev_min, inode;
5420 uint64_t min, max, offset;
5421 char flag_r, flag_w, flag_x, flag_p;
5422 char path[512] = "";
5423 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5424 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5425 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5427 if ((fields < 10) || (fields > 11)) {
5428 continue;
5430 if (h2g_valid(min)) {
5431 int flags = page_get_flags(h2g(min));
5432 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5433 if (page_check_range(h2g(min), max - min, flags) == -1) {
5434 continue;
5436 if (h2g(min) == ts->info->stack_limit) {
5437 pstrcpy(path, sizeof(path), " [stack]");
5439 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5440 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5441 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5442 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5443 path[0] ? " " : "", path);
5447 free(line);
5448 fclose(fp);
5450 return 0;
5453 static int open_self_stat(void *cpu_env, int fd)
5455 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5456 TaskState *ts = cpu->opaque;
5457 abi_ulong start_stack = ts->info->start_stack;
5458 int i;
5460 for (i = 0; i < 44; i++) {
5461 char buf[128];
5462 int len;
5463 uint64_t val = 0;
5465 if (i == 0) {
5466 /* pid */
5467 val = getpid();
5468 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5469 } else if (i == 1) {
5470 /* app name */
5471 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5472 } else if (i == 27) {
5473 /* stack bottom */
5474 val = start_stack;
5475 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5476 } else {
5477 /* for the rest, there is MasterCard */
5478 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5481 len = strlen(buf);
5482 if (write(fd, buf, len) != len) {
5483 return -1;
5487 return 0;
5490 static int open_self_auxv(void *cpu_env, int fd)
5492 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5493 TaskState *ts = cpu->opaque;
5494 abi_ulong auxv = ts->info->saved_auxv;
5495 abi_ulong len = ts->info->auxv_len;
5496 char *ptr;
5499 * Auxiliary vector is stored in target process stack.
5500 * read in whole auxv vector and copy it to file
5502 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5503 if (ptr != NULL) {
5504 while (len > 0) {
5505 ssize_t r;
5506 r = write(fd, ptr, len);
5507 if (r <= 0) {
5508 break;
5510 len -= r;
5511 ptr += r;
5513 lseek(fd, 0, SEEK_SET);
5514 unlock_user(ptr, auxv, len);
5517 return 0;
/*
 * Return 1 iff @filename refers to @entry inside this process's /proc
 * directory, i.e. it is "/proc/self/<entry>" or "/proc/<getpid()>/<entry>".
 * Any other path (including "/proc/0/...", which cannot be a real pid)
 * returns 0.  The match against @entry is exact, not a prefix match.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric form: only our own pid counts as "myself". */
        char prefix[80];
        size_t plen;

        snprintf(prefix, sizeof(prefix), "%d/", getpid());
        plen = strlen(prefix);
        if (strncmp(rest, prefix, plen) != 0) {
            return 0;
        }
        rest += plen;
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
5544 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match path predicate used by the fake-open table for entries
 * (like /proc/net/route) that are not per-process. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
5550 static int open_net_route(void *cpu_env, int fd)
5552 FILE *fp;
5553 char *line = NULL;
5554 size_t len = 0;
5555 ssize_t read;
5557 fp = fopen("/proc/net/route", "r");
5558 if (fp == NULL) {
5559 return -EACCES;
5562 /* read header */
5564 read = getline(&line, &len, fp);
5565 dprintf(fd, "%s", line);
5567 /* read routes */
5569 while ((read = getline(&line, &len, fp)) != -1) {
5570 char iface[16];
5571 uint32_t dest, gw, mask;
5572 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5573 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5574 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5575 &mask, &mtu, &window, &irtt);
5576 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5577 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5578 metric, tswap32(mask), mtu, window, irtt);
5581 free(line);
5582 fclose(fp);
5584 return 0;
5586 #endif
5588 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5590 struct fake_open {
5591 const char *filename;
5592 int (*fill)(void *cpu_env, int fd);
5593 int (*cmp)(const char *s1, const char *s2);
5595 const struct fake_open *fake_open;
5596 static const struct fake_open fakes[] = {
5597 { "maps", open_self_maps, is_proc_myself },
5598 { "stat", open_self_stat, is_proc_myself },
5599 { "auxv", open_self_auxv, is_proc_myself },
5600 { "cmdline", open_self_cmdline, is_proc_myself },
5601 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5602 { "/proc/net/route", open_net_route, is_proc },
5603 #endif
5604 { NULL, NULL, NULL }
5607 if (is_proc_myself(pathname, "exe")) {
5608 int execfd = qemu_getauxval(AT_EXECFD);
5609 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5612 for (fake_open = fakes; fake_open->filename; fake_open++) {
5613 if (fake_open->cmp(pathname, fake_open->filename)) {
5614 break;
5618 if (fake_open->filename) {
5619 const char *tmpdir;
5620 char filename[PATH_MAX];
5621 int fd, r;
5623 /* create temporary file to map stat to */
5624 tmpdir = getenv("TMPDIR");
5625 if (!tmpdir)
5626 tmpdir = "/tmp";
5627 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5628 fd = mkstemp(filename);
5629 if (fd < 0) {
5630 return fd;
5632 unlink(filename);
5634 if ((r = fake_open->fill(cpu_env, fd))) {
5635 close(fd);
5636 return r;
5638 lseek(fd, 0, SEEK_SET);
5640 return fd;
5643 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5646 #define TIMER_MAGIC 0x0caf0000
5647 #define TIMER_MAGIC_MASK 0xffff0000
5649 /* Convert QEMU provided timer ID back to internal 16bit index format */
5650 static target_timer_t get_timer_id(abi_long arg)
5652 target_timer_t timerid = arg;
5654 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5655 return -TARGET_EINVAL;
5658 timerid &= 0xffff;
5660 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5661 return -TARGET_EINVAL;
5664 return timerid;
5667 /* do_syscall() should always have a single exit point at the end so
5668 that actions, such as logging of syscall results, can be performed.
5669 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5670 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5671 abi_long arg2, abi_long arg3, abi_long arg4,
5672 abi_long arg5, abi_long arg6, abi_long arg7,
5673 abi_long arg8)
5675 CPUState *cpu = ENV_GET_CPU(cpu_env);
5676 abi_long ret;
5677 struct stat st;
5678 struct statfs stfs;
5679 void *p;
5681 #ifdef DEBUG
5682 gemu_log("syscall %d", num);
5683 #endif
5684 if(do_strace)
5685 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5687 switch(num) {
5688 case TARGET_NR_exit:
5689 /* In old applications this may be used to implement _exit(2).
5690 However in threaded applictions it is used for thread termination,
5691 and _exit_group is used for application termination.
5692 Do thread termination if we have more then one thread. */
5693 /* FIXME: This probably breaks if a signal arrives. We should probably
5694 be disabling signals. */
5695 if (CPU_NEXT(first_cpu)) {
5696 TaskState *ts;
5698 cpu_list_lock();
5699 /* Remove the CPU from the list. */
5700 QTAILQ_REMOVE(&cpus, cpu, node);
5701 cpu_list_unlock();
5702 ts = cpu->opaque;
5703 if (ts->child_tidptr) {
5704 put_user_u32(0, ts->child_tidptr);
5705 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5706 NULL, NULL, 0);
5708 thread_cpu = NULL;
5709 object_unref(OBJECT(cpu));
5710 g_free(ts);
5711 rcu_unregister_thread();
5712 pthread_exit(NULL);
5714 #ifdef TARGET_GPROF
5715 _mcleanup();
5716 #endif
5717 gdb_exit(cpu_env, arg1);
5718 _exit(arg1);
5719 ret = 0; /* avoid warning */
5720 break;
5721 case TARGET_NR_read:
5722 if (arg3 == 0)
5723 ret = 0;
5724 else {
5725 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5726 goto efault;
5727 ret = get_errno(read(arg1, p, arg3));
5728 unlock_user(p, arg2, ret);
5730 break;
5731 case TARGET_NR_write:
5732 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5733 goto efault;
5734 ret = get_errno(write(arg1, p, arg3));
5735 unlock_user(p, arg2, 0);
5736 break;
5737 #ifdef TARGET_NR_open
5738 case TARGET_NR_open:
5739 if (!(p = lock_user_string(arg1)))
5740 goto efault;
5741 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5742 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5743 arg3));
5744 unlock_user(p, arg1, 0);
5745 break;
5746 #endif
5747 case TARGET_NR_openat:
5748 if (!(p = lock_user_string(arg2)))
5749 goto efault;
5750 ret = get_errno(do_openat(cpu_env, arg1, p,
5751 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5752 arg4));
5753 unlock_user(p, arg2, 0);
5754 break;
5755 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5756 case TARGET_NR_name_to_handle_at:
5757 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
5758 break;
5759 #endif
5760 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5761 case TARGET_NR_open_by_handle_at:
5762 ret = do_open_by_handle_at(arg1, arg2, arg3);
5763 break;
5764 #endif
5765 case TARGET_NR_close:
5766 ret = get_errno(close(arg1));
5767 break;
5768 case TARGET_NR_brk:
5769 ret = do_brk(arg1);
5770 break;
5771 #ifdef TARGET_NR_fork
5772 case TARGET_NR_fork:
5773 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5774 break;
5775 #endif
5776 #ifdef TARGET_NR_waitpid
5777 case TARGET_NR_waitpid:
5779 int status;
5780 ret = get_errno(waitpid(arg1, &status, arg3));
5781 if (!is_error(ret) && arg2 && ret
5782 && put_user_s32(host_to_target_waitstatus(status), arg2))
5783 goto efault;
5785 break;
5786 #endif
5787 #ifdef TARGET_NR_waitid
5788 case TARGET_NR_waitid:
5790 siginfo_t info;
5791 info.si_pid = 0;
5792 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5793 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5794 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5795 goto efault;
5796 host_to_target_siginfo(p, &info);
5797 unlock_user(p, arg3, sizeof(target_siginfo_t));
5800 break;
5801 #endif
5802 #ifdef TARGET_NR_creat /* not on alpha */
5803 case TARGET_NR_creat:
5804 if (!(p = lock_user_string(arg1)))
5805 goto efault;
5806 ret = get_errno(creat(p, arg2));
5807 unlock_user(p, arg1, 0);
5808 break;
5809 #endif
5810 #ifdef TARGET_NR_link
5811 case TARGET_NR_link:
5813 void * p2;
5814 p = lock_user_string(arg1);
5815 p2 = lock_user_string(arg2);
5816 if (!p || !p2)
5817 ret = -TARGET_EFAULT;
5818 else
5819 ret = get_errno(link(p, p2));
5820 unlock_user(p2, arg2, 0);
5821 unlock_user(p, arg1, 0);
5823 break;
5824 #endif
5825 #if defined(TARGET_NR_linkat)
5826 case TARGET_NR_linkat:
5828 void * p2 = NULL;
5829 if (!arg2 || !arg4)
5830 goto efault;
5831 p = lock_user_string(arg2);
5832 p2 = lock_user_string(arg4);
5833 if (!p || !p2)
5834 ret = -TARGET_EFAULT;
5835 else
5836 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5837 unlock_user(p, arg2, 0);
5838 unlock_user(p2, arg4, 0);
5840 break;
5841 #endif
5842 #ifdef TARGET_NR_unlink
5843 case TARGET_NR_unlink:
5844 if (!(p = lock_user_string(arg1)))
5845 goto efault;
5846 ret = get_errno(unlink(p));
5847 unlock_user(p, arg1, 0);
5848 break;
5849 #endif
5850 #if defined(TARGET_NR_unlinkat)
5851 case TARGET_NR_unlinkat:
5852 if (!(p = lock_user_string(arg2)))
5853 goto efault;
5854 ret = get_errno(unlinkat(arg1, p, arg3));
5855 unlock_user(p, arg2, 0);
5856 break;
5857 #endif
5858 case TARGET_NR_execve:
5860 char **argp, **envp;
5861 int argc, envc;
5862 abi_ulong gp;
5863 abi_ulong guest_argp;
5864 abi_ulong guest_envp;
5865 abi_ulong addr;
5866 char **q;
5867 int total_size = 0;
5869 argc = 0;
5870 guest_argp = arg2;
5871 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5872 if (get_user_ual(addr, gp))
5873 goto efault;
5874 if (!addr)
5875 break;
5876 argc++;
5878 envc = 0;
5879 guest_envp = arg3;
5880 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5881 if (get_user_ual(addr, gp))
5882 goto efault;
5883 if (!addr)
5884 break;
5885 envc++;
5888 argp = alloca((argc + 1) * sizeof(void *));
5889 envp = alloca((envc + 1) * sizeof(void *));
5891 for (gp = guest_argp, q = argp; gp;
5892 gp += sizeof(abi_ulong), q++) {
5893 if (get_user_ual(addr, gp))
5894 goto execve_efault;
5895 if (!addr)
5896 break;
5897 if (!(*q = lock_user_string(addr)))
5898 goto execve_efault;
5899 total_size += strlen(*q) + 1;
5901 *q = NULL;
5903 for (gp = guest_envp, q = envp; gp;
5904 gp += sizeof(abi_ulong), q++) {
5905 if (get_user_ual(addr, gp))
5906 goto execve_efault;
5907 if (!addr)
5908 break;
5909 if (!(*q = lock_user_string(addr)))
5910 goto execve_efault;
5911 total_size += strlen(*q) + 1;
5913 *q = NULL;
5915 if (!(p = lock_user_string(arg1)))
5916 goto execve_efault;
5917 ret = get_errno(execve(p, argp, envp));
5918 unlock_user(p, arg1, 0);
5920 goto execve_end;
5922 execve_efault:
5923 ret = -TARGET_EFAULT;
5925 execve_end:
5926 for (gp = guest_argp, q = argp; *q;
5927 gp += sizeof(abi_ulong), q++) {
5928 if (get_user_ual(addr, gp)
5929 || !addr)
5930 break;
5931 unlock_user(*q, addr, 0);
5933 for (gp = guest_envp, q = envp; *q;
5934 gp += sizeof(abi_ulong), q++) {
5935 if (get_user_ual(addr, gp)
5936 || !addr)
5937 break;
5938 unlock_user(*q, addr, 0);
5941 break;
5942 case TARGET_NR_chdir:
5943 if (!(p = lock_user_string(arg1)))
5944 goto efault;
5945 ret = get_errno(chdir(p));
5946 unlock_user(p, arg1, 0);
5947 break;
5948 #ifdef TARGET_NR_time
5949 case TARGET_NR_time:
5951 time_t host_time;
5952 ret = get_errno(time(&host_time));
5953 if (!is_error(ret)
5954 && arg1
5955 && put_user_sal(host_time, arg1))
5956 goto efault;
5958 break;
5959 #endif
5960 #ifdef TARGET_NR_mknod
5961 case TARGET_NR_mknod:
5962 if (!(p = lock_user_string(arg1)))
5963 goto efault;
5964 ret = get_errno(mknod(p, arg2, arg3));
5965 unlock_user(p, arg1, 0);
5966 break;
5967 #endif
5968 #if defined(TARGET_NR_mknodat)
5969 case TARGET_NR_mknodat:
5970 if (!(p = lock_user_string(arg2)))
5971 goto efault;
5972 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5973 unlock_user(p, arg2, 0);
5974 break;
5975 #endif
5976 #ifdef TARGET_NR_chmod
5977 case TARGET_NR_chmod:
5978 if (!(p = lock_user_string(arg1)))
5979 goto efault;
5980 ret = get_errno(chmod(p, arg2));
5981 unlock_user(p, arg1, 0);
5982 break;
5983 #endif
5984 #ifdef TARGET_NR_break
5985 case TARGET_NR_break:
5986 goto unimplemented;
5987 #endif
5988 #ifdef TARGET_NR_oldstat
5989 case TARGET_NR_oldstat:
5990 goto unimplemented;
5991 #endif
5992 case TARGET_NR_lseek:
5993 ret = get_errno(lseek(arg1, arg2, arg3));
5994 break;
5995 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5996 /* Alpha specific */
5997 case TARGET_NR_getxpid:
5998 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5999 ret = get_errno(getpid());
6000 break;
6001 #endif
6002 #ifdef TARGET_NR_getpid
6003 case TARGET_NR_getpid:
6004 ret = get_errno(getpid());
6005 break;
6006 #endif
6007 case TARGET_NR_mount:
6009 /* need to look at the data field */
6010 void *p2, *p3;
6012 if (arg1) {
6013 p = lock_user_string(arg1);
6014 if (!p) {
6015 goto efault;
6017 } else {
6018 p = NULL;
6021 p2 = lock_user_string(arg2);
6022 if (!p2) {
6023 if (arg1) {
6024 unlock_user(p, arg1, 0);
6026 goto efault;
6029 if (arg3) {
6030 p3 = lock_user_string(arg3);
6031 if (!p3) {
6032 if (arg1) {
6033 unlock_user(p, arg1, 0);
6035 unlock_user(p2, arg2, 0);
6036 goto efault;
6038 } else {
6039 p3 = NULL;
6042 /* FIXME - arg5 should be locked, but it isn't clear how to
6043 * do that since it's not guaranteed to be a NULL-terminated
6044 * string.
6046 if (!arg5) {
6047 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6048 } else {
6049 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6051 ret = get_errno(ret);
6053 if (arg1) {
6054 unlock_user(p, arg1, 0);
6056 unlock_user(p2, arg2, 0);
6057 if (arg3) {
6058 unlock_user(p3, arg3, 0);
6061 break;
6062 #ifdef TARGET_NR_umount
6063 case TARGET_NR_umount:
6064 if (!(p = lock_user_string(arg1)))
6065 goto efault;
6066 ret = get_errno(umount(p));
6067 unlock_user(p, arg1, 0);
6068 break;
6069 #endif
6070 #ifdef TARGET_NR_stime /* not on alpha */
6071 case TARGET_NR_stime:
6073 time_t host_time;
6074 if (get_user_sal(host_time, arg1))
6075 goto efault;
6076 ret = get_errno(stime(&host_time));
6078 break;
6079 #endif
6080 case TARGET_NR_ptrace:
6081 goto unimplemented;
6082 #ifdef TARGET_NR_alarm /* not on alpha */
6083 case TARGET_NR_alarm:
6084 ret = alarm(arg1);
6085 break;
6086 #endif
6087 #ifdef TARGET_NR_oldfstat
6088 case TARGET_NR_oldfstat:
6089 goto unimplemented;
6090 #endif
6091 #ifdef TARGET_NR_pause /* not on alpha */
6092 case TARGET_NR_pause:
6093 ret = get_errno(pause());
6094 break;
6095 #endif
6096 #ifdef TARGET_NR_utime
6097 case TARGET_NR_utime:
6099 struct utimbuf tbuf, *host_tbuf;
6100 struct target_utimbuf *target_tbuf;
6101 if (arg2) {
6102 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6103 goto efault;
6104 tbuf.actime = tswapal(target_tbuf->actime);
6105 tbuf.modtime = tswapal(target_tbuf->modtime);
6106 unlock_user_struct(target_tbuf, arg2, 0);
6107 host_tbuf = &tbuf;
6108 } else {
6109 host_tbuf = NULL;
6111 if (!(p = lock_user_string(arg1)))
6112 goto efault;
6113 ret = get_errno(utime(p, host_tbuf));
6114 unlock_user(p, arg1, 0);
6116 break;
6117 #endif
6118 #ifdef TARGET_NR_utimes
6119 case TARGET_NR_utimes:
6121 struct timeval *tvp, tv[2];
6122 if (arg2) {
6123 if (copy_from_user_timeval(&tv[0], arg2)
6124 || copy_from_user_timeval(&tv[1],
6125 arg2 + sizeof(struct target_timeval)))
6126 goto efault;
6127 tvp = tv;
6128 } else {
6129 tvp = NULL;
6131 if (!(p = lock_user_string(arg1)))
6132 goto efault;
6133 ret = get_errno(utimes(p, tvp));
6134 unlock_user(p, arg1, 0);
6136 break;
6137 #endif
6138 #if defined(TARGET_NR_futimesat)
6139 case TARGET_NR_futimesat:
6141 struct timeval *tvp, tv[2];
6142 if (arg3) {
6143 if (copy_from_user_timeval(&tv[0], arg3)
6144 || copy_from_user_timeval(&tv[1],
6145 arg3 + sizeof(struct target_timeval)))
6146 goto efault;
6147 tvp = tv;
6148 } else {
6149 tvp = NULL;
6151 if (!(p = lock_user_string(arg2)))
6152 goto efault;
6153 ret = get_errno(futimesat(arg1, path(p), tvp));
6154 unlock_user(p, arg2, 0);
6156 break;
6157 #endif
6158 #ifdef TARGET_NR_stty
6159 case TARGET_NR_stty:
6160 goto unimplemented;
6161 #endif
6162 #ifdef TARGET_NR_gtty
6163 case TARGET_NR_gtty:
6164 goto unimplemented;
6165 #endif
6166 #ifdef TARGET_NR_access
6167 case TARGET_NR_access:
6168 if (!(p = lock_user_string(arg1)))
6169 goto efault;
6170 ret = get_errno(access(path(p), arg2));
6171 unlock_user(p, arg1, 0);
6172 break;
6173 #endif
6174 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6175 case TARGET_NR_faccessat:
6176 if (!(p = lock_user_string(arg2)))
6177 goto efault;
6178 ret = get_errno(faccessat(arg1, p, arg3, 0));
6179 unlock_user(p, arg2, 0);
6180 break;
6181 #endif
6182 #ifdef TARGET_NR_nice /* not on alpha */
6183 case TARGET_NR_nice:
6184 ret = get_errno(nice(arg1));
6185 break;
6186 #endif
6187 #ifdef TARGET_NR_ftime
6188 case TARGET_NR_ftime:
6189 goto unimplemented;
6190 #endif
6191 case TARGET_NR_sync:
6192 sync();
6193 ret = 0;
6194 break;
6195 case TARGET_NR_kill:
6196 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6197 break;
6198 #ifdef TARGET_NR_rename
6199 case TARGET_NR_rename:
6201 void *p2;
6202 p = lock_user_string(arg1);
6203 p2 = lock_user_string(arg2);
6204 if (!p || !p2)
6205 ret = -TARGET_EFAULT;
6206 else
6207 ret = get_errno(rename(p, p2));
6208 unlock_user(p2, arg2, 0);
6209 unlock_user(p, arg1, 0);
6211 break;
6212 #endif
6213 #if defined(TARGET_NR_renameat)
6214 case TARGET_NR_renameat:
6216 void *p2;
6217 p = lock_user_string(arg2);
6218 p2 = lock_user_string(arg4);
6219 if (!p || !p2)
6220 ret = -TARGET_EFAULT;
6221 else
6222 ret = get_errno(renameat(arg1, p, arg3, p2));
6223 unlock_user(p2, arg4, 0);
6224 unlock_user(p, arg2, 0);
6226 break;
6227 #endif
6228 #ifdef TARGET_NR_mkdir
6229 case TARGET_NR_mkdir:
6230 if (!(p = lock_user_string(arg1)))
6231 goto efault;
6232 ret = get_errno(mkdir(p, arg2));
6233 unlock_user(p, arg1, 0);
6234 break;
6235 #endif
6236 #if defined(TARGET_NR_mkdirat)
6237 case TARGET_NR_mkdirat:
6238 if (!(p = lock_user_string(arg2)))
6239 goto efault;
6240 ret = get_errno(mkdirat(arg1, p, arg3));
6241 unlock_user(p, arg2, 0);
6242 break;
6243 #endif
6244 #ifdef TARGET_NR_rmdir
6245 case TARGET_NR_rmdir:
6246 if (!(p = lock_user_string(arg1)))
6247 goto efault;
6248 ret = get_errno(rmdir(p));
6249 unlock_user(p, arg1, 0);
6250 break;
6251 #endif
6252 case TARGET_NR_dup:
6253 ret = get_errno(dup(arg1));
6254 break;
6255 #ifdef TARGET_NR_pipe
6256 case TARGET_NR_pipe:
6257 ret = do_pipe(cpu_env, arg1, 0, 0);
6258 break;
6259 #endif
6260 #ifdef TARGET_NR_pipe2
6261 case TARGET_NR_pipe2:
6262 ret = do_pipe(cpu_env, arg1,
6263 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6264 break;
6265 #endif
6266 case TARGET_NR_times:
6268 struct target_tms *tmsp;
6269 struct tms tms;
6270 ret = get_errno(times(&tms));
6271 if (arg1) {
6272 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6273 if (!tmsp)
6274 goto efault;
6275 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6276 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6277 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6278 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6280 if (!is_error(ret))
6281 ret = host_to_target_clock_t(ret);
6283 break;
6284 #ifdef TARGET_NR_prof
6285 case TARGET_NR_prof:
6286 goto unimplemented;
6287 #endif
6288 #ifdef TARGET_NR_signal
6289 case TARGET_NR_signal:
6290 goto unimplemented;
6291 #endif
6292 case TARGET_NR_acct:
6293 if (arg1 == 0) {
6294 ret = get_errno(acct(NULL));
6295 } else {
6296 if (!(p = lock_user_string(arg1)))
6297 goto efault;
6298 ret = get_errno(acct(path(p)));
6299 unlock_user(p, arg1, 0);
6301 break;
6302 #ifdef TARGET_NR_umount2
6303 case TARGET_NR_umount2:
6304 if (!(p = lock_user_string(arg1)))
6305 goto efault;
6306 ret = get_errno(umount2(p, arg2));
6307 unlock_user(p, arg1, 0);
6308 break;
6309 #endif
6310 #ifdef TARGET_NR_lock
6311 case TARGET_NR_lock:
6312 goto unimplemented;
6313 #endif
6314 case TARGET_NR_ioctl:
6315 ret = do_ioctl(arg1, arg2, arg3);
6316 break;
6317 case TARGET_NR_fcntl:
6318 ret = do_fcntl(arg1, arg2, arg3);
6319 break;
6320 #ifdef TARGET_NR_mpx
6321 case TARGET_NR_mpx:
6322 goto unimplemented;
6323 #endif
6324 case TARGET_NR_setpgid:
6325 ret = get_errno(setpgid(arg1, arg2));
6326 break;
6327 #ifdef TARGET_NR_ulimit
6328 case TARGET_NR_ulimit:
6329 goto unimplemented;
6330 #endif
6331 #ifdef TARGET_NR_oldolduname
6332 case TARGET_NR_oldolduname:
6333 goto unimplemented;
6334 #endif
6335 case TARGET_NR_umask:
6336 ret = get_errno(umask(arg1));
6337 break;
6338 case TARGET_NR_chroot:
6339 if (!(p = lock_user_string(arg1)))
6340 goto efault;
6341 ret = get_errno(chroot(p));
6342 unlock_user(p, arg1, 0);
6343 break;
6344 #ifdef TARGET_NR_ustat
6345 case TARGET_NR_ustat:
6346 goto unimplemented;
6347 #endif
6348 #ifdef TARGET_NR_dup2
6349 case TARGET_NR_dup2:
6350 ret = get_errno(dup2(arg1, arg2));
6351 break;
6352 #endif
6353 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6354 case TARGET_NR_dup3:
6355 ret = get_errno(dup3(arg1, arg2, arg3));
6356 break;
6357 #endif
6358 #ifdef TARGET_NR_getppid /* not on alpha */
6359 case TARGET_NR_getppid:
6360 ret = get_errno(getppid());
6361 break;
6362 #endif
6363 #ifdef TARGET_NR_getpgrp
6364 case TARGET_NR_getpgrp:
6365 ret = get_errno(getpgrp());
6366 break;
6367 #endif
6368 case TARGET_NR_setsid:
6369 ret = get_errno(setsid());
6370 break;
6371 #ifdef TARGET_NR_sigaction
6372 case TARGET_NR_sigaction:
6374 #if defined(TARGET_ALPHA)
6375 struct target_sigaction act, oact, *pact = 0;
6376 struct target_old_sigaction *old_act;
6377 if (arg2) {
6378 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6379 goto efault;
6380 act._sa_handler = old_act->_sa_handler;
6381 target_siginitset(&act.sa_mask, old_act->sa_mask);
6382 act.sa_flags = old_act->sa_flags;
6383 act.sa_restorer = 0;
6384 unlock_user_struct(old_act, arg2, 0);
6385 pact = &act;
6387 ret = get_errno(do_sigaction(arg1, pact, &oact));
6388 if (!is_error(ret) && arg3) {
6389 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6390 goto efault;
6391 old_act->_sa_handler = oact._sa_handler;
6392 old_act->sa_mask = oact.sa_mask.sig[0];
6393 old_act->sa_flags = oact.sa_flags;
6394 unlock_user_struct(old_act, arg3, 1);
6396 #elif defined(TARGET_MIPS)
6397 struct target_sigaction act, oact, *pact, *old_act;
6399 if (arg2) {
6400 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6401 goto efault;
6402 act._sa_handler = old_act->_sa_handler;
6403 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6404 act.sa_flags = old_act->sa_flags;
6405 unlock_user_struct(old_act, arg2, 0);
6406 pact = &act;
6407 } else {
6408 pact = NULL;
6411 ret = get_errno(do_sigaction(arg1, pact, &oact));
6413 if (!is_error(ret) && arg3) {
6414 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6415 goto efault;
6416 old_act->_sa_handler = oact._sa_handler;
6417 old_act->sa_flags = oact.sa_flags;
6418 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6419 old_act->sa_mask.sig[1] = 0;
6420 old_act->sa_mask.sig[2] = 0;
6421 old_act->sa_mask.sig[3] = 0;
6422 unlock_user_struct(old_act, arg3, 1);
6424 #else
6425 struct target_old_sigaction *old_act;
6426 struct target_sigaction act, oact, *pact;
6427 if (arg2) {
6428 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6429 goto efault;
6430 act._sa_handler = old_act->_sa_handler;
6431 target_siginitset(&act.sa_mask, old_act->sa_mask);
6432 act.sa_flags = old_act->sa_flags;
6433 act.sa_restorer = old_act->sa_restorer;
6434 unlock_user_struct(old_act, arg2, 0);
6435 pact = &act;
6436 } else {
6437 pact = NULL;
6439 ret = get_errno(do_sigaction(arg1, pact, &oact));
6440 if (!is_error(ret) && arg3) {
6441 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6442 goto efault;
6443 old_act->_sa_handler = oact._sa_handler;
6444 old_act->sa_mask = oact.sa_mask.sig[0];
6445 old_act->sa_flags = oact.sa_flags;
6446 old_act->sa_restorer = oact.sa_restorer;
6447 unlock_user_struct(old_act, arg3, 1);
6449 #endif
6451 break;
6452 #endif
6453 case TARGET_NR_rt_sigaction:
6455 #if defined(TARGET_ALPHA)
6456 struct target_sigaction act, oact, *pact = 0;
6457 struct target_rt_sigaction *rt_act;
6458 /* ??? arg4 == sizeof(sigset_t). */
6459 if (arg2) {
6460 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6461 goto efault;
6462 act._sa_handler = rt_act->_sa_handler;
6463 act.sa_mask = rt_act->sa_mask;
6464 act.sa_flags = rt_act->sa_flags;
6465 act.sa_restorer = arg5;
6466 unlock_user_struct(rt_act, arg2, 0);
6467 pact = &act;
6469 ret = get_errno(do_sigaction(arg1, pact, &oact));
6470 if (!is_error(ret) && arg3) {
6471 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6472 goto efault;
6473 rt_act->_sa_handler = oact._sa_handler;
6474 rt_act->sa_mask = oact.sa_mask;
6475 rt_act->sa_flags = oact.sa_flags;
6476 unlock_user_struct(rt_act, arg3, 1);
6478 #else
6479 struct target_sigaction *act;
6480 struct target_sigaction *oact;
6482 if (arg2) {
6483 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6484 goto efault;
6485 } else
6486 act = NULL;
6487 if (arg3) {
6488 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6489 ret = -TARGET_EFAULT;
6490 goto rt_sigaction_fail;
6492 } else
6493 oact = NULL;
6494 ret = get_errno(do_sigaction(arg1, act, oact));
6495 rt_sigaction_fail:
6496 if (act)
6497 unlock_user_struct(act, arg2, 0);
6498 if (oact)
6499 unlock_user_struct(oact, arg3, 1);
6500 #endif
6502 break;
6503 #ifdef TARGET_NR_sgetmask /* not on alpha */
6504 case TARGET_NR_sgetmask:
6506 sigset_t cur_set;
6507 abi_ulong target_set;
6508 do_sigprocmask(0, NULL, &cur_set);
6509 host_to_target_old_sigset(&target_set, &cur_set);
6510 ret = target_set;
6512 break;
6513 #endif
6514 #ifdef TARGET_NR_ssetmask /* not on alpha */
6515 case TARGET_NR_ssetmask:
6517 sigset_t set, oset, cur_set;
6518 abi_ulong target_set = arg1;
6519 do_sigprocmask(0, NULL, &cur_set);
6520 target_to_host_old_sigset(&set, &target_set);
6521 sigorset(&set, &set, &cur_set);
6522 do_sigprocmask(SIG_SETMASK, &set, &oset);
6523 host_to_target_old_sigset(&target_set, &oset);
6524 ret = target_set;
6526 break;
6527 #endif
6528 #ifdef TARGET_NR_sigprocmask
6529 case TARGET_NR_sigprocmask:
6531 #if defined(TARGET_ALPHA)
6532 sigset_t set, oldset;
6533 abi_ulong mask;
6534 int how;
6536 switch (arg1) {
6537 case TARGET_SIG_BLOCK:
6538 how = SIG_BLOCK;
6539 break;
6540 case TARGET_SIG_UNBLOCK:
6541 how = SIG_UNBLOCK;
6542 break;
6543 case TARGET_SIG_SETMASK:
6544 how = SIG_SETMASK;
6545 break;
6546 default:
6547 ret = -TARGET_EINVAL;
6548 goto fail;
6550 mask = arg2;
6551 target_to_host_old_sigset(&set, &mask);
6553 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6554 if (!is_error(ret)) {
6555 host_to_target_old_sigset(&mask, &oldset);
6556 ret = mask;
6557 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6559 #else
6560 sigset_t set, oldset, *set_ptr;
6561 int how;
6563 if (arg2) {
6564 switch (arg1) {
6565 case TARGET_SIG_BLOCK:
6566 how = SIG_BLOCK;
6567 break;
6568 case TARGET_SIG_UNBLOCK:
6569 how = SIG_UNBLOCK;
6570 break;
6571 case TARGET_SIG_SETMASK:
6572 how = SIG_SETMASK;
6573 break;
6574 default:
6575 ret = -TARGET_EINVAL;
6576 goto fail;
6578 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6579 goto efault;
6580 target_to_host_old_sigset(&set, p);
6581 unlock_user(p, arg2, 0);
6582 set_ptr = &set;
6583 } else {
6584 how = 0;
6585 set_ptr = NULL;
6587 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6588 if (!is_error(ret) && arg3) {
6589 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6590 goto efault;
6591 host_to_target_old_sigset(p, &oldset);
6592 unlock_user(p, arg3, sizeof(target_sigset_t));
6594 #endif
6596 break;
6597 #endif
6598 case TARGET_NR_rt_sigprocmask:
6600 int how = arg1;
6601 sigset_t set, oldset, *set_ptr;
6603 if (arg2) {
6604 switch(how) {
6605 case TARGET_SIG_BLOCK:
6606 how = SIG_BLOCK;
6607 break;
6608 case TARGET_SIG_UNBLOCK:
6609 how = SIG_UNBLOCK;
6610 break;
6611 case TARGET_SIG_SETMASK:
6612 how = SIG_SETMASK;
6613 break;
6614 default:
6615 ret = -TARGET_EINVAL;
6616 goto fail;
6618 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6619 goto efault;
6620 target_to_host_sigset(&set, p);
6621 unlock_user(p, arg2, 0);
6622 set_ptr = &set;
6623 } else {
6624 how = 0;
6625 set_ptr = NULL;
6627 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6628 if (!is_error(ret) && arg3) {
6629 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6630 goto efault;
6631 host_to_target_sigset(p, &oldset);
6632 unlock_user(p, arg3, sizeof(target_sigset_t));
6635 break;
6636 #ifdef TARGET_NR_sigpending
6637 case TARGET_NR_sigpending:
6639 sigset_t set;
6640 ret = get_errno(sigpending(&set));
6641 if (!is_error(ret)) {
6642 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6643 goto efault;
6644 host_to_target_old_sigset(p, &set);
6645 unlock_user(p, arg1, sizeof(target_sigset_t));
6648 break;
6649 #endif
6650 case TARGET_NR_rt_sigpending:
6652 sigset_t set;
6653 ret = get_errno(sigpending(&set));
6654 if (!is_error(ret)) {
6655 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6656 goto efault;
6657 host_to_target_sigset(p, &set);
6658 unlock_user(p, arg1, sizeof(target_sigset_t));
6661 break;
6662 #ifdef TARGET_NR_sigsuspend
6663 case TARGET_NR_sigsuspend:
6665 sigset_t set;
6666 #if defined(TARGET_ALPHA)
6667 abi_ulong mask = arg1;
6668 target_to_host_old_sigset(&set, &mask);
6669 #else
6670 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6671 goto efault;
6672 target_to_host_old_sigset(&set, p);
6673 unlock_user(p, arg1, 0);
6674 #endif
6675 ret = get_errno(sigsuspend(&set));
6677 break;
6678 #endif
6679 case TARGET_NR_rt_sigsuspend:
6681 sigset_t set;
6682 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6683 goto efault;
6684 target_to_host_sigset(&set, p);
6685 unlock_user(p, arg1, 0);
6686 ret = get_errno(sigsuspend(&set));
6688 break;
6689 case TARGET_NR_rt_sigtimedwait:
6691 sigset_t set;
6692 struct timespec uts, *puts;
6693 siginfo_t uinfo;
6695 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6696 goto efault;
6697 target_to_host_sigset(&set, p);
6698 unlock_user(p, arg1, 0);
6699 if (arg3) {
6700 puts = &uts;
6701 target_to_host_timespec(puts, arg3);
6702 } else {
6703 puts = NULL;
6705 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6706 if (!is_error(ret)) {
6707 if (arg2) {
6708 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6710 if (!p) {
6711 goto efault;
6713 host_to_target_siginfo(p, &uinfo);
6714 unlock_user(p, arg2, sizeof(target_siginfo_t));
6716 ret = host_to_target_signal(ret);
6719 break;
6720 case TARGET_NR_rt_sigqueueinfo:
6722 siginfo_t uinfo;
6723 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6724 goto efault;
6725 target_to_host_siginfo(&uinfo, p);
6726 unlock_user(p, arg1, 0);
6727 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6729 break;
6730 #ifdef TARGET_NR_sigreturn
6731 case TARGET_NR_sigreturn:
6732 /* NOTE: ret is eax, so not transcoding must be done */
6733 ret = do_sigreturn(cpu_env);
6734 break;
6735 #endif
6736 case TARGET_NR_rt_sigreturn:
6737 /* NOTE: ret is eax, so not transcoding must be done */
6738 ret = do_rt_sigreturn(cpu_env);
6739 break;
6740 case TARGET_NR_sethostname:
6741 if (!(p = lock_user_string(arg1)))
6742 goto efault;
6743 ret = get_errno(sethostname(p, arg2));
6744 unlock_user(p, arg1, 0);
6745 break;
6746 case TARGET_NR_setrlimit:
6748 int resource = target_to_host_resource(arg1);
6749 struct target_rlimit *target_rlim;
6750 struct rlimit rlim;
6751 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6752 goto efault;
6753 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6754 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6755 unlock_user_struct(target_rlim, arg2, 0);
6756 ret = get_errno(setrlimit(resource, &rlim));
6758 break;
6759 case TARGET_NR_getrlimit:
6761 int resource = target_to_host_resource(arg1);
6762 struct target_rlimit *target_rlim;
6763 struct rlimit rlim;
6765 ret = get_errno(getrlimit(resource, &rlim));
6766 if (!is_error(ret)) {
6767 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6768 goto efault;
6769 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6770 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6771 unlock_user_struct(target_rlim, arg2, 1);
6774 break;
6775 case TARGET_NR_getrusage:
6777 struct rusage rusage;
6778 ret = get_errno(getrusage(arg1, &rusage));
6779 if (!is_error(ret)) {
6780 ret = host_to_target_rusage(arg2, &rusage);
6783 break;
6784 case TARGET_NR_gettimeofday:
6786 struct timeval tv;
6787 ret = get_errno(gettimeofday(&tv, NULL));
6788 if (!is_error(ret)) {
6789 if (copy_to_user_timeval(arg1, &tv))
6790 goto efault;
6793 break;
6794 case TARGET_NR_settimeofday:
6796 struct timeval tv, *ptv = NULL;
6797 struct timezone tz, *ptz = NULL;
6799 if (arg1) {
6800 if (copy_from_user_timeval(&tv, arg1)) {
6801 goto efault;
6803 ptv = &tv;
6806 if (arg2) {
6807 if (copy_from_user_timezone(&tz, arg2)) {
6808 goto efault;
6810 ptz = &tz;
6813 ret = get_errno(settimeofday(ptv, ptz));
6815 break;
6816 #if defined(TARGET_NR_select)
6817 case TARGET_NR_select:
6818 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6819 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6820 #else
6822 struct target_sel_arg_struct *sel;
6823 abi_ulong inp, outp, exp, tvp;
6824 long nsel;
6826 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6827 goto efault;
6828 nsel = tswapal(sel->n);
6829 inp = tswapal(sel->inp);
6830 outp = tswapal(sel->outp);
6831 exp = tswapal(sel->exp);
6832 tvp = tswapal(sel->tvp);
6833 unlock_user_struct(sel, arg1, 0);
6834 ret = do_select(nsel, inp, outp, exp, tvp);
6836 #endif
6837 break;
6838 #endif
6839 #ifdef TARGET_NR_pselect6
6840 case TARGET_NR_pselect6:
6842 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6843 fd_set rfds, wfds, efds;
6844 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6845 struct timespec ts, *ts_ptr;
6848 * The 6th arg is actually two args smashed together,
6849 * so we cannot use the C library.
6851 sigset_t set;
6852 struct {
6853 sigset_t *set;
6854 size_t size;
6855 } sig, *sig_ptr;
6857 abi_ulong arg_sigset, arg_sigsize, *arg7;
6858 target_sigset_t *target_sigset;
6860 n = arg1;
6861 rfd_addr = arg2;
6862 wfd_addr = arg3;
6863 efd_addr = arg4;
6864 ts_addr = arg5;
6866 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6867 if (ret) {
6868 goto fail;
6870 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6871 if (ret) {
6872 goto fail;
6874 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6875 if (ret) {
6876 goto fail;
6880 * This takes a timespec, and not a timeval, so we cannot
6881 * use the do_select() helper ...
6883 if (ts_addr) {
6884 if (target_to_host_timespec(&ts, ts_addr)) {
6885 goto efault;
6887 ts_ptr = &ts;
6888 } else {
6889 ts_ptr = NULL;
6892 /* Extract the two packed args for the sigset */
6893 if (arg6) {
6894 sig_ptr = &sig;
6895 sig.size = _NSIG / 8;
6897 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6898 if (!arg7) {
6899 goto efault;
6901 arg_sigset = tswapal(arg7[0]);
6902 arg_sigsize = tswapal(arg7[1]);
6903 unlock_user(arg7, arg6, 0);
6905 if (arg_sigset) {
6906 sig.set = &set;
6907 if (arg_sigsize != sizeof(*target_sigset)) {
6908 /* Like the kernel, we enforce correct size sigsets */
6909 ret = -TARGET_EINVAL;
6910 goto fail;
6912 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6913 sizeof(*target_sigset), 1);
6914 if (!target_sigset) {
6915 goto efault;
6917 target_to_host_sigset(&set, target_sigset);
6918 unlock_user(target_sigset, arg_sigset, 0);
6919 } else {
6920 sig.set = NULL;
6922 } else {
6923 sig_ptr = NULL;
6926 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6927 ts_ptr, sig_ptr));
6929 if (!is_error(ret)) {
6930 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6931 goto efault;
6932 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6933 goto efault;
6934 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6935 goto efault;
6937 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6938 goto efault;
6941 break;
6942 #endif
6943 #ifdef TARGET_NR_symlink
6944 case TARGET_NR_symlink:
6946 void *p2;
6947 p = lock_user_string(arg1);
6948 p2 = lock_user_string(arg2);
6949 if (!p || !p2)
6950 ret = -TARGET_EFAULT;
6951 else
6952 ret = get_errno(symlink(p, p2));
6953 unlock_user(p2, arg2, 0);
6954 unlock_user(p, arg1, 0);
6956 break;
6957 #endif
6958 #if defined(TARGET_NR_symlinkat)
6959 case TARGET_NR_symlinkat:
6961 void *p2;
6962 p = lock_user_string(arg1);
6963 p2 = lock_user_string(arg3);
6964 if (!p || !p2)
6965 ret = -TARGET_EFAULT;
6966 else
6967 ret = get_errno(symlinkat(p, arg2, p2));
6968 unlock_user(p2, arg3, 0);
6969 unlock_user(p, arg1, 0);
6971 break;
6972 #endif
6973 #ifdef TARGET_NR_oldlstat
6974 case TARGET_NR_oldlstat:
6975 goto unimplemented;
6976 #endif
6977 #ifdef TARGET_NR_readlink
6978 case TARGET_NR_readlink:
6980 void *p2;
6981 p = lock_user_string(arg1);
6982 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6983 if (!p || !p2) {
6984 ret = -TARGET_EFAULT;
6985 } else if (!arg3) {
6986 /* Short circuit this for the magic exe check. */
6987 ret = -TARGET_EINVAL;
6988 } else if (is_proc_myself((const char *)p, "exe")) {
6989 char real[PATH_MAX], *temp;
6990 temp = realpath(exec_path, real);
6991 /* Return value is # of bytes that we wrote to the buffer. */
6992 if (temp == NULL) {
6993 ret = get_errno(-1);
6994 } else {
6995 /* Don't worry about sign mismatch as earlier mapping
6996 * logic would have thrown a bad address error. */
6997 ret = MIN(strlen(real), arg3);
6998 /* We cannot NUL terminate the string. */
6999 memcpy(p2, real, ret);
7001 } else {
7002 ret = get_errno(readlink(path(p), p2, arg3));
7004 unlock_user(p2, arg2, ret);
7005 unlock_user(p, arg1, 0);
7007 break;
7008 #endif
7009 #if defined(TARGET_NR_readlinkat)
7010 case TARGET_NR_readlinkat:
7012 void *p2;
7013 p = lock_user_string(arg2);
7014 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7015 if (!p || !p2) {
7016 ret = -TARGET_EFAULT;
7017 } else if (is_proc_myself((const char *)p, "exe")) {
7018 char real[PATH_MAX], *temp;
7019 temp = realpath(exec_path, real);
7020 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
7021 snprintf((char *)p2, arg4, "%s", real);
7022 } else {
7023 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7025 unlock_user(p2, arg3, ret);
7026 unlock_user(p, arg2, 0);
7028 break;
7029 #endif
7030 #ifdef TARGET_NR_uselib
7031 case TARGET_NR_uselib:
7032 goto unimplemented;
7033 #endif
7034 #ifdef TARGET_NR_swapon
7035 case TARGET_NR_swapon:
7036 if (!(p = lock_user_string(arg1)))
7037 goto efault;
7038 ret = get_errno(swapon(p, arg2));
7039 unlock_user(p, arg1, 0);
7040 break;
7041 #endif
7042 case TARGET_NR_reboot:
7043 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7044 /* arg4 must be ignored in all other cases */
7045 p = lock_user_string(arg4);
7046 if (!p) {
7047 goto efault;
7049 ret = get_errno(reboot(arg1, arg2, arg3, p));
7050 unlock_user(p, arg4, 0);
7051 } else {
7052 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7054 break;
7055 #ifdef TARGET_NR_readdir
7056 case TARGET_NR_readdir:
7057 goto unimplemented;
7058 #endif
7059 #ifdef TARGET_NR_mmap
7060 case TARGET_NR_mmap:
7061 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7062 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7063 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7064 || defined(TARGET_S390X)
7066 abi_ulong *v;
7067 abi_ulong v1, v2, v3, v4, v5, v6;
7068 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7069 goto efault;
7070 v1 = tswapal(v[0]);
7071 v2 = tswapal(v[1]);
7072 v3 = tswapal(v[2]);
7073 v4 = tswapal(v[3]);
7074 v5 = tswapal(v[4]);
7075 v6 = tswapal(v[5]);
7076 unlock_user(v, arg1, 0);
7077 ret = get_errno(target_mmap(v1, v2, v3,
7078 target_to_host_bitmask(v4, mmap_flags_tbl),
7079 v5, v6));
7081 #else
7082 ret = get_errno(target_mmap(arg1, arg2, arg3,
7083 target_to_host_bitmask(arg4, mmap_flags_tbl),
7084 arg5,
7085 arg6));
7086 #endif
7087 break;
7088 #endif
7089 #ifdef TARGET_NR_mmap2
7090 case TARGET_NR_mmap2:
7091 #ifndef MMAP_SHIFT
7092 #define MMAP_SHIFT 12
7093 #endif
7094 ret = get_errno(target_mmap(arg1, arg2, arg3,
7095 target_to_host_bitmask(arg4, mmap_flags_tbl),
7096 arg5,
7097 arg6 << MMAP_SHIFT));
7098 break;
7099 #endif
7100 case TARGET_NR_munmap:
7101 ret = get_errno(target_munmap(arg1, arg2));
7102 break;
7103 case TARGET_NR_mprotect:
7105 TaskState *ts = cpu->opaque;
7106 /* Special hack to detect libc making the stack executable. */
7107 if ((arg3 & PROT_GROWSDOWN)
7108 && arg1 >= ts->info->stack_limit
7109 && arg1 <= ts->info->start_stack) {
7110 arg3 &= ~PROT_GROWSDOWN;
7111 arg2 = arg2 + arg1 - ts->info->stack_limit;
7112 arg1 = ts->info->stack_limit;
7115 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7116 break;
7117 #ifdef TARGET_NR_mremap
7118 case TARGET_NR_mremap:
7119 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7120 break;
7121 #endif
7122 /* ??? msync/mlock/munlock are broken for softmmu. */
7123 #ifdef TARGET_NR_msync
7124 case TARGET_NR_msync:
7125 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7126 break;
7127 #endif
7128 #ifdef TARGET_NR_mlock
7129 case TARGET_NR_mlock:
7130 ret = get_errno(mlock(g2h(arg1), arg2));
7131 break;
7132 #endif
7133 #ifdef TARGET_NR_munlock
7134 case TARGET_NR_munlock:
7135 ret = get_errno(munlock(g2h(arg1), arg2));
7136 break;
7137 #endif
7138 #ifdef TARGET_NR_mlockall
7139 case TARGET_NR_mlockall:
7140 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7141 break;
7142 #endif
7143 #ifdef TARGET_NR_munlockall
7144 case TARGET_NR_munlockall:
7145 ret = get_errno(munlockall());
7146 break;
7147 #endif
7148 case TARGET_NR_truncate:
7149 if (!(p = lock_user_string(arg1)))
7150 goto efault;
7151 ret = get_errno(truncate(p, arg2));
7152 unlock_user(p, arg1, 0);
7153 break;
7154 case TARGET_NR_ftruncate:
7155 ret = get_errno(ftruncate(arg1, arg2));
7156 break;
7157 case TARGET_NR_fchmod:
7158 ret = get_errno(fchmod(arg1, arg2));
7159 break;
7160 #if defined(TARGET_NR_fchmodat)
7161 case TARGET_NR_fchmodat:
7162 if (!(p = lock_user_string(arg2)))
7163 goto efault;
7164 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7165 unlock_user(p, arg2, 0);
7166 break;
7167 #endif
7168 case TARGET_NR_getpriority:
7169 /* Note that negative values are valid for getpriority, so we must
7170 differentiate based on errno settings. */
7171 errno = 0;
7172 ret = getpriority(arg1, arg2);
7173 if (ret == -1 && errno != 0) {
7174 ret = -host_to_target_errno(errno);
7175 break;
7177 #ifdef TARGET_ALPHA
7178 /* Return value is the unbiased priority. Signal no error. */
7179 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7180 #else
7181 /* Return value is a biased priority to avoid negative numbers. */
7182 ret = 20 - ret;
7183 #endif
7184 break;
7185 case TARGET_NR_setpriority:
7186 ret = get_errno(setpriority(arg1, arg2, arg3));
7187 break;
7188 #ifdef TARGET_NR_profil
7189 case TARGET_NR_profil:
7190 goto unimplemented;
7191 #endif
7192 case TARGET_NR_statfs:
7193 if (!(p = lock_user_string(arg1)))
7194 goto efault;
7195 ret = get_errno(statfs(path(p), &stfs));
7196 unlock_user(p, arg1, 0);
7197 convert_statfs:
7198 if (!is_error(ret)) {
7199 struct target_statfs *target_stfs;
7201 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7202 goto efault;
7203 __put_user(stfs.f_type, &target_stfs->f_type);
7204 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7205 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7206 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7207 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7208 __put_user(stfs.f_files, &target_stfs->f_files);
7209 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7210 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7211 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7212 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7213 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7214 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7215 unlock_user_struct(target_stfs, arg2, 1);
7217 break;
7218 case TARGET_NR_fstatfs:
7219 ret = get_errno(fstatfs(arg1, &stfs));
7220 goto convert_statfs;
7221 #ifdef TARGET_NR_statfs64
7222 case TARGET_NR_statfs64:
7223 if (!(p = lock_user_string(arg1)))
7224 goto efault;
7225 ret = get_errno(statfs(path(p), &stfs));
7226 unlock_user(p, arg1, 0);
7227 convert_statfs64:
7228 if (!is_error(ret)) {
7229 struct target_statfs64 *target_stfs;
7231 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7232 goto efault;
7233 __put_user(stfs.f_type, &target_stfs->f_type);
7234 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7235 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7236 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7237 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7238 __put_user(stfs.f_files, &target_stfs->f_files);
7239 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7240 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7241 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7242 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7243 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7244 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7245 unlock_user_struct(target_stfs, arg3, 1);
7247 break;
7248 case TARGET_NR_fstatfs64:
7249 ret = get_errno(fstatfs(arg1, &stfs));
7250 goto convert_statfs64;
7251 #endif
7252 #ifdef TARGET_NR_ioperm
7253 case TARGET_NR_ioperm:
7254 goto unimplemented;
7255 #endif
7256 #ifdef TARGET_NR_socketcall
7257 case TARGET_NR_socketcall:
7258 ret = do_socketcall(arg1, arg2);
7259 break;
7260 #endif
7261 #ifdef TARGET_NR_accept
7262 case TARGET_NR_accept:
7263 ret = do_accept4(arg1, arg2, arg3, 0);
7264 break;
7265 #endif
7266 #ifdef TARGET_NR_accept4
7267 case TARGET_NR_accept4:
7268 #ifdef CONFIG_ACCEPT4
7269 ret = do_accept4(arg1, arg2, arg3, arg4);
7270 #else
7271 goto unimplemented;
7272 #endif
7273 break;
7274 #endif
7275 #ifdef TARGET_NR_bind
7276 case TARGET_NR_bind:
7277 ret = do_bind(arg1, arg2, arg3);
7278 break;
7279 #endif
7280 #ifdef TARGET_NR_connect
7281 case TARGET_NR_connect:
7282 ret = do_connect(arg1, arg2, arg3);
7283 break;
7284 #endif
7285 #ifdef TARGET_NR_getpeername
7286 case TARGET_NR_getpeername:
7287 ret = do_getpeername(arg1, arg2, arg3);
7288 break;
7289 #endif
7290 #ifdef TARGET_NR_getsockname
7291 case TARGET_NR_getsockname:
7292 ret = do_getsockname(arg1, arg2, arg3);
7293 break;
7294 #endif
7295 #ifdef TARGET_NR_getsockopt
7296 case TARGET_NR_getsockopt:
7297 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7298 break;
7299 #endif
7300 #ifdef TARGET_NR_listen
7301 case TARGET_NR_listen:
7302 ret = get_errno(listen(arg1, arg2));
7303 break;
7304 #endif
7305 #ifdef TARGET_NR_recv
7306 case TARGET_NR_recv:
7307 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7308 break;
7309 #endif
7310 #ifdef TARGET_NR_recvfrom
7311 case TARGET_NR_recvfrom:
7312 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7313 break;
7314 #endif
7315 #ifdef TARGET_NR_recvmsg
7316 case TARGET_NR_recvmsg:
7317 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7318 break;
7319 #endif
7320 #ifdef TARGET_NR_send
7321 case TARGET_NR_send:
7322 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7323 break;
7324 #endif
7325 #ifdef TARGET_NR_sendmsg
7326 case TARGET_NR_sendmsg:
7327 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7328 break;
7329 #endif
7330 #ifdef TARGET_NR_sendmmsg
7331 case TARGET_NR_sendmmsg:
7332 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7333 break;
7334 case TARGET_NR_recvmmsg:
7335 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7336 break;
7337 #endif
7338 #ifdef TARGET_NR_sendto
7339 case TARGET_NR_sendto:
7340 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7341 break;
7342 #endif
7343 #ifdef TARGET_NR_shutdown
7344 case TARGET_NR_shutdown:
7345 ret = get_errno(shutdown(arg1, arg2));
7346 break;
7347 #endif
7348 #ifdef TARGET_NR_socket
7349 case TARGET_NR_socket:
7350 ret = do_socket(arg1, arg2, arg3);
7351 break;
7352 #endif
7353 #ifdef TARGET_NR_socketpair
7354 case TARGET_NR_socketpair:
7355 ret = do_socketpair(arg1, arg2, arg3, arg4);
7356 break;
7357 #endif
7358 #ifdef TARGET_NR_setsockopt
7359 case TARGET_NR_setsockopt:
7360 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7361 break;
7362 #endif
7364 case TARGET_NR_syslog:
7365 if (!(p = lock_user_string(arg2)))
7366 goto efault;
7367 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7368 unlock_user(p, arg2, 0);
7369 break;
7371 case TARGET_NR_setitimer:
7373 struct itimerval value, ovalue, *pvalue;
7375 if (arg2) {
7376 pvalue = &value;
7377 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7378 || copy_from_user_timeval(&pvalue->it_value,
7379 arg2 + sizeof(struct target_timeval)))
7380 goto efault;
7381 } else {
7382 pvalue = NULL;
7384 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7385 if (!is_error(ret) && arg3) {
7386 if (copy_to_user_timeval(arg3,
7387 &ovalue.it_interval)
7388 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7389 &ovalue.it_value))
7390 goto efault;
7393 break;
7394 case TARGET_NR_getitimer:
7396 struct itimerval value;
7398 ret = get_errno(getitimer(arg1, &value));
7399 if (!is_error(ret) && arg2) {
7400 if (copy_to_user_timeval(arg2,
7401 &value.it_interval)
7402 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7403 &value.it_value))
7404 goto efault;
7407 break;
7408 #ifdef TARGET_NR_stat
7409 case TARGET_NR_stat:
7410 if (!(p = lock_user_string(arg1)))
7411 goto efault;
7412 ret = get_errno(stat(path(p), &st));
7413 unlock_user(p, arg1, 0);
7414 goto do_stat;
7415 #endif
7416 #ifdef TARGET_NR_lstat
7417 case TARGET_NR_lstat:
7418 if (!(p = lock_user_string(arg1)))
7419 goto efault;
7420 ret = get_errno(lstat(path(p), &st));
7421 unlock_user(p, arg1, 0);
7422 goto do_stat;
7423 #endif
7424 case TARGET_NR_fstat:
7426 ret = get_errno(fstat(arg1, &st));
7427 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7428 do_stat:
7429 #endif
7430 if (!is_error(ret)) {
7431 struct target_stat *target_st;
7433 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7434 goto efault;
7435 memset(target_st, 0, sizeof(*target_st));
7436 __put_user(st.st_dev, &target_st->st_dev);
7437 __put_user(st.st_ino, &target_st->st_ino);
7438 __put_user(st.st_mode, &target_st->st_mode);
7439 __put_user(st.st_uid, &target_st->st_uid);
7440 __put_user(st.st_gid, &target_st->st_gid);
7441 __put_user(st.st_nlink, &target_st->st_nlink);
7442 __put_user(st.st_rdev, &target_st->st_rdev);
7443 __put_user(st.st_size, &target_st->st_size);
7444 __put_user(st.st_blksize, &target_st->st_blksize);
7445 __put_user(st.st_blocks, &target_st->st_blocks);
7446 __put_user(st.st_atime, &target_st->target_st_atime);
7447 __put_user(st.st_mtime, &target_st->target_st_mtime);
7448 __put_user(st.st_ctime, &target_st->target_st_ctime);
7449 unlock_user_struct(target_st, arg2, 1);
7452 break;
7453 #ifdef TARGET_NR_olduname
7454 case TARGET_NR_olduname:
7455 goto unimplemented;
7456 #endif
7457 #ifdef TARGET_NR_iopl
7458 case TARGET_NR_iopl:
7459 goto unimplemented;
7460 #endif
7461 case TARGET_NR_vhangup:
7462 ret = get_errno(vhangup());
7463 break;
7464 #ifdef TARGET_NR_idle
7465 case TARGET_NR_idle:
7466 goto unimplemented;
7467 #endif
7468 #ifdef TARGET_NR_syscall
7469 case TARGET_NR_syscall:
7470 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7471 arg6, arg7, arg8, 0);
7472 break;
7473 #endif
7474 case TARGET_NR_wait4:
7476 int status;
7477 abi_long status_ptr = arg2;
7478 struct rusage rusage, *rusage_ptr;
7479 abi_ulong target_rusage = arg4;
7480 abi_long rusage_err;
7481 if (target_rusage)
7482 rusage_ptr = &rusage;
7483 else
7484 rusage_ptr = NULL;
7485 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7486 if (!is_error(ret)) {
7487 if (status_ptr && ret) {
7488 status = host_to_target_waitstatus(status);
7489 if (put_user_s32(status, status_ptr))
7490 goto efault;
7492 if (target_rusage) {
7493 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7494 if (rusage_err) {
7495 ret = rusage_err;
7500 break;
7501 #ifdef TARGET_NR_swapoff
7502 case TARGET_NR_swapoff:
7503 if (!(p = lock_user_string(arg1)))
7504 goto efault;
7505 ret = get_errno(swapoff(p));
7506 unlock_user(p, arg1, 0);
7507 break;
7508 #endif
7509 case TARGET_NR_sysinfo:
7511 struct target_sysinfo *target_value;
7512 struct sysinfo value;
7513 ret = get_errno(sysinfo(&value));
7514 if (!is_error(ret) && arg1)
7516 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7517 goto efault;
7518 __put_user(value.uptime, &target_value->uptime);
7519 __put_user(value.loads[0], &target_value->loads[0]);
7520 __put_user(value.loads[1], &target_value->loads[1]);
7521 __put_user(value.loads[2], &target_value->loads[2]);
7522 __put_user(value.totalram, &target_value->totalram);
7523 __put_user(value.freeram, &target_value->freeram);
7524 __put_user(value.sharedram, &target_value->sharedram);
7525 __put_user(value.bufferram, &target_value->bufferram);
7526 __put_user(value.totalswap, &target_value->totalswap);
7527 __put_user(value.freeswap, &target_value->freeswap);
7528 __put_user(value.procs, &target_value->procs);
7529 __put_user(value.totalhigh, &target_value->totalhigh);
7530 __put_user(value.freehigh, &target_value->freehigh);
7531 __put_user(value.mem_unit, &target_value->mem_unit);
7532 unlock_user_struct(target_value, arg1, 1);
7535 break;
7536 #ifdef TARGET_NR_ipc
7537 case TARGET_NR_ipc:
7538 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7539 break;
7540 #endif
7541 #ifdef TARGET_NR_semget
7542 case TARGET_NR_semget:
7543 ret = get_errno(semget(arg1, arg2, arg3));
7544 break;
7545 #endif
7546 #ifdef TARGET_NR_semop
7547 case TARGET_NR_semop:
7548 ret = do_semop(arg1, arg2, arg3);
7549 break;
7550 #endif
7551 #ifdef TARGET_NR_semctl
7552 case TARGET_NR_semctl:
7553 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7554 break;
7555 #endif
7556 #ifdef TARGET_NR_msgctl
7557 case TARGET_NR_msgctl:
7558 ret = do_msgctl(arg1, arg2, arg3);
7559 break;
7560 #endif
7561 #ifdef TARGET_NR_msgget
7562 case TARGET_NR_msgget:
7563 ret = get_errno(msgget(arg1, arg2));
7564 break;
7565 #endif
7566 #ifdef TARGET_NR_msgrcv
7567 case TARGET_NR_msgrcv:
7568 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7569 break;
7570 #endif
7571 #ifdef TARGET_NR_msgsnd
7572 case TARGET_NR_msgsnd:
7573 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7574 break;
7575 #endif
7576 #ifdef TARGET_NR_shmget
7577 case TARGET_NR_shmget:
7578 ret = get_errno(shmget(arg1, arg2, arg3));
7579 break;
7580 #endif
7581 #ifdef TARGET_NR_shmctl
7582 case TARGET_NR_shmctl:
7583 ret = do_shmctl(arg1, arg2, arg3);
7584 break;
7585 #endif
7586 #ifdef TARGET_NR_shmat
7587 case TARGET_NR_shmat:
7588 ret = do_shmat(arg1, arg2, arg3);
7589 break;
7590 #endif
7591 #ifdef TARGET_NR_shmdt
7592 case TARGET_NR_shmdt:
7593 ret = do_shmdt(arg1);
7594 break;
7595 #endif
7596 case TARGET_NR_fsync:
7597 ret = get_errno(fsync(arg1));
7598 break;
7599 case TARGET_NR_clone:
7600 /* Linux manages to have three different orderings for its
7601 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7602 * match the kernel's CONFIG_CLONE_* settings.
7603 * Microblaze is further special in that it uses a sixth
7604 * implicit argument to clone for the TLS pointer.
7606 #if defined(TARGET_MICROBLAZE)
7607 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7608 #elif defined(TARGET_CLONE_BACKWARDS)
7609 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7610 #elif defined(TARGET_CLONE_BACKWARDS2)
7611 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7612 #else
7613 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7614 #endif
7615 break;
7616 #ifdef __NR_exit_group
7617 /* new thread calls */
7618 case TARGET_NR_exit_group:
7619 #ifdef TARGET_GPROF
7620 _mcleanup();
7621 #endif
7622 gdb_exit(cpu_env, arg1);
7623 ret = get_errno(exit_group(arg1));
7624 break;
7625 #endif
7626 case TARGET_NR_setdomainname:
7627 if (!(p = lock_user_string(arg1)))
7628 goto efault;
7629 ret = get_errno(setdomainname(p, arg2));
7630 unlock_user(p, arg1, 0);
7631 break;
7632 case TARGET_NR_uname:
7633 /* no need to transcode because we use the linux syscall */
7635 struct new_utsname * buf;
7637 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7638 goto efault;
7639 ret = get_errno(sys_uname(buf));
7640 if (!is_error(ret)) {
7641 /* Overwrite the native machine name with whatever is being
7642 emulated. */
7643 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7644 /* Allow the user to override the reported release. */
7645 if (qemu_uname_release && *qemu_uname_release)
7646 strcpy (buf->release, qemu_uname_release);
7648 unlock_user_struct(buf, arg1, 1);
7650 break;
7651 #ifdef TARGET_I386
7652 case TARGET_NR_modify_ldt:
7653 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7654 break;
7655 #if !defined(TARGET_X86_64)
7656 case TARGET_NR_vm86old:
7657 goto unimplemented;
7658 case TARGET_NR_vm86:
7659 ret = do_vm86(cpu_env, arg1, arg2);
7660 break;
7661 #endif
7662 #endif
7663 case TARGET_NR_adjtimex:
7664 goto unimplemented;
7665 #ifdef TARGET_NR_create_module
7666 case TARGET_NR_create_module:
7667 #endif
7668 case TARGET_NR_init_module:
7669 case TARGET_NR_delete_module:
7670 #ifdef TARGET_NR_get_kernel_syms
7671 case TARGET_NR_get_kernel_syms:
7672 #endif
7673 goto unimplemented;
7674 case TARGET_NR_quotactl:
7675 goto unimplemented;
7676 case TARGET_NR_getpgid:
7677 ret = get_errno(getpgid(arg1));
7678 break;
7679 case TARGET_NR_fchdir:
7680 ret = get_errno(fchdir(arg1));
7681 break;
7682 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7683 case TARGET_NR_bdflush:
7684 goto unimplemented;
7685 #endif
7686 #ifdef TARGET_NR_sysfs
7687 case TARGET_NR_sysfs:
7688 goto unimplemented;
7689 #endif
7690 case TARGET_NR_personality:
7691 ret = get_errno(personality(arg1));
7692 break;
7693 #ifdef TARGET_NR_afs_syscall
7694 case TARGET_NR_afs_syscall:
7695 goto unimplemented;
7696 #endif
7697 #ifdef TARGET_NR__llseek /* Not on alpha */
7698 case TARGET_NR__llseek:
7700 int64_t res;
7701 #if !defined(__NR_llseek)
7702 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7703 if (res == -1) {
7704 ret = get_errno(res);
7705 } else {
7706 ret = 0;
7708 #else
7709 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7710 #endif
7711 if ((ret == 0) && put_user_s64(res, arg4)) {
7712 goto efault;
7715 break;
7716 #endif
7717 #ifdef TARGET_NR_getdents
7718 case TARGET_NR_getdents:
7719 #ifdef __NR_getdents
7720 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7722 struct target_dirent *target_dirp;
7723 struct linux_dirent *dirp;
7724 abi_long count = arg3;
7726 dirp = malloc(count);
7727 if (!dirp) {
7728 ret = -TARGET_ENOMEM;
7729 goto fail;
7732 ret = get_errno(sys_getdents(arg1, dirp, count));
7733 if (!is_error(ret)) {
7734 struct linux_dirent *de;
7735 struct target_dirent *tde;
7736 int len = ret;
7737 int reclen, treclen;
7738 int count1, tnamelen;
7740 count1 = 0;
7741 de = dirp;
7742 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7743 goto efault;
7744 tde = target_dirp;
7745 while (len > 0) {
7746 reclen = de->d_reclen;
7747 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7748 assert(tnamelen >= 0);
7749 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7750 assert(count1 + treclen <= count);
7751 tde->d_reclen = tswap16(treclen);
7752 tde->d_ino = tswapal(de->d_ino);
7753 tde->d_off = tswapal(de->d_off);
7754 memcpy(tde->d_name, de->d_name, tnamelen);
7755 de = (struct linux_dirent *)((char *)de + reclen);
7756 len -= reclen;
7757 tde = (struct target_dirent *)((char *)tde + treclen);
7758 count1 += treclen;
7760 ret = count1;
7761 unlock_user(target_dirp, arg2, ret);
7763 free(dirp);
7765 #else
7767 struct linux_dirent *dirp;
7768 abi_long count = arg3;
7770 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7771 goto efault;
7772 ret = get_errno(sys_getdents(arg1, dirp, count));
7773 if (!is_error(ret)) {
7774 struct linux_dirent *de;
7775 int len = ret;
7776 int reclen;
7777 de = dirp;
7778 while (len > 0) {
7779 reclen = de->d_reclen;
7780 if (reclen > len)
7781 break;
7782 de->d_reclen = tswap16(reclen);
7783 tswapls(&de->d_ino);
7784 tswapls(&de->d_off);
7785 de = (struct linux_dirent *)((char *)de + reclen);
7786 len -= reclen;
7789 unlock_user(dirp, arg2, ret);
7791 #endif
7792 #else
7793 /* Implement getdents in terms of getdents64 */
7795 struct linux_dirent64 *dirp;
7796 abi_long count = arg3;
7798 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7799 if (!dirp) {
7800 goto efault;
7802 ret = get_errno(sys_getdents64(arg1, dirp, count));
7803 if (!is_error(ret)) {
7804 /* Convert the dirent64 structs to target dirent. We do this
7805 * in-place, since we can guarantee that a target_dirent is no
7806 * larger than a dirent64; however this means we have to be
7807 * careful to read everything before writing in the new format.
7809 struct linux_dirent64 *de;
7810 struct target_dirent *tde;
7811 int len = ret;
7812 int tlen = 0;
7814 de = dirp;
7815 tde = (struct target_dirent *)dirp;
7816 while (len > 0) {
7817 int namelen, treclen;
7818 int reclen = de->d_reclen;
7819 uint64_t ino = de->d_ino;
7820 int64_t off = de->d_off;
7821 uint8_t type = de->d_type;
7823 namelen = strlen(de->d_name);
7824 treclen = offsetof(struct target_dirent, d_name)
7825 + namelen + 2;
7826 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7828 memmove(tde->d_name, de->d_name, namelen + 1);
7829 tde->d_ino = tswapal(ino);
7830 tde->d_off = tswapal(off);
7831 tde->d_reclen = tswap16(treclen);
7832 /* The target_dirent type is in what was formerly a padding
7833 * byte at the end of the structure:
7835 *(((char *)tde) + treclen - 1) = type;
7837 de = (struct linux_dirent64 *)((char *)de + reclen);
7838 tde = (struct target_dirent *)((char *)tde + treclen);
7839 len -= reclen;
7840 tlen += treclen;
7842 ret = tlen;
7844 unlock_user(dirp, arg2, ret);
7846 #endif
7847 break;
7848 #endif /* TARGET_NR_getdents */
7849 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7850 case TARGET_NR_getdents64:
7852 struct linux_dirent64 *dirp;
7853 abi_long count = arg3;
7854 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7855 goto efault;
7856 ret = get_errno(sys_getdents64(arg1, dirp, count));
7857 if (!is_error(ret)) {
7858 struct linux_dirent64 *de;
7859 int len = ret;
7860 int reclen;
7861 de = dirp;
7862 while (len > 0) {
7863 reclen = de->d_reclen;
7864 if (reclen > len)
7865 break;
7866 de->d_reclen = tswap16(reclen);
7867 tswap64s((uint64_t *)&de->d_ino);
7868 tswap64s((uint64_t *)&de->d_off);
7869 de = (struct linux_dirent64 *)((char *)de + reclen);
7870 len -= reclen;
7873 unlock_user(dirp, arg2, ret);
7875 break;
7876 #endif /* TARGET_NR_getdents64 */
7877 #if defined(TARGET_NR__newselect)
7878 case TARGET_NR__newselect:
7879 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7880 break;
7881 #endif
7882 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7883 # ifdef TARGET_NR_poll
7884 case TARGET_NR_poll:
7885 # endif
7886 # ifdef TARGET_NR_ppoll
7887 case TARGET_NR_ppoll:
7888 # endif
7890 struct target_pollfd *target_pfd;
7891 unsigned int nfds = arg2;
7892 int timeout = arg3;
7893 struct pollfd *pfd;
7894 unsigned int i;
7896 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7897 if (!target_pfd)
7898 goto efault;
7900 pfd = alloca(sizeof(struct pollfd) * nfds);
7901 for(i = 0; i < nfds; i++) {
7902 pfd[i].fd = tswap32(target_pfd[i].fd);
7903 pfd[i].events = tswap16(target_pfd[i].events);
7906 # ifdef TARGET_NR_ppoll
7907 if (num == TARGET_NR_ppoll) {
7908 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7909 target_sigset_t *target_set;
7910 sigset_t _set, *set = &_set;
7912 if (arg3) {
7913 if (target_to_host_timespec(timeout_ts, arg3)) {
7914 unlock_user(target_pfd, arg1, 0);
7915 goto efault;
7917 } else {
7918 timeout_ts = NULL;
7921 if (arg4) {
7922 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7923 if (!target_set) {
7924 unlock_user(target_pfd, arg1, 0);
7925 goto efault;
7927 target_to_host_sigset(set, target_set);
7928 } else {
7929 set = NULL;
7932 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7934 if (!is_error(ret) && arg3) {
7935 host_to_target_timespec(arg3, timeout_ts);
7937 if (arg4) {
7938 unlock_user(target_set, arg4, 0);
7940 } else
7941 # endif
7942 ret = get_errno(poll(pfd, nfds, timeout));
7944 if (!is_error(ret)) {
7945 for(i = 0; i < nfds; i++) {
7946 target_pfd[i].revents = tswap16(pfd[i].revents);
7949 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7951 break;
7952 #endif
7953 case TARGET_NR_flock:
7954 /* NOTE: the flock constant seems to be the same for every
7955 Linux platform */
7956 ret = get_errno(flock(arg1, arg2));
7957 break;
7958 case TARGET_NR_readv:
7960 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7961 if (vec != NULL) {
7962 ret = get_errno(readv(arg1, vec, arg3));
7963 unlock_iovec(vec, arg2, arg3, 1);
7964 } else {
7965 ret = -host_to_target_errno(errno);
7968 break;
7969 case TARGET_NR_writev:
7971 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7972 if (vec != NULL) {
7973 ret = get_errno(writev(arg1, vec, arg3));
7974 unlock_iovec(vec, arg2, arg3, 0);
7975 } else {
7976 ret = -host_to_target_errno(errno);
7979 break;
7980 case TARGET_NR_getsid:
7981 ret = get_errno(getsid(arg1));
7982 break;
7983 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7984 case TARGET_NR_fdatasync:
7985 ret = get_errno(fdatasync(arg1));
7986 break;
7987 #endif
7988 #ifdef TARGET_NR__sysctl
7989 case TARGET_NR__sysctl:
7990 /* We don't implement this, but ENOTDIR is always a safe
7991 return value. */
7992 ret = -TARGET_ENOTDIR;
7993 break;
7994 #endif
7995 case TARGET_NR_sched_getaffinity:
7997 unsigned int mask_size;
7998 unsigned long *mask;
8000 /*
8001  * sched_getaffinity needs multiples of ulong, so need to take
8002  * care of mismatches between target ulong and host ulong sizes.
8003  */
8004 if (arg2 & (sizeof(abi_ulong) - 1)) {
8005 ret = -TARGET_EINVAL;
8006 break;
8008 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8010 mask = alloca(mask_size);
8011 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8013 if (!is_error(ret)) {
8014 if (ret > arg2) {
8015 /* More data returned than the caller's buffer will fit.
8016  * This only happens if sizeof(abi_long) < sizeof(long)
8017  * and the caller passed us a buffer holding an odd number
8018  * of abi_longs. If the host kernel is actually using the
8019  * extra 4 bytes then fail EINVAL; otherwise we can just
8020  * ignore them and only copy the interesting part.
8021  */
8022 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8023 if (numcpus > arg2 * 8) {
8024 ret = -TARGET_EINVAL;
8025 break;
8027 ret = arg2;
8030 if (copy_to_user(arg3, mask, ret)) {
8031 goto efault;
8035 break;
8036 case TARGET_NR_sched_setaffinity:
8038 unsigned int mask_size;
8039 unsigned long *mask;
8041 /*
8042  * sched_setaffinity needs multiples of ulong, so need to take
8043  * care of mismatches between target ulong and host ulong sizes.
8044  */
8045 if (arg2 & (sizeof(abi_ulong) - 1)) {
8046 ret = -TARGET_EINVAL;
8047 break;
8049 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8051 mask = alloca(mask_size);
8052 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8053 goto efault;
8055 memcpy(mask, p, arg2);
8056 unlock_user_struct(p, arg2, 0);
8058 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8060 break;
8061 case TARGET_NR_sched_setparam:
8063 struct sched_param *target_schp;
8064 struct sched_param schp;
8066 if (arg2 == 0) {
8067 return -TARGET_EINVAL;
8069 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8070 goto efault;
8071 schp.sched_priority = tswap32(target_schp->sched_priority);
8072 unlock_user_struct(target_schp, arg2, 0);
8073 ret = get_errno(sched_setparam(arg1, &schp));
8075 break;
8076 case TARGET_NR_sched_getparam:
8078 struct sched_param *target_schp;
8079 struct sched_param schp;
8081 if (arg2 == 0) {
8082 return -TARGET_EINVAL;
8084 ret = get_errno(sched_getparam(arg1, &schp));
8085 if (!is_error(ret)) {
8086 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8087 goto efault;
8088 target_schp->sched_priority = tswap32(schp.sched_priority);
8089 unlock_user_struct(target_schp, arg2, 1);
8092 break;
8093 case TARGET_NR_sched_setscheduler:
8095 struct sched_param *target_schp;
8096 struct sched_param schp;
8097 if (arg3 == 0) {
8098 return -TARGET_EINVAL;
8100 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8101 goto efault;
8102 schp.sched_priority = tswap32(target_schp->sched_priority);
8103 unlock_user_struct(target_schp, arg3, 0);
8104 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8106 break;
8107 case TARGET_NR_sched_getscheduler:
8108 ret = get_errno(sched_getscheduler(arg1));
8109 break;
8110 case TARGET_NR_sched_yield:
8111 ret = get_errno(sched_yield());
8112 break;
8113 case TARGET_NR_sched_get_priority_max:
8114 ret = get_errno(sched_get_priority_max(arg1));
8115 break;
8116 case TARGET_NR_sched_get_priority_min:
8117 ret = get_errno(sched_get_priority_min(arg1));
8118 break;
8119 case TARGET_NR_sched_rr_get_interval:
8121 struct timespec ts;
8122 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8123 if (!is_error(ret)) {
8124 ret = host_to_target_timespec(arg2, &ts);
8127 break;
8128 case TARGET_NR_nanosleep:
8130 struct timespec req, rem;
8131 target_to_host_timespec(&req, arg1);
8132 ret = get_errno(nanosleep(&req, &rem));
8133 if (is_error(ret) && arg2) {
8134 host_to_target_timespec(arg2, &rem);
8137 break;
8138 #ifdef TARGET_NR_query_module
8139 case TARGET_NR_query_module:
8140 goto unimplemented;
8141 #endif
8142 #ifdef TARGET_NR_nfsservctl
8143 case TARGET_NR_nfsservctl:
8144 goto unimplemented;
8145 #endif
8146 case TARGET_NR_prctl:
8147 switch (arg1) {
8148 case PR_GET_PDEATHSIG:
8150 int deathsig;
8151 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8152 if (!is_error(ret) && arg2
8153 && put_user_ual(deathsig, arg2)) {
8154 goto efault;
8156 break;
8158 #ifdef PR_GET_NAME
8159 case PR_GET_NAME:
8161 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8162 if (!name) {
8163 goto efault;
8165 ret = get_errno(prctl(arg1, (unsigned long)name,
8166 arg3, arg4, arg5));
8167 unlock_user(name, arg2, 16);
8168 break;
8170 case PR_SET_NAME:
8172 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8173 if (!name) {
8174 goto efault;
8176 ret = get_errno(prctl(arg1, (unsigned long)name,
8177 arg3, arg4, arg5));
8178 unlock_user(name, arg2, 0);
8179 break;
8181 #endif
8182 default:
8183 /* Most prctl options have no pointer arguments */
8184 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8185 break;
8187 break;
8188 #ifdef TARGET_NR_arch_prctl
8189 case TARGET_NR_arch_prctl:
8190 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8191 ret = do_arch_prctl(cpu_env, arg1, arg2);
8192 break;
8193 #else
8194 goto unimplemented;
8195 #endif
8196 #endif
8197 #ifdef TARGET_NR_pread64
8198 case TARGET_NR_pread64:
8199 if (regpairs_aligned(cpu_env)) {
8200 arg4 = arg5;
8201 arg5 = arg6;
8203 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8204 goto efault;
8205 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8206 unlock_user(p, arg2, ret);
8207 break;
8208 case TARGET_NR_pwrite64:
8209 if (regpairs_aligned(cpu_env)) {
8210 arg4 = arg5;
8211 arg5 = arg6;
8213 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8214 goto efault;
8215 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8216 unlock_user(p, arg2, 0);
8217 break;
8218 #endif
8219 case TARGET_NR_getcwd:
8220 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8221 goto efault;
8222 ret = get_errno(sys_getcwd1(p, arg2));
8223 unlock_user(p, arg1, ret);
8224 break;
8225 case TARGET_NR_capget:
8226 case TARGET_NR_capset:
8228 struct target_user_cap_header *target_header;
8229 struct target_user_cap_data *target_data = NULL;
8230 struct __user_cap_header_struct header;
8231 struct __user_cap_data_struct data[2];
8232 struct __user_cap_data_struct *dataptr = NULL;
8233 int i, target_datalen;
8234 int data_items = 1;
8236 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8237 goto efault;
8239 header.version = tswap32(target_header->version);
8240 header.pid = tswap32(target_header->pid);
8242 if (header.version != _LINUX_CAPABILITY_VERSION) {
8243 /* Version 2 and up takes pointer to two user_data structs */
8244 data_items = 2;
8247 target_datalen = sizeof(*target_data) * data_items;
8249 if (arg2) {
8250 if (num == TARGET_NR_capget) {
8251 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8252 } else {
8253 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8255 if (!target_data) {
8256 unlock_user_struct(target_header, arg1, 0);
8257 goto efault;
8260 if (num == TARGET_NR_capset) {
8261 for (i = 0; i < data_items; i++) {
8262 data[i].effective = tswap32(target_data[i].effective);
8263 data[i].permitted = tswap32(target_data[i].permitted);
8264 data[i].inheritable = tswap32(target_data[i].inheritable);
8268 dataptr = data;
8271 if (num == TARGET_NR_capget) {
8272 ret = get_errno(capget(&header, dataptr));
8273 } else {
8274 ret = get_errno(capset(&header, dataptr));
8277 /* The kernel always updates version for both capget and capset */
8278 target_header->version = tswap32(header.version);
8279 unlock_user_struct(target_header, arg1, 1);
8281 if (arg2) {
8282 if (num == TARGET_NR_capget) {
8283 for (i = 0; i < data_items; i++) {
8284 target_data[i].effective = tswap32(data[i].effective);
8285 target_data[i].permitted = tswap32(data[i].permitted);
8286 target_data[i].inheritable = tswap32(data[i].inheritable);
8288 unlock_user(target_data, arg2, target_datalen);
8289 } else {
8290 unlock_user(target_data, arg2, 0);
8293 break;
8295 case TARGET_NR_sigaltstack:
8296 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8297 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8298 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8299 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8300 break;
8301 #else
8302 goto unimplemented;
8303 #endif
8305 #ifdef CONFIG_SENDFILE
8306 case TARGET_NR_sendfile:
8308 off_t *offp = NULL;
8309 off_t off;
8310 if (arg3) {
8311 ret = get_user_sal(off, arg3);
8312 if (is_error(ret)) {
8313 break;
8315 offp = &off;
8317 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8318 if (!is_error(ret) && arg3) {
8319 abi_long ret2 = put_user_sal(off, arg3);
8320 if (is_error(ret2)) {
8321 ret = ret2;
8324 break;
8326 #ifdef TARGET_NR_sendfile64
8327 case TARGET_NR_sendfile64:
8329 off_t *offp = NULL;
8330 off_t off;
8331 if (arg3) {
8332 ret = get_user_s64(off, arg3);
8333 if (is_error(ret)) {
8334 break;
8336 offp = &off;
8338 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8339 if (!is_error(ret) && arg3) {
8340 abi_long ret2 = put_user_s64(off, arg3);
8341 if (is_error(ret2)) {
8342 ret = ret2;
8345 break;
8347 #endif
8348 #else
8349 case TARGET_NR_sendfile:
8350 #ifdef TARGET_NR_sendfile64
8351 case TARGET_NR_sendfile64:
8352 #endif
8353 goto unimplemented;
8354 #endif
8356 #ifdef TARGET_NR_getpmsg
8357 case TARGET_NR_getpmsg:
8358 goto unimplemented;
8359 #endif
8360 #ifdef TARGET_NR_putpmsg
8361 case TARGET_NR_putpmsg:
8362 goto unimplemented;
8363 #endif
8364 #ifdef TARGET_NR_vfork
8365 case TARGET_NR_vfork:
8366 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8367 0, 0, 0, 0));
8368 break;
8369 #endif
8370 #ifdef TARGET_NR_ugetrlimit
8371 case TARGET_NR_ugetrlimit:
8373 struct rlimit rlim;
8374 int resource = target_to_host_resource(arg1);
8375 ret = get_errno(getrlimit(resource, &rlim));
8376 if (!is_error(ret)) {
8377 struct target_rlimit *target_rlim;
8378 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8379 goto efault;
8380 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8381 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8382 unlock_user_struct(target_rlim, arg2, 1);
8384 break;
8386 #endif
8387 #ifdef TARGET_NR_truncate64
8388 case TARGET_NR_truncate64:
8389 if (!(p = lock_user_string(arg1)))
8390 goto efault;
8391 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8392 unlock_user(p, arg1, 0);
8393 break;
8394 #endif
8395 #ifdef TARGET_NR_ftruncate64
8396 case TARGET_NR_ftruncate64:
8397 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8398 break;
8399 #endif
8400 #ifdef TARGET_NR_stat64
8401 case TARGET_NR_stat64:
8402 if (!(p = lock_user_string(arg1)))
8403 goto efault;
8404 ret = get_errno(stat(path(p), &st));
8405 unlock_user(p, arg1, 0);
8406 if (!is_error(ret))
8407 ret = host_to_target_stat64(cpu_env, arg2, &st);
8408 break;
8409 #endif
8410 #ifdef TARGET_NR_lstat64
8411 case TARGET_NR_lstat64:
8412 if (!(p = lock_user_string(arg1)))
8413 goto efault;
8414 ret = get_errno(lstat(path(p), &st));
8415 unlock_user(p, arg1, 0);
8416 if (!is_error(ret))
8417 ret = host_to_target_stat64(cpu_env, arg2, &st);
8418 break;
8419 #endif
8420 #ifdef TARGET_NR_fstat64
8421 case TARGET_NR_fstat64:
8422 ret = get_errno(fstat(arg1, &st));
8423 if (!is_error(ret))
8424 ret = host_to_target_stat64(cpu_env, arg2, &st);
8425 break;
8426 #endif
8427 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8428 #ifdef TARGET_NR_fstatat64
8429 case TARGET_NR_fstatat64:
8430 #endif
8431 #ifdef TARGET_NR_newfstatat
8432 case TARGET_NR_newfstatat:
8433 #endif
8434 if (!(p = lock_user_string(arg2)))
8435 goto efault;
8436 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8437 if (!is_error(ret))
8438 ret = host_to_target_stat64(cpu_env, arg3, &st);
8439 break;
8440 #endif
8441 #ifdef TARGET_NR_lchown
8442 case TARGET_NR_lchown:
8443 if (!(p = lock_user_string(arg1)))
8444 goto efault;
8445 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8446 unlock_user(p, arg1, 0);
8447 break;
8448 #endif
8449 #ifdef TARGET_NR_getuid
8450 case TARGET_NR_getuid:
8451 ret = get_errno(high2lowuid(getuid()));
8452 break;
8453 #endif
8454 #ifdef TARGET_NR_getgid
8455 case TARGET_NR_getgid:
8456 ret = get_errno(high2lowgid(getgid()));
8457 break;
8458 #endif
8459 #ifdef TARGET_NR_geteuid
8460 case TARGET_NR_geteuid:
8461 ret = get_errno(high2lowuid(geteuid()));
8462 break;
8463 #endif
8464 #ifdef TARGET_NR_getegid
8465 case TARGET_NR_getegid:
8466 ret = get_errno(high2lowgid(getegid()));
8467 break;
8468 #endif
8469 case TARGET_NR_setreuid:
8470 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8471 break;
8472 case TARGET_NR_setregid:
8473 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8474 break;
8475 case TARGET_NR_getgroups:
8477 int gidsetsize = arg1;
8478 target_id *target_grouplist;
8479 gid_t *grouplist;
8480 int i;
8482 grouplist = alloca(gidsetsize * sizeof(gid_t));
8483 ret = get_errno(getgroups(gidsetsize, grouplist));
8484 if (gidsetsize == 0)
8485 break;
8486 if (!is_error(ret)) {
8487 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8488 if (!target_grouplist)
8489 goto efault;
8490 for(i = 0;i < ret; i++)
8491 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8492 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8495 break;
8496 case TARGET_NR_setgroups:
8498 int gidsetsize = arg1;
8499 target_id *target_grouplist;
8500 gid_t *grouplist = NULL;
8501 int i;
8502 if (gidsetsize) {
8503 grouplist = alloca(gidsetsize * sizeof(gid_t));
8504 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8505 if (!target_grouplist) {
8506 ret = -TARGET_EFAULT;
8507 goto fail;
8509 for (i = 0; i < gidsetsize; i++) {
8510 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8512 unlock_user(target_grouplist, arg2, 0);
8514 ret = get_errno(setgroups(gidsetsize, grouplist));
8516 break;
8517 case TARGET_NR_fchown:
8518 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8519 break;
8520 #if defined(TARGET_NR_fchownat)
8521 case TARGET_NR_fchownat:
8522 if (!(p = lock_user_string(arg2)))
8523 goto efault;
8524 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8525 low2highgid(arg4), arg5));
8526 unlock_user(p, arg2, 0);
8527 break;
8528 #endif
8529 #ifdef TARGET_NR_setresuid
8530 case TARGET_NR_setresuid:
8531 ret = get_errno(setresuid(low2highuid(arg1),
8532 low2highuid(arg2),
8533 low2highuid(arg3)));
8534 break;
8535 #endif
8536 #ifdef TARGET_NR_getresuid
8537 case TARGET_NR_getresuid:
8539 uid_t ruid, euid, suid;
8540 ret = get_errno(getresuid(&ruid, &euid, &suid));
8541 if (!is_error(ret)) {
8542 if (put_user_id(high2lowuid(ruid), arg1)
8543 || put_user_id(high2lowuid(euid), arg2)
8544 || put_user_id(high2lowuid(suid), arg3))
8545 goto efault;
8548 break;
8549 #endif
8550 #ifdef TARGET_NR_getresgid
8551 case TARGET_NR_setresgid:
8552 ret = get_errno(setresgid(low2highgid(arg1),
8553 low2highgid(arg2),
8554 low2highgid(arg3)));
8555 break;
8556 #endif
8557 #ifdef TARGET_NR_getresgid
8558 case TARGET_NR_getresgid:
8560 gid_t rgid, egid, sgid;
8561 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8562 if (!is_error(ret)) {
8563 if (put_user_id(high2lowgid(rgid), arg1)
8564 || put_user_id(high2lowgid(egid), arg2)
8565 || put_user_id(high2lowgid(sgid), arg3))
8566 goto efault;
8569 break;
8570 #endif
8571 #ifdef TARGET_NR_chown
8572 case TARGET_NR_chown:
8573 if (!(p = lock_user_string(arg1)))
8574 goto efault;
8575 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8576 unlock_user(p, arg1, 0);
8577 break;
8578 #endif
8579 case TARGET_NR_setuid:
8580 ret = get_errno(setuid(low2highuid(arg1)));
8581 break;
8582 case TARGET_NR_setgid:
8583 ret = get_errno(setgid(low2highgid(arg1)));
8584 break;
8585 case TARGET_NR_setfsuid:
8586 ret = get_errno(setfsuid(arg1));
8587 break;
8588 case TARGET_NR_setfsgid:
8589 ret = get_errno(setfsgid(arg1));
8590 break;
8592 #ifdef TARGET_NR_lchown32
8593 case TARGET_NR_lchown32:
8594 if (!(p = lock_user_string(arg1)))
8595 goto efault;
8596 ret = get_errno(lchown(p, arg2, arg3));
8597 unlock_user(p, arg1, 0);
8598 break;
8599 #endif
8600 #ifdef TARGET_NR_getuid32
8601 case TARGET_NR_getuid32:
8602 ret = get_errno(getuid());
8603 break;
8604 #endif
8606 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8607 /* Alpha specific */
8608 case TARGET_NR_getxuid:
8610 uid_t euid;
8611 euid=geteuid();
8612 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8614 ret = get_errno(getuid());
8615 break;
8616 #endif
8617 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8618 /* Alpha specific */
8619 case TARGET_NR_getxgid:
8621 uid_t egid;
8622 egid=getegid();
8623 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8625 ret = get_errno(getgid());
8626 break;
8627 #endif
8628 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8629 /* Alpha specific */
8630 case TARGET_NR_osf_getsysinfo:
8631 ret = -TARGET_EOPNOTSUPP;
8632 switch (arg1) {
8633 case TARGET_GSI_IEEE_FP_CONTROL:
8635 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8637 /* Copied from linux ieee_fpcr_to_swcr. */
8638 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8639 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8640 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8641 | SWCR_TRAP_ENABLE_DZE
8642 | SWCR_TRAP_ENABLE_OVF);
8643 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8644 | SWCR_TRAP_ENABLE_INE);
8645 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8646 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8648 if (put_user_u64 (swcr, arg2))
8649 goto efault;
8650 ret = 0;
8652 break;
8654 /* case GSI_IEEE_STATE_AT_SIGNAL:
8655 -- Not implemented in linux kernel.
8656 case GSI_UACPROC:
8657 -- Retrieves current unaligned access state; not much used.
8658 case GSI_PROC_TYPE:
8659 -- Retrieves implver information; surely not used.
8660 case GSI_GET_HWRPB:
8661 -- Grabs a copy of the HWRPB; surely not used.
8662 */
8664 break;
8665 #endif
8666 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8667 /* Alpha specific */
8668 case TARGET_NR_osf_setsysinfo:
8669 ret = -TARGET_EOPNOTSUPP;
8670 switch (arg1) {
8671 case TARGET_SSI_IEEE_FP_CONTROL:
8673 uint64_t swcr, fpcr, orig_fpcr;
8675 if (get_user_u64 (swcr, arg2)) {
8676 goto efault;
8678 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8679 fpcr = orig_fpcr & FPCR_DYN_MASK;
8681 /* Copied from linux ieee_swcr_to_fpcr. */
8682 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8683 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8684 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8685 | SWCR_TRAP_ENABLE_DZE
8686 | SWCR_TRAP_ENABLE_OVF)) << 48;
8687 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8688 | SWCR_TRAP_ENABLE_INE)) << 57;
8689 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8690 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8692 cpu_alpha_store_fpcr(cpu_env, fpcr);
8693 ret = 0;
8695 break;
8697 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8699 uint64_t exc, fpcr, orig_fpcr;
8700 int si_code;
8702 if (get_user_u64(exc, arg2)) {
8703 goto efault;
8706 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8708 /* We only add to the exception status here. */
8709 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8711 cpu_alpha_store_fpcr(cpu_env, fpcr);
8712 ret = 0;
8714 /* Old exceptions are not signaled. */
8715 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8717 /* If any exceptions set by this call,
8718 and are unmasked, send a signal. */
8719 si_code = 0;
8720 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8721 si_code = TARGET_FPE_FLTRES;
8723 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8724 si_code = TARGET_FPE_FLTUND;
8726 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8727 si_code = TARGET_FPE_FLTOVF;
8729 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8730 si_code = TARGET_FPE_FLTDIV;
8732 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8733 si_code = TARGET_FPE_FLTINV;
8735 if (si_code != 0) {
8736 target_siginfo_t info;
8737 info.si_signo = SIGFPE;
8738 info.si_errno = 0;
8739 info.si_code = si_code;
8740 info._sifields._sigfault._addr
8741 = ((CPUArchState *)cpu_env)->pc;
8742 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8745 break;
8747 /* case SSI_NVPAIRS:
8748 -- Used with SSIN_UACPROC to enable unaligned accesses.
8749 case SSI_IEEE_STATE_AT_SIGNAL:
8750 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8751 -- Not implemented in linux kernel
8752 */
8754 break;
8755 #endif
8756 #ifdef TARGET_NR_osf_sigprocmask
8757 /* Alpha specific. */
8758 case TARGET_NR_osf_sigprocmask:
8760 abi_ulong mask;
8761 int how;
8762 sigset_t set, oldset;
8764 switch(arg1) {
8765 case TARGET_SIG_BLOCK:
8766 how = SIG_BLOCK;
8767 break;
8768 case TARGET_SIG_UNBLOCK:
8769 how = SIG_UNBLOCK;
8770 break;
8771 case TARGET_SIG_SETMASK:
8772 how = SIG_SETMASK;
8773 break;
8774 default:
8775 ret = -TARGET_EINVAL;
8776 goto fail;
8778 mask = arg2;
8779 target_to_host_old_sigset(&set, &mask);
8780 do_sigprocmask(how, &set, &oldset);
8781 host_to_target_old_sigset(&mask, &oldset);
8782 ret = mask;
8784 break;
8785 #endif
8787 #ifdef TARGET_NR_getgid32
8788 case TARGET_NR_getgid32:
8789 ret = get_errno(getgid());
8790 break;
8791 #endif
8792 #ifdef TARGET_NR_geteuid32
8793 case TARGET_NR_geteuid32:
8794 ret = get_errno(geteuid());
8795 break;
8796 #endif
8797 #ifdef TARGET_NR_getegid32
8798 case TARGET_NR_getegid32:
8799 ret = get_errno(getegid());
8800 break;
8801 #endif
8802 #ifdef TARGET_NR_setreuid32
8803 case TARGET_NR_setreuid32:
8804 ret = get_errno(setreuid(arg1, arg2));
8805 break;
8806 #endif
8807 #ifdef TARGET_NR_setregid32
8808 case TARGET_NR_setregid32:
8809 ret = get_errno(setregid(arg1, arg2));
8810 break;
8811 #endif
8812 #ifdef TARGET_NR_getgroups32
8813 case TARGET_NR_getgroups32:
8815 int gidsetsize = arg1;
8816 uint32_t *target_grouplist;
8817 gid_t *grouplist;
8818 int i;
8820 grouplist = alloca(gidsetsize * sizeof(gid_t));
8821 ret = get_errno(getgroups(gidsetsize, grouplist));
8822 if (gidsetsize == 0)
8823 break;
8824 if (!is_error(ret)) {
8825 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8826 if (!target_grouplist) {
8827 ret = -TARGET_EFAULT;
8828 goto fail;
8830 for(i = 0;i < ret; i++)
8831 target_grouplist[i] = tswap32(grouplist[i]);
8832 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8835 break;
8836 #endif
8837 #ifdef TARGET_NR_setgroups32
8838 case TARGET_NR_setgroups32:
8840 int gidsetsize = arg1;
8841 uint32_t *target_grouplist;
8842 gid_t *grouplist;
8843 int i;
8845 grouplist = alloca(gidsetsize * sizeof(gid_t));
8846 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8847 if (!target_grouplist) {
8848 ret = -TARGET_EFAULT;
8849 goto fail;
8851 for(i = 0;i < gidsetsize; i++)
8852 grouplist[i] = tswap32(target_grouplist[i]);
8853 unlock_user(target_grouplist, arg2, 0);
8854 ret = get_errno(setgroups(gidsetsize, grouplist));
8856 break;
8857 #endif
8858 #ifdef TARGET_NR_fchown32
8859 case TARGET_NR_fchown32:
8860 ret = get_errno(fchown(arg1, arg2, arg3));
8861 break;
8862 #endif
8863 #ifdef TARGET_NR_setresuid32
8864 case TARGET_NR_setresuid32:
8865 ret = get_errno(setresuid(arg1, arg2, arg3));
8866 break;
8867 #endif
8868 #ifdef TARGET_NR_getresuid32
8869 case TARGET_NR_getresuid32:
8871 uid_t ruid, euid, suid;
8872 ret = get_errno(getresuid(&ruid, &euid, &suid));
8873 if (!is_error(ret)) {
8874 if (put_user_u32(ruid, arg1)
8875 || put_user_u32(euid, arg2)
8876 || put_user_u32(suid, arg3))
8877 goto efault;
8880 break;
8881 #endif
8882 #ifdef TARGET_NR_setresgid32
8883 case TARGET_NR_setresgid32:
8884 ret = get_errno(setresgid(arg1, arg2, arg3));
8885 break;
8886 #endif
8887 #ifdef TARGET_NR_getresgid32
8888 case TARGET_NR_getresgid32:
8890 gid_t rgid, egid, sgid;
8891 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8892 if (!is_error(ret)) {
8893 if (put_user_u32(rgid, arg1)
8894 || put_user_u32(egid, arg2)
8895 || put_user_u32(sgid, arg3))
8896 goto efault;
8899 break;
8900 #endif
8901 #ifdef TARGET_NR_chown32
8902 case TARGET_NR_chown32:
8903 if (!(p = lock_user_string(arg1)))
8904 goto efault;
8905 ret = get_errno(chown(p, arg2, arg3));
8906 unlock_user(p, arg1, 0);
8907 break;
8908 #endif
8909 #ifdef TARGET_NR_setuid32
8910 case TARGET_NR_setuid32:
8911 ret = get_errno(setuid(arg1));
8912 break;
8913 #endif
8914 #ifdef TARGET_NR_setgid32
8915 case TARGET_NR_setgid32:
8916 ret = get_errno(setgid(arg1));
8917 break;
8918 #endif
8919 #ifdef TARGET_NR_setfsuid32
8920 case TARGET_NR_setfsuid32:
8921 ret = get_errno(setfsuid(arg1));
8922 break;
8923 #endif
8924 #ifdef TARGET_NR_setfsgid32
8925 case TARGET_NR_setfsgid32:
8926 ret = get_errno(setfsgid(arg1));
8927 break;
8928 #endif
8930 case TARGET_NR_pivot_root:
8931 goto unimplemented;
8932 #ifdef TARGET_NR_mincore
8933 case TARGET_NR_mincore:
8935 void *a;
8936 ret = -TARGET_EFAULT;
8937 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8938 goto efault;
8939 if (!(p = lock_user_string(arg3)))
8940 goto mincore_fail;
8941 ret = get_errno(mincore(a, arg2, p));
8942 unlock_user(p, arg3, ret);
8943 mincore_fail:
8944 unlock_user(a, arg1, 0);
8946 break;
8947 #endif
8948 #ifdef TARGET_NR_arm_fadvise64_64
8949 case TARGET_NR_arm_fadvise64_64:
8951 /*
8952  * arm_fadvise64_64 looks like fadvise64_64 but
8953  * with different argument order
8954  */
8955 abi_long temp;
8956 temp = arg3;
8957 arg3 = arg4;
8958 arg4 = temp;
8960 #endif
8961 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8962 #ifdef TARGET_NR_fadvise64_64
8963 case TARGET_NR_fadvise64_64:
8964 #endif
8965 #ifdef TARGET_NR_fadvise64
8966 case TARGET_NR_fadvise64:
8967 #endif
8968 #ifdef TARGET_S390X
8969 switch (arg4) {
8970 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8971 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8972 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8973 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8974 default: break;
8976 #endif
8977 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8978 break;
8979 #endif
8980 #ifdef TARGET_NR_madvise
8981 case TARGET_NR_madvise:
8982 /* A straight passthrough may not be safe because qemu sometimes
8983 turns private file-backed mappings into anonymous mappings.
8984 This will break MADV_DONTNEED.
8985 This is a hint, so ignoring and returning success is ok. */
8986 ret = get_errno(0);
8987 break;
8988 #endif
8989 #if TARGET_ABI_BITS == 32
8990 case TARGET_NR_fcntl64:
8992 int cmd;
8993 struct flock64 fl;
8994 struct target_flock64 *target_fl;
8995 #ifdef TARGET_ARM
8996 struct target_eabi_flock64 *target_efl;
8997 #endif
8999 cmd = target_to_host_fcntl_cmd(arg2);
9000 if (cmd == -TARGET_EINVAL) {
9001 ret = cmd;
9002 break;
9005 switch(arg2) {
9006 case TARGET_F_GETLK64:
9007 #ifdef TARGET_ARM
9008 if (((CPUARMState *)cpu_env)->eabi) {
9009 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9010 goto efault;
9011 fl.l_type = tswap16(target_efl->l_type);
9012 fl.l_whence = tswap16(target_efl->l_whence);
9013 fl.l_start = tswap64(target_efl->l_start);
9014 fl.l_len = tswap64(target_efl->l_len);
9015 fl.l_pid = tswap32(target_efl->l_pid);
9016 unlock_user_struct(target_efl, arg3, 0);
9017 } else
9018 #endif
9020 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9021 goto efault;
9022 fl.l_type = tswap16(target_fl->l_type);
9023 fl.l_whence = tswap16(target_fl->l_whence);
9024 fl.l_start = tswap64(target_fl->l_start);
9025 fl.l_len = tswap64(target_fl->l_len);
9026 fl.l_pid = tswap32(target_fl->l_pid);
9027 unlock_user_struct(target_fl, arg3, 0);
9029 ret = get_errno(fcntl(arg1, cmd, &fl));
9030 if (ret == 0) {
9031 #ifdef TARGET_ARM
9032 if (((CPUARMState *)cpu_env)->eabi) {
9033 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9034 goto efault;
9035 target_efl->l_type = tswap16(fl.l_type);
9036 target_efl->l_whence = tswap16(fl.l_whence);
9037 target_efl->l_start = tswap64(fl.l_start);
9038 target_efl->l_len = tswap64(fl.l_len);
9039 target_efl->l_pid = tswap32(fl.l_pid);
9040 unlock_user_struct(target_efl, arg3, 1);
9041 } else
9042 #endif
9044 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9045 goto efault;
9046 target_fl->l_type = tswap16(fl.l_type);
9047 target_fl->l_whence = tswap16(fl.l_whence);
9048 target_fl->l_start = tswap64(fl.l_start);
9049 target_fl->l_len = tswap64(fl.l_len);
9050 target_fl->l_pid = tswap32(fl.l_pid);
9051 unlock_user_struct(target_fl, arg3, 1);
9054 break;
9056 case TARGET_F_SETLK64:
9057 case TARGET_F_SETLKW64:
9058 #ifdef TARGET_ARM
9059 if (((CPUARMState *)cpu_env)->eabi) {
9060 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9061 goto efault;
9062 fl.l_type = tswap16(target_efl->l_type);
9063 fl.l_whence = tswap16(target_efl->l_whence);
9064 fl.l_start = tswap64(target_efl->l_start);
9065 fl.l_len = tswap64(target_efl->l_len);
9066 fl.l_pid = tswap32(target_efl->l_pid);
9067 unlock_user_struct(target_efl, arg3, 0);
9068 } else
9069 #endif
9071 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9072 goto efault;
9073 fl.l_type = tswap16(target_fl->l_type);
9074 fl.l_whence = tswap16(target_fl->l_whence);
9075 fl.l_start = tswap64(target_fl->l_start);
9076 fl.l_len = tswap64(target_fl->l_len);
9077 fl.l_pid = tswap32(target_fl->l_pid);
9078 unlock_user_struct(target_fl, arg3, 0);
9080 ret = get_errno(fcntl(arg1, cmd, &fl));
9081 break;
9082 default:
9083 ret = do_fcntl(arg1, arg2, arg3);
9084 break;
9086 break;
9088 #endif
9089 #ifdef TARGET_NR_cacheflush
9090 case TARGET_NR_cacheflush:
9091 /* self-modifying code is handled automatically, so nothing needed */
9092 ret = 0;
9093 break;
9094 #endif
9095 #ifdef TARGET_NR_security
9096 case TARGET_NR_security:
9097 goto unimplemented;
9098 #endif
9099 #ifdef TARGET_NR_getpagesize
9100 case TARGET_NR_getpagesize:
9101 ret = TARGET_PAGE_SIZE;
9102 break;
9103 #endif
9104 case TARGET_NR_gettid:
9105 ret = get_errno(gettid());
9106 break;
9107 #ifdef TARGET_NR_readahead
9108 case TARGET_NR_readahead:
9109 #if TARGET_ABI_BITS == 32
9110 if (regpairs_aligned(cpu_env)) {
9111 arg2 = arg3;
9112 arg3 = arg4;
9113 arg4 = arg5;
9115 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9116 #else
9117 ret = get_errno(readahead(arg1, arg2, arg3));
9118 #endif
9119 break;
9120 #endif
9121 #ifdef CONFIG_ATTR
9122 #ifdef TARGET_NR_setxattr
9123 case TARGET_NR_listxattr:
9124 case TARGET_NR_llistxattr:
9126 void *p, *b = 0;
9127 if (arg2) {
9128 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9129 if (!b) {
9130 ret = -TARGET_EFAULT;
9131 break;
9134 p = lock_user_string(arg1);
9135 if (p) {
9136 if (num == TARGET_NR_listxattr) {
9137 ret = get_errno(listxattr(p, b, arg3));
9138 } else {
9139 ret = get_errno(llistxattr(p, b, arg3));
9141 } else {
9142 ret = -TARGET_EFAULT;
9144 unlock_user(p, arg1, 0);
9145 unlock_user(b, arg2, arg3);
9146 break;
9148 case TARGET_NR_flistxattr:
9150 void *b = 0;
9151 if (arg2) {
9152 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9153 if (!b) {
9154 ret = -TARGET_EFAULT;
9155 break;
9158 ret = get_errno(flistxattr(arg1, b, arg3));
9159 unlock_user(b, arg2, arg3);
9160 break;
9162 case TARGET_NR_setxattr:
9163 case TARGET_NR_lsetxattr:
9165 void *p, *n, *v = 0;
9166 if (arg3) {
9167 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9168 if (!v) {
9169 ret = -TARGET_EFAULT;
9170 break;
9173 p = lock_user_string(arg1);
9174 n = lock_user_string(arg2);
9175 if (p && n) {
9176 if (num == TARGET_NR_setxattr) {
9177 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9178 } else {
9179 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9181 } else {
9182 ret = -TARGET_EFAULT;
9184 unlock_user(p, arg1, 0);
9185 unlock_user(n, arg2, 0);
9186 unlock_user(v, arg3, 0);
9188 break;
9189 case TARGET_NR_fsetxattr:
9191 void *n, *v = 0;
9192 if (arg3) {
9193 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9194 if (!v) {
9195 ret = -TARGET_EFAULT;
9196 break;
9199 n = lock_user_string(arg2);
9200 if (n) {
9201 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9202 } else {
9203 ret = -TARGET_EFAULT;
9205 unlock_user(n, arg2, 0);
9206 unlock_user(v, arg3, 0);
9208 break;
9209 case TARGET_NR_getxattr:
9210 case TARGET_NR_lgetxattr:
9212 void *p, *n, *v = 0;
9213 if (arg3) {
9214 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9215 if (!v) {
9216 ret = -TARGET_EFAULT;
9217 break;
9220 p = lock_user_string(arg1);
9221 n = lock_user_string(arg2);
9222 if (p && n) {
9223 if (num == TARGET_NR_getxattr) {
9224 ret = get_errno(getxattr(p, n, v, arg4));
9225 } else {
9226 ret = get_errno(lgetxattr(p, n, v, arg4));
9228 } else {
9229 ret = -TARGET_EFAULT;
9231 unlock_user(p, arg1, 0);
9232 unlock_user(n, arg2, 0);
9233 unlock_user(v, arg3, arg4);
9235 break;
9236 case TARGET_NR_fgetxattr:
9238 void *n, *v = 0;
9239 if (arg3) {
9240 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9241 if (!v) {
9242 ret = -TARGET_EFAULT;
9243 break;
9246 n = lock_user_string(arg2);
9247 if (n) {
9248 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9249 } else {
9250 ret = -TARGET_EFAULT;
9252 unlock_user(n, arg2, 0);
9253 unlock_user(v, arg3, arg4);
9255 break;
9256 case TARGET_NR_removexattr:
9257 case TARGET_NR_lremovexattr:
9259 void *p, *n;
9260 p = lock_user_string(arg1);
9261 n = lock_user_string(arg2);
9262 if (p && n) {
9263 if (num == TARGET_NR_removexattr) {
9264 ret = get_errno(removexattr(p, n));
9265 } else {
9266 ret = get_errno(lremovexattr(p, n));
9268 } else {
9269 ret = -TARGET_EFAULT;
9271 unlock_user(p, arg1, 0);
9272 unlock_user(n, arg2, 0);
9274 break;
9275 case TARGET_NR_fremovexattr:
9277 void *n;
9278 n = lock_user_string(arg2);
9279 if (n) {
9280 ret = get_errno(fremovexattr(arg1, n));
9281 } else {
9282 ret = -TARGET_EFAULT;
9284 unlock_user(n, arg2, 0);
9286 break;
9287 #endif
9288 #endif /* CONFIG_ATTR */
9289 #ifdef TARGET_NR_set_thread_area
9290 case TARGET_NR_set_thread_area:
9291 #if defined(TARGET_MIPS)
9292 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9293 ret = 0;
9294 break;
9295 #elif defined(TARGET_CRIS)
9296 if (arg1 & 0xff)
9297 ret = -TARGET_EINVAL;
9298 else {
9299 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9300 ret = 0;
9302 break;
9303 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9304 ret = do_set_thread_area(cpu_env, arg1);
9305 break;
9306 #elif defined(TARGET_M68K)
9308 TaskState *ts = cpu->opaque;
9309 ts->tp_value = arg1;
9310 ret = 0;
9311 break;
9313 #else
9314 goto unimplemented_nowarn;
9315 #endif
9316 #endif
9317 #ifdef TARGET_NR_get_thread_area
9318 case TARGET_NR_get_thread_area:
9319 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9320 ret = do_get_thread_area(cpu_env, arg1);
9321 break;
9322 #elif defined(TARGET_M68K)
9324 TaskState *ts = cpu->opaque;
9325 ret = ts->tp_value;
9326 break;
9328 #else
9329 goto unimplemented_nowarn;
9330 #endif
9331 #endif
9332 #ifdef TARGET_NR_getdomainname
9333 case TARGET_NR_getdomainname:
9334 goto unimplemented_nowarn;
9335 #endif
9337 #ifdef TARGET_NR_clock_gettime
9338 case TARGET_NR_clock_gettime:
9340 struct timespec ts;
9341 ret = get_errno(clock_gettime(arg1, &ts));
9342 if (!is_error(ret)) {
9343 host_to_target_timespec(arg2, &ts);
9345 break;
9347 #endif
9348 #ifdef TARGET_NR_clock_getres
9349 case TARGET_NR_clock_getres:
9351 struct timespec ts;
9352 ret = get_errno(clock_getres(arg1, &ts));
9353 if (!is_error(ret)) {
9354 host_to_target_timespec(arg2, &ts);
9356 break;
9358 #endif
9359 #ifdef TARGET_NR_clock_nanosleep
9360 case TARGET_NR_clock_nanosleep:
9362 struct timespec ts;
9363 target_to_host_timespec(&ts, arg3);
9364 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9365 if (arg4)
9366 host_to_target_timespec(arg4, &ts);
9368 #if defined(TARGET_PPC)
9369 /* clock_nanosleep is odd in that it returns positive errno values.
9370 * On PPC, CR0 bit 3 should be set in such a situation. */
9371 if (ret) {
9372 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9374 #endif
9375 break;
9377 #endif
9379 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9380 case TARGET_NR_set_tid_address:
9381 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9382 break;
9383 #endif
9385 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9386 case TARGET_NR_tkill:
9387 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9388 break;
9389 #endif
9391 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9392 case TARGET_NR_tgkill:
9393 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9394 target_to_host_signal(arg3)));
9395 break;
9396 #endif
9398 #ifdef TARGET_NR_set_robust_list
9399 case TARGET_NR_set_robust_list:
9400 case TARGET_NR_get_robust_list:
9401 /* The ABI for supporting robust futexes has userspace pass
9402 * the kernel a pointer to a linked list which is updated by
9403 * userspace after the syscall; the list is walked by the kernel
9404 * when the thread exits. Since the linked list in QEMU guest
9405 * memory isn't a valid linked list for the host and we have
9406 * no way to reliably intercept the thread-death event, we can't
9407 * support these. Silently return ENOSYS so that guest userspace
9408 * falls back to a non-robust futex implementation (which should
9409 * be OK except in the corner case of the guest crashing while
9410 * holding a mutex that is shared with another process via
9411 * shared memory).
9413 goto unimplemented_nowarn;
9414 #endif
9416 #if defined(TARGET_NR_utimensat)
9417 case TARGET_NR_utimensat:
9419 struct timespec *tsp, ts[2];
9420 if (!arg3) {
9421 tsp = NULL;
9422 } else {
9423 target_to_host_timespec(ts, arg3);
9424 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9425 tsp = ts;
9427 if (!arg2)
9428 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9429 else {
9430 if (!(p = lock_user_string(arg2))) {
9431 ret = -TARGET_EFAULT;
9432 goto fail;
9434 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9435 unlock_user(p, arg2, 0);
9438 break;
9439 #endif
9440 case TARGET_NR_futex:
9441 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9442 break;
9443 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9444 case TARGET_NR_inotify_init:
9445 ret = get_errno(sys_inotify_init());
9446 break;
9447 #endif
9448 #ifdef CONFIG_INOTIFY1
9449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9450 case TARGET_NR_inotify_init1:
9451 ret = get_errno(sys_inotify_init1(arg1));
9452 break;
9453 #endif
9454 #endif
9455 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9456 case TARGET_NR_inotify_add_watch:
9457 p = lock_user_string(arg2);
9458 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9459 unlock_user(p, arg2, 0);
9460 break;
9461 #endif
9462 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9463 case TARGET_NR_inotify_rm_watch:
9464 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9465 break;
9466 #endif
9468 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9469 case TARGET_NR_mq_open:
9471 struct mq_attr posix_mq_attr, *attrp;
9473 p = lock_user_string(arg1 - 1);
9474 if (arg4 != 0) {
9475 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9476 attrp = &posix_mq_attr;
9477 } else {
9478 attrp = 0;
9480 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9481 unlock_user (p, arg1, 0);
9483 break;
9485 case TARGET_NR_mq_unlink:
9486 p = lock_user_string(arg1 - 1);
9487 ret = get_errno(mq_unlink(p));
9488 unlock_user (p, arg1, 0);
9489 break;
9491 case TARGET_NR_mq_timedsend:
9493 struct timespec ts;
9495 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9496 if (arg5 != 0) {
9497 target_to_host_timespec(&ts, arg5);
9498 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9499 host_to_target_timespec(arg5, &ts);
9501 else
9502 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9503 unlock_user (p, arg2, arg3);
9505 break;
9507 case TARGET_NR_mq_timedreceive:
9509 struct timespec ts;
9510 unsigned int prio;
9512 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9513 if (arg5 != 0) {
9514 target_to_host_timespec(&ts, arg5);
9515 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9516 host_to_target_timespec(arg5, &ts);
9518 else
9519 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9520 unlock_user (p, arg2, arg3);
9521 if (arg4 != 0)
9522 put_user_u32(prio, arg4);
9524 break;
9526 /* Not implemented for now... */
9527 /* case TARGET_NR_mq_notify: */
9528 /* break; */
9530 case TARGET_NR_mq_getsetattr:
9532 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9533 ret = 0;
9534 if (arg3 != 0) {
9535 ret = mq_getattr(arg1, &posix_mq_attr_out);
9536 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9538 if (arg2 != 0) {
9539 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9540 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9544 break;
9545 #endif
9547 #ifdef CONFIG_SPLICE
9548 #ifdef TARGET_NR_tee
9549 case TARGET_NR_tee:
9551 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9553 break;
9554 #endif
9555 #ifdef TARGET_NR_splice
9556 case TARGET_NR_splice:
9558 loff_t loff_in, loff_out;
9559 loff_t *ploff_in = NULL, *ploff_out = NULL;
9560 if (arg2) {
9561 if (get_user_u64(loff_in, arg2)) {
9562 goto efault;
9564 ploff_in = &loff_in;
9566 if (arg4) {
9567 if (get_user_u64(loff_out, arg4)) {
9568 goto efault;
9570 ploff_out = &loff_out;
9572 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9573 if (arg2) {
9574 if (put_user_u64(loff_in, arg2)) {
9575 goto efault;
9578 if (arg4) {
9579 if (put_user_u64(loff_out, arg4)) {
9580 goto efault;
9584 break;
9585 #endif
9586 #ifdef TARGET_NR_vmsplice
9587 case TARGET_NR_vmsplice:
9589 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9590 if (vec != NULL) {
9591 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9592 unlock_iovec(vec, arg2, arg3, 0);
9593 } else {
9594 ret = -host_to_target_errno(errno);
9597 break;
9598 #endif
9599 #endif /* CONFIG_SPLICE */
9600 #ifdef CONFIG_EVENTFD
9601 #if defined(TARGET_NR_eventfd)
9602 case TARGET_NR_eventfd:
9603 ret = get_errno(eventfd(arg1, 0));
9604 break;
9605 #endif
9606 #if defined(TARGET_NR_eventfd2)
9607 case TARGET_NR_eventfd2:
9609 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9610 if (arg2 & TARGET_O_NONBLOCK) {
9611 host_flags |= O_NONBLOCK;
9613 if (arg2 & TARGET_O_CLOEXEC) {
9614 host_flags |= O_CLOEXEC;
9616 ret = get_errno(eventfd(arg1, host_flags));
9617 break;
9619 #endif
9620 #endif /* CONFIG_EVENTFD */
9621 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9622 case TARGET_NR_fallocate:
9623 #if TARGET_ABI_BITS == 32
9624 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9625 target_offset64(arg5, arg6)));
9626 #else
9627 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9628 #endif
9629 break;
9630 #endif
9631 #if defined(CONFIG_SYNC_FILE_RANGE)
9632 #if defined(TARGET_NR_sync_file_range)
9633 case TARGET_NR_sync_file_range:
9634 #if TARGET_ABI_BITS == 32
9635 #if defined(TARGET_MIPS)
9636 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9637 target_offset64(arg5, arg6), arg7));
9638 #else
9639 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9640 target_offset64(arg4, arg5), arg6));
9641 #endif /* !TARGET_MIPS */
9642 #else
9643 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9644 #endif
9645 break;
9646 #endif
9647 #if defined(TARGET_NR_sync_file_range2)
9648 case TARGET_NR_sync_file_range2:
9649 /* This is like sync_file_range but the arguments are reordered */
9650 #if TARGET_ABI_BITS == 32
9651 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9652 target_offset64(arg5, arg6), arg2));
9653 #else
9654 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9655 #endif
9656 break;
9657 #endif
9658 #endif
9659 #if defined(CONFIG_EPOLL)
9660 #if defined(TARGET_NR_epoll_create)
9661 case TARGET_NR_epoll_create:
9662 ret = get_errno(epoll_create(arg1));
9663 break;
9664 #endif
9665 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9666 case TARGET_NR_epoll_create1:
9667 ret = get_errno(epoll_create1(arg1));
9668 break;
9669 #endif
9670 #if defined(TARGET_NR_epoll_ctl)
9671 case TARGET_NR_epoll_ctl:
9673 struct epoll_event ep;
9674 struct epoll_event *epp = 0;
9675 if (arg4) {
9676 struct target_epoll_event *target_ep;
9677 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9678 goto efault;
9680 ep.events = tswap32(target_ep->events);
9681 /* The epoll_data_t union is just opaque data to the kernel,
9682 * so we transfer all 64 bits across and need not worry what
9683 * actual data type it is.
9685 ep.data.u64 = tswap64(target_ep->data.u64);
9686 unlock_user_struct(target_ep, arg4, 0);
9687 epp = &ep;
9689 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9690 break;
9692 #endif
9694 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9695 #define IMPLEMENT_EPOLL_PWAIT
9696 #endif
9697 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9698 #if defined(TARGET_NR_epoll_wait)
9699 case TARGET_NR_epoll_wait:
9700 #endif
9701 #if defined(IMPLEMENT_EPOLL_PWAIT)
9702 case TARGET_NR_epoll_pwait:
9703 #endif
9705 struct target_epoll_event *target_ep;
9706 struct epoll_event *ep;
9707 int epfd = arg1;
9708 int maxevents = arg3;
9709 int timeout = arg4;
9711 target_ep = lock_user(VERIFY_WRITE, arg2,
9712 maxevents * sizeof(struct target_epoll_event), 1);
9713 if (!target_ep) {
9714 goto efault;
9717 ep = alloca(maxevents * sizeof(struct epoll_event));
9719 switch (num) {
9720 #if defined(IMPLEMENT_EPOLL_PWAIT)
9721 case TARGET_NR_epoll_pwait:
9723 target_sigset_t *target_set;
9724 sigset_t _set, *set = &_set;
9726 if (arg5) {
9727 target_set = lock_user(VERIFY_READ, arg5,
9728 sizeof(target_sigset_t), 1);
9729 if (!target_set) {
9730 unlock_user(target_ep, arg2, 0);
9731 goto efault;
9733 target_to_host_sigset(set, target_set);
9734 unlock_user(target_set, arg5, 0);
9735 } else {
9736 set = NULL;
9739 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9740 break;
9742 #endif
9743 #if defined(TARGET_NR_epoll_wait)
9744 case TARGET_NR_epoll_wait:
9745 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9746 break;
9747 #endif
9748 default:
9749 ret = -TARGET_ENOSYS;
9751 if (!is_error(ret)) {
9752 int i;
9753 for (i = 0; i < ret; i++) {
9754 target_ep[i].events = tswap32(ep[i].events);
9755 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9758 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9759 break;
9761 #endif
9762 #endif
9763 #ifdef TARGET_NR_prlimit64
9764 case TARGET_NR_prlimit64:
9766 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9767 struct target_rlimit64 *target_rnew, *target_rold;
9768 struct host_rlimit64 rnew, rold, *rnewp = 0;
9769 int resource = target_to_host_resource(arg2);
9770 if (arg3) {
9771 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9772 goto efault;
9774 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9775 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9776 unlock_user_struct(target_rnew, arg3, 0);
9777 rnewp = &rnew;
9780 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
9781 if (!is_error(ret) && arg4) {
9782 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9783 goto efault;
9785 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9786 target_rold->rlim_max = tswap64(rold.rlim_max);
9787 unlock_user_struct(target_rold, arg4, 1);
9789 break;
9791 #endif
9792 #ifdef TARGET_NR_gethostname
9793 case TARGET_NR_gethostname:
9795 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9796 if (name) {
9797 ret = get_errno(gethostname(name, arg2));
9798 unlock_user(name, arg1, arg2);
9799 } else {
9800 ret = -TARGET_EFAULT;
9802 break;
9804 #endif
9805 #ifdef TARGET_NR_atomic_cmpxchg_32
9806 case TARGET_NR_atomic_cmpxchg_32:
9808 /* should use start_exclusive from main.c */
9809 abi_ulong mem_value;
9810 if (get_user_u32(mem_value, arg6)) {
9811 target_siginfo_t info;
9812 info.si_signo = SIGSEGV;
9813 info.si_errno = 0;
9814 info.si_code = TARGET_SEGV_MAPERR;
9815 info._sifields._sigfault._addr = arg6;
9816 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9817 ret = 0xdeadbeef;
9820 if (mem_value == arg2)
9821 put_user_u32(arg1, arg6);
9822 ret = mem_value;
9823 break;
9825 #endif
9826 #ifdef TARGET_NR_atomic_barrier
9827 case TARGET_NR_atomic_barrier:
9829 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9830 ret = 0;
9831 break;
9833 #endif
9835 #ifdef TARGET_NR_timer_create
9836 case TARGET_NR_timer_create:
9838 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9840 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9842 int clkid = arg1;
9843 int timer_index = next_free_host_timer();
9845 if (timer_index < 0) {
9846 ret = -TARGET_EAGAIN;
9847 } else {
9848 timer_t *phtimer = g_posix_timers + timer_index;
9850 if (arg2) {
9851 phost_sevp = &host_sevp;
9852 ret = target_to_host_sigevent(phost_sevp, arg2);
9853 if (ret != 0) {
9854 break;
9858 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9859 if (ret) {
9860 phtimer = NULL;
9861 } else {
9862 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
9863 goto efault;
9867 break;
9869 #endif
9871 #ifdef TARGET_NR_timer_settime
9872 case TARGET_NR_timer_settime:
9874 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9875 * struct itimerspec * old_value */
9876 target_timer_t timerid = get_timer_id(arg1);
9878 if (timerid < 0) {
9879 ret = timerid;
9880 } else if (arg3 == 0) {
9881 ret = -TARGET_EINVAL;
9882 } else {
9883 timer_t htimer = g_posix_timers[timerid];
9884 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9886 target_to_host_itimerspec(&hspec_new, arg3);
9887 ret = get_errno(
9888 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9889 host_to_target_itimerspec(arg2, &hspec_old);
9891 break;
9893 #endif
9895 #ifdef TARGET_NR_timer_gettime
9896 case TARGET_NR_timer_gettime:
9898 /* args: timer_t timerid, struct itimerspec *curr_value */
9899 target_timer_t timerid = get_timer_id(arg1);
9901 if (timerid < 0) {
9902 ret = timerid;
9903 } else if (!arg2) {
9904 ret = -TARGET_EFAULT;
9905 } else {
9906 timer_t htimer = g_posix_timers[timerid];
9907 struct itimerspec hspec;
9908 ret = get_errno(timer_gettime(htimer, &hspec));
9910 if (host_to_target_itimerspec(arg2, &hspec)) {
9911 ret = -TARGET_EFAULT;
9914 break;
9916 #endif
9918 #ifdef TARGET_NR_timer_getoverrun
9919 case TARGET_NR_timer_getoverrun:
9921 /* args: timer_t timerid */
9922 target_timer_t timerid = get_timer_id(arg1);
9924 if (timerid < 0) {
9925 ret = timerid;
9926 } else {
9927 timer_t htimer = g_posix_timers[timerid];
9928 ret = get_errno(timer_getoverrun(htimer));
9930 break;
9932 #endif
9934 #ifdef TARGET_NR_timer_delete
9935 case TARGET_NR_timer_delete:
9937 /* args: timer_t timerid */
9938 target_timer_t timerid = get_timer_id(arg1);
9940 if (timerid < 0) {
9941 ret = timerid;
9942 } else {
9943 timer_t htimer = g_posix_timers[timerid];
9944 ret = get_errno(timer_delete(htimer));
9945 g_posix_timers[timerid] = 0;
9947 break;
9949 #endif
9951 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9952 case TARGET_NR_timerfd_create:
9953 ret = get_errno(timerfd_create(arg1,
9954 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
9955 break;
9956 #endif
9958 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9959 case TARGET_NR_timerfd_gettime:
9961 struct itimerspec its_curr;
9963 ret = get_errno(timerfd_gettime(arg1, &its_curr));
9965 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
9966 goto efault;
9969 break;
9970 #endif
9972 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9973 case TARGET_NR_timerfd_settime:
9975 struct itimerspec its_new, its_old, *p_new;
9977 if (arg3) {
9978 if (target_to_host_itimerspec(&its_new, arg3)) {
9979 goto efault;
9981 p_new = &its_new;
9982 } else {
9983 p_new = NULL;
9986 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
9988 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
9989 goto efault;
9992 break;
9993 #endif
9995 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9996 case TARGET_NR_ioprio_get:
9997 ret = get_errno(ioprio_get(arg1, arg2));
9998 break;
9999 #endif
10001 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10002 case TARGET_NR_ioprio_set:
10003 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10004 break;
10005 #endif
10007 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10008 case TARGET_NR_setns:
10009 ret = get_errno(setns(arg1, arg2));
10010 break;
10011 #endif
10012 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10013 case TARGET_NR_unshare:
10014 ret = get_errno(unshare(arg1));
10015 break;
10016 #endif
10018 default:
10019 unimplemented:
10020 gemu_log("qemu: Unsupported syscall: %d\n", num);
10021 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10022 unimplemented_nowarn:
10023 #endif
10024 ret = -TARGET_ENOSYS;
10025 break;
10027 fail:
10028 #ifdef DEBUG
10029 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10030 #endif
10031 if(do_strace)
10032 print_syscall_ret(num, ret);
10033 return ret;
10034 efault:
10035 ret = -TARGET_EFAULT;
10036 goto fail;