megasas: add MegaRAID SAS 2108 emulation
[qemu/ar7.git] / linux-user / syscall.c
bloba175cc15f8c538182b4b6f0813790bc58aa0b26d
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef TARGET_GPROF
73 #include <sys/gmon.h>
74 #endif
75 #ifdef CONFIG_EVENTFD
76 #include <sys/eventfd.h>
77 #endif
78 #ifdef CONFIG_EPOLL
79 #include <sys/epoll.h>
80 #endif
81 #ifdef CONFIG_ATTR
82 #include "qemu/xattr.h"
83 #endif
84 #ifdef CONFIG_SENDFILE
85 #include <sys/sendfile.h>
86 #endif
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #include <linux/vt.h>
108 #include <linux/dm-ioctl.h>
109 #include <linux/reboot.h>
110 #include <linux/route.h>
111 #include <linux/filter.h>
112 #include <linux/blkpg.h>
113 #include "linux_loop.h"
114 #include "uname.h"
116 #include "qemu.h"
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* Generate a static host-syscall wrapper for 0..6 argument syscalls;
 * each expands to a function calling syscall(2) with __NR_<name>. */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
/* Map the sys_* wrapper names onto the real host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* These 64-bit hosts have no _llseek; plain lseek covers the range. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
216 #ifdef __NR_getdents
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #endif
219 #if !defined(__NR_getdents) || \
220 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
221 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
222 #endif
223 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
224 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
225 loff_t *, res, uint, wh);
226 #endif
227 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
228 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
229 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
230 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
231 #endif
232 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
233 _syscall2(int,sys_tkill,int,tid,int,sig)
234 #endif
235 #ifdef __NR_exit_group
236 _syscall1(int,exit_group,int,error_code)
237 #endif
238 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
239 _syscall1(int,set_tid_address,int *,tidptr)
240 #endif
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
251 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 void *, arg);
253 _syscall2(int, capget, struct __user_cap_header_struct *, header,
254 struct __user_cap_data_struct *, data);
255 _syscall2(int, capset, struct __user_cap_header_struct *, header,
256 struct __user_cap_data_struct *, data);
257 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
258 _syscall2(int, ioprio_get, int, which, int, who)
259 #endif
260 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
261 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
262 #endif
264 static bitmask_transtbl fcntl_flags_tbl[] = {
265 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
266 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
267 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
268 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
269 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
270 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
271 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
272 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
273 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
274 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
275 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
276 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
277 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
278 #if defined(O_DIRECT)
279 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
280 #endif
281 #if defined(O_NOATIME)
282 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
283 #endif
284 #if defined(O_CLOEXEC)
285 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
286 #endif
287 #if defined(O_PATH)
288 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
289 #endif
290 /* Don't terminate the list prematurely on 64-bit host+guest. */
291 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
292 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
293 #endif
294 { 0, 0, 0, 0 }
/* Host getcwd() wrapper that mimics the getcwd syscall's return
 * convention: the length of the path including its NUL terminator on
 * success, or -1 with errno set by getcwd() on failure. */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
/* openat(2) wrapper: forwards 'mode' only when O_CREAT is present,
 * since open(2)/openat(2) take the extra mode argument only then. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* libc has utimensat(): NULL pathname means "the fd itself" (futimens). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: report ENOSYS like a missing syscall. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat  */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the generic syscall dispatch can call through a
 * uniform sys_* name. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
/* For each of ppoll/pselect6/prlimit64, fall back to syscall number -1
 * (always-ENOSYS) when the host kernel headers lack the call, so the
 * wrapper still compiles. */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first unused slot in g_posix_timers (marking it with a
 * non-zero placeholder) and return its index, or -1 if all are in use. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
448 #define ERRNO_TABLE_SIZE 1200
450 /* target_to_host_errno_table[] is initialized from
451 * host_to_target_errno_table[] in syscall_init(). */
452 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
456 * This list is the union of errno values overridden in asm-<arch>/errno.h
457 * minus the errnos that are not actually generic to all archs.
459 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
460 [EIDRM] = TARGET_EIDRM,
461 [ECHRNG] = TARGET_ECHRNG,
462 [EL2NSYNC] = TARGET_EL2NSYNC,
463 [EL3HLT] = TARGET_EL3HLT,
464 [EL3RST] = TARGET_EL3RST,
465 [ELNRNG] = TARGET_ELNRNG,
466 [EUNATCH] = TARGET_EUNATCH,
467 [ENOCSI] = TARGET_ENOCSI,
468 [EL2HLT] = TARGET_EL2HLT,
469 [EDEADLK] = TARGET_EDEADLK,
470 [ENOLCK] = TARGET_ENOLCK,
471 [EBADE] = TARGET_EBADE,
472 [EBADR] = TARGET_EBADR,
473 [EXFULL] = TARGET_EXFULL,
474 [ENOANO] = TARGET_ENOANO,
475 [EBADRQC] = TARGET_EBADRQC,
476 [EBADSLT] = TARGET_EBADSLT,
477 [EBFONT] = TARGET_EBFONT,
478 [ENOSTR] = TARGET_ENOSTR,
479 [ENODATA] = TARGET_ENODATA,
480 [ETIME] = TARGET_ETIME,
481 [ENOSR] = TARGET_ENOSR,
482 [ENONET] = TARGET_ENONET,
483 [ENOPKG] = TARGET_ENOPKG,
484 [EREMOTE] = TARGET_EREMOTE,
485 [ENOLINK] = TARGET_ENOLINK,
486 [EADV] = TARGET_EADV,
487 [ESRMNT] = TARGET_ESRMNT,
488 [ECOMM] = TARGET_ECOMM,
489 [EPROTO] = TARGET_EPROTO,
490 [EDOTDOT] = TARGET_EDOTDOT,
491 [EMULTIHOP] = TARGET_EMULTIHOP,
492 [EBADMSG] = TARGET_EBADMSG,
493 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
494 [EOVERFLOW] = TARGET_EOVERFLOW,
495 [ENOTUNIQ] = TARGET_ENOTUNIQ,
496 [EBADFD] = TARGET_EBADFD,
497 [EREMCHG] = TARGET_EREMCHG,
498 [ELIBACC] = TARGET_ELIBACC,
499 [ELIBBAD] = TARGET_ELIBBAD,
500 [ELIBSCN] = TARGET_ELIBSCN,
501 [ELIBMAX] = TARGET_ELIBMAX,
502 [ELIBEXEC] = TARGET_ELIBEXEC,
503 [EILSEQ] = TARGET_EILSEQ,
504 [ENOSYS] = TARGET_ENOSYS,
505 [ELOOP] = TARGET_ELOOP,
506 [ERESTART] = TARGET_ERESTART,
507 [ESTRPIPE] = TARGET_ESTRPIPE,
508 [ENOTEMPTY] = TARGET_ENOTEMPTY,
509 [EUSERS] = TARGET_EUSERS,
510 [ENOTSOCK] = TARGET_ENOTSOCK,
511 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
512 [EMSGSIZE] = TARGET_EMSGSIZE,
513 [EPROTOTYPE] = TARGET_EPROTOTYPE,
514 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
515 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
516 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
517 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
518 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
519 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
520 [EADDRINUSE] = TARGET_EADDRINUSE,
521 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
522 [ENETDOWN] = TARGET_ENETDOWN,
523 [ENETUNREACH] = TARGET_ENETUNREACH,
524 [ENETRESET] = TARGET_ENETRESET,
525 [ECONNABORTED] = TARGET_ECONNABORTED,
526 [ECONNRESET] = TARGET_ECONNRESET,
527 [ENOBUFS] = TARGET_ENOBUFS,
528 [EISCONN] = TARGET_EISCONN,
529 [ENOTCONN] = TARGET_ENOTCONN,
530 [EUCLEAN] = TARGET_EUCLEAN,
531 [ENOTNAM] = TARGET_ENOTNAM,
532 [ENAVAIL] = TARGET_ENAVAIL,
533 [EISNAM] = TARGET_EISNAM,
534 [EREMOTEIO] = TARGET_EREMOTEIO,
535 [ESHUTDOWN] = TARGET_ESHUTDOWN,
536 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
537 [ETIMEDOUT] = TARGET_ETIMEDOUT,
538 [ECONNREFUSED] = TARGET_ECONNREFUSED,
539 [EHOSTDOWN] = TARGET_EHOSTDOWN,
540 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
541 [EALREADY] = TARGET_EALREADY,
542 [EINPROGRESS] = TARGET_EINPROGRESS,
543 [ESTALE] = TARGET_ESTALE,
544 [ECANCELED] = TARGET_ECANCELED,
545 [ENOMEDIUM] = TARGET_ENOMEDIUM,
546 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
547 #ifdef ENOKEY
548 [ENOKEY] = TARGET_ENOKEY,
549 #endif
550 #ifdef EKEYEXPIRED
551 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
552 #endif
553 #ifdef EKEYREVOKED
554 [EKEYREVOKED] = TARGET_EKEYREVOKED,
555 #endif
556 #ifdef EKEYREJECTED
557 [EKEYREJECTED] = TARGET_EKEYREJECTED,
558 #endif
559 #ifdef EOWNERDEAD
560 [EOWNERDEAD] = TARGET_EOWNERDEAD,
561 #endif
562 #ifdef ENOTRECOVERABLE
563 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
564 #endif
567 static inline int host_to_target_errno(int err)
569 if(host_to_target_errno_table[err])
570 return host_to_target_errno_table[err];
571 return err;
574 static inline int target_to_host_errno(int err)
576 if (target_to_host_errno_table[err])
577 return target_to_host_errno_table[err];
578 return err;
581 static inline abi_long get_errno(abi_long ret)
583 if (ret == -1)
584 return -host_to_target_errno(errno);
585 else
586 return ret;
589 static inline int is_error(abi_long ret)
591 return (abi_ulong)ret >= (abi_ulong)(-4096);
594 char *target_strerror(int err)
596 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
597 return NULL;
599 return strerror(target_to_host_errno(err));
602 static inline int host_to_target_sock_type(int host_type)
604 int target_type;
606 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
607 case SOCK_DGRAM:
608 target_type = TARGET_SOCK_DGRAM;
609 break;
610 case SOCK_STREAM:
611 target_type = TARGET_SOCK_STREAM;
612 break;
613 default:
614 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
615 break;
618 #if defined(SOCK_CLOEXEC)
619 if (host_type & SOCK_CLOEXEC) {
620 target_type |= TARGET_SOCK_CLOEXEC;
622 #endif
624 #if defined(SOCK_NONBLOCK)
625 if (host_type & SOCK_NONBLOCK) {
626 target_type |= TARGET_SOCK_NONBLOCK;
628 #endif
630 return target_type;
633 static abi_ulong target_brk;
634 static abi_ulong target_original_brk;
635 static abi_ulong brk_page;
637 void target_set_brk(abi_ulong new_brk)
639 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
640 brk_page = HOST_PAGE_ALIGN(target_brk);
643 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
644 #define DEBUGF_BRK(message, args...)
646 /* do_brk() must return target values and target errnos. */
647 abi_long do_brk(abi_ulong new_brk)
649 abi_long mapped_addr;
650 int new_alloc_size;
652 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
654 if (!new_brk) {
655 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
656 return target_brk;
658 if (new_brk < target_original_brk) {
659 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
660 target_brk);
661 return target_brk;
664 /* If the new brk is less than the highest page reserved to the
665 * target heap allocation, set it and we're almost done... */
666 if (new_brk <= brk_page) {
667 /* Heap contents are initialized to zero, as for anonymous
668 * mapped pages. */
669 if (new_brk > target_brk) {
670 memset(g2h(target_brk), 0, new_brk - target_brk);
672 target_brk = new_brk;
673 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
674 return target_brk;
677 /* We need to allocate more memory after the brk... Note that
678 * we don't use MAP_FIXED because that will map over the top of
679 * any existing mapping (like the one with the host libc or qemu
680 * itself); instead we treat "mapped but at wrong address" as
681 * a failure and unmap again.
683 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
684 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
685 PROT_READ|PROT_WRITE,
686 MAP_ANON|MAP_PRIVATE, 0, 0));
688 if (mapped_addr == brk_page) {
689 /* Heap contents are initialized to zero, as for anonymous
690 * mapped pages. Technically the new pages are already
691 * initialized to zero since they *are* anonymous mapped
692 * pages, however we have to take care with the contents that
693 * come from the remaining part of the previous page: it may
694 * contains garbage data due to a previous heap usage (grown
695 * then shrunken). */
696 memset(g2h(target_brk), 0, brk_page - target_brk);
698 target_brk = new_brk;
699 brk_page = HOST_PAGE_ALIGN(target_brk);
700 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
701 target_brk);
702 return target_brk;
703 } else if (mapped_addr != -1) {
704 /* Mapped but at wrong address, meaning there wasn't actually
705 * enough space for this brk.
707 target_munmap(mapped_addr, new_alloc_size);
708 mapped_addr = -1;
709 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
711 else {
712 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
715 #if defined(TARGET_ALPHA)
716 /* We (partially) emulate OSF/1 on Alpha, which requires we
717 return a proper errno, not an unchanged brk value. */
718 return -TARGET_ENOMEM;
719 #endif
720 /* For everything else, return the previous break. */
721 return target_brk;
724 static inline abi_long copy_from_user_fdset(fd_set *fds,
725 abi_ulong target_fds_addr,
726 int n)
728 int i, nw, j, k;
729 abi_ulong b, *target_fds;
731 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
732 if (!(target_fds = lock_user(VERIFY_READ,
733 target_fds_addr,
734 sizeof(abi_ulong) * nw,
735 1)))
736 return -TARGET_EFAULT;
738 FD_ZERO(fds);
739 k = 0;
740 for (i = 0; i < nw; i++) {
741 /* grab the abi_ulong */
742 __get_user(b, &target_fds[i]);
743 for (j = 0; j < TARGET_ABI_BITS; j++) {
744 /* check the bit inside the abi_ulong */
745 if ((b >> j) & 1)
746 FD_SET(k, fds);
747 k++;
751 unlock_user(target_fds, target_fds_addr, 0);
753 return 0;
756 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
757 abi_ulong target_fds_addr,
758 int n)
760 if (target_fds_addr) {
761 if (copy_from_user_fdset(fds, target_fds_addr, n))
762 return -TARGET_EFAULT;
763 *fds_ptr = fds;
764 } else {
765 *fds_ptr = NULL;
767 return 0;
770 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
771 const fd_set *fds,
772 int n)
774 int i, nw, j, k;
775 abi_long v;
776 abi_ulong *target_fds;
778 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
779 if (!(target_fds = lock_user(VERIFY_WRITE,
780 target_fds_addr,
781 sizeof(abi_ulong) * nw,
782 0)))
783 return -TARGET_EFAULT;
785 k = 0;
786 for (i = 0; i < nw; i++) {
787 v = 0;
788 for (j = 0; j < TARGET_ABI_BITS; j++) {
789 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
790 k++;
792 __put_user(v, &target_fds[i]);
795 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
797 return 0;
800 #if defined(__alpha__)
801 #define HOST_HZ 1024
802 #else
803 #define HOST_HZ 100
804 #endif
806 static inline abi_long host_to_target_clock_t(long ticks)
808 #if HOST_HZ == TARGET_HZ
809 return ticks;
810 #else
811 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
812 #endif
815 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
816 const struct rusage *rusage)
818 struct target_rusage *target_rusage;
820 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
821 return -TARGET_EFAULT;
822 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
823 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
824 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
825 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
826 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
827 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
828 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
829 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
830 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
831 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
832 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
833 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
834 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
835 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
836 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
837 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
838 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
839 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
840 unlock_user_struct(target_rusage, target_addr, 1);
842 return 0;
845 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
847 abi_ulong target_rlim_swap;
848 rlim_t result;
850 target_rlim_swap = tswapal(target_rlim);
851 if (target_rlim_swap == TARGET_RLIM_INFINITY)
852 return RLIM_INFINITY;
854 result = target_rlim_swap;
855 if (target_rlim_swap != (rlim_t)result)
856 return RLIM_INFINITY;
858 return result;
861 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
863 abi_ulong target_rlim_swap;
864 abi_ulong result;
866 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
867 target_rlim_swap = TARGET_RLIM_INFINITY;
868 else
869 target_rlim_swap = rlim;
870 result = tswapal(target_rlim_swap);
872 return result;
875 static inline int target_to_host_resource(int code)
877 switch (code) {
878 case TARGET_RLIMIT_AS:
879 return RLIMIT_AS;
880 case TARGET_RLIMIT_CORE:
881 return RLIMIT_CORE;
882 case TARGET_RLIMIT_CPU:
883 return RLIMIT_CPU;
884 case TARGET_RLIMIT_DATA:
885 return RLIMIT_DATA;
886 case TARGET_RLIMIT_FSIZE:
887 return RLIMIT_FSIZE;
888 case TARGET_RLIMIT_LOCKS:
889 return RLIMIT_LOCKS;
890 case TARGET_RLIMIT_MEMLOCK:
891 return RLIMIT_MEMLOCK;
892 case TARGET_RLIMIT_MSGQUEUE:
893 return RLIMIT_MSGQUEUE;
894 case TARGET_RLIMIT_NICE:
895 return RLIMIT_NICE;
896 case TARGET_RLIMIT_NOFILE:
897 return RLIMIT_NOFILE;
898 case TARGET_RLIMIT_NPROC:
899 return RLIMIT_NPROC;
900 case TARGET_RLIMIT_RSS:
901 return RLIMIT_RSS;
902 case TARGET_RLIMIT_RTPRIO:
903 return RLIMIT_RTPRIO;
904 case TARGET_RLIMIT_SIGPENDING:
905 return RLIMIT_SIGPENDING;
906 case TARGET_RLIMIT_STACK:
907 return RLIMIT_STACK;
908 default:
909 return code;
913 static inline abi_long copy_from_user_timeval(struct timeval *tv,
914 abi_ulong target_tv_addr)
916 struct target_timeval *target_tv;
918 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
919 return -TARGET_EFAULT;
921 __get_user(tv->tv_sec, &target_tv->tv_sec);
922 __get_user(tv->tv_usec, &target_tv->tv_usec);
924 unlock_user_struct(target_tv, target_tv_addr, 0);
926 return 0;
929 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
930 const struct timeval *tv)
932 struct target_timeval *target_tv;
934 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
935 return -TARGET_EFAULT;
937 __put_user(tv->tv_sec, &target_tv->tv_sec);
938 __put_user(tv->tv_usec, &target_tv->tv_usec);
940 unlock_user_struct(target_tv, target_tv_addr, 1);
942 return 0;
945 static inline abi_long copy_from_user_timezone(struct timezone *tz,
946 abi_ulong target_tz_addr)
948 struct target_timezone *target_tz;
950 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
951 return -TARGET_EFAULT;
954 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
955 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
957 unlock_user_struct(target_tz, target_tz_addr, 0);
959 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into a host one. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr back into guest memory. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    /* Unpack the three guest fd sets (NULL guest address -> NULL set). */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    /* On success, write the (possibly modified) sets and timeout back. */
    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
1054 static abi_long do_pipe2(int host_pipe[], int flags)
1056 #ifdef CONFIG_PIPE2
1057 return pipe2(host_pipe, flags);
1058 #else
1059 return -ENOSYS;
1060 #endif
/* Emulate pipe(2)/pipe2(2) for the guest.
 *
 * cpu_env:  the target CPU state (needed for targets whose pipe ABI
 *           returns the second fd in a register rather than via memory).
 * pipedes:  guest address of the two-int result array.
 * flags:    pipe2 flags; zero selects plain pipe().
 * is_pipe2: nonzero when servicing the pipe2 syscall, which always uses
 *           the memory-based convention even on the special targets.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Alpha returns fd[1] in register a4 and fd[0] as the result. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1097 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1098 abi_ulong target_addr,
1099 socklen_t len)
1101 struct target_ip_mreqn *target_smreqn;
1103 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1104 if (!target_smreqn)
1105 return -TARGET_EFAULT;
1106 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1107 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1108 if (len == sizeof(struct target_ip_mreqn))
1109 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1110 unlock_user(target_smreqn, target_addr, 0);
1112 return 0;
/* Copy a guest sockaddr into a host sockaddr buffer, byte-swapping the
 * family field and fixing up AF_UNIX / AF_PACKET peculiarities.
 * The caller must supply an addr buffer of at least len+1 bytes (the
 * AF_UNIX path below may extend len by one). */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): cp[len] reads one byte past the locked
             * region; presumably safe because guest pages are mapped
             * with slack, but worth confirming. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* host-endian family */
    if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        /* sockaddr_ll carries multi-byte fields beyond the family;
         * swap them in place after the bulk copy. */
        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1162 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1163 struct sockaddr *addr,
1164 socklen_t len)
1166 struct target_sockaddr *target_saddr;
1168 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1169 if (!target_saddr)
1170 return -TARGET_EFAULT;
1171 memcpy(target_saddr, addr, len);
1172 target_saddr->sa_family = tswap16(addr->sa_family);
1173 unlock_user(target_saddr, target_addr, len);
1175 return 0;
/* Convert the guest's ancillary-data (cmsg) chain into the host msghdr,
 * walking both chains in lockstep.  Only SCM_RIGHTS payloads are
 * translated (fd values byte-swapped); anything else is copied raw with
 * a warning.  On return msgh->msg_controllen holds the host space used. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;               /* no (complete) control data */
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = recorded cmsg_len minus the aligned header. */
        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            /* Host buffer exhausted; stop and report what fitted. */
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            /* SCM_RIGHTS: an array of ints (file descriptors) to swap. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host's ancillary-data (cmsg) chain back into the guest
 * msghdr after a recvmsg.  SCM_RIGHTS, SO_TIMESTAMP and SCM_CREDENTIALS
 * payloads get proper per-field translation; anything else is copied
 * raw with a warning.  target_msgh->msg_controllen is updated to the
 * guest space actually used. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;               /* guest supplied no usable buffer */
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            /* Guest buffer exhausted; deliver the part that fits. */
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Passed file descriptors: swap each int. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = len / sizeof(int);

                for (i = 0; i < numfds; i++)
                    target_fd[i] = tswap32(fd[i]);
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval))
                    goto unimplemented;

                /* copy struct timeval to target */
                target_tv->tv_sec = tswapal(tv->tv_sec);
                target_tv->tv_usec = tswapal(tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payloads are passed through unconverted. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1335 /* do_setsockopt() Must return target values and target errnos. */
1336 static abi_long do_setsockopt(int sockfd, int level, int optname,
1337 abi_ulong optval_addr, socklen_t optlen)
1339 abi_long ret;
1340 int val;
1341 struct ip_mreqn *ip_mreq;
1342 struct ip_mreq_source *ip_mreq_source;
1344 switch(level) {
1345 case SOL_TCP:
1346 /* TCP options all take an 'int' value. */
1347 if (optlen < sizeof(uint32_t))
1348 return -TARGET_EINVAL;
1350 if (get_user_u32(val, optval_addr))
1351 return -TARGET_EFAULT;
1352 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1353 break;
1354 case SOL_IP:
1355 switch(optname) {
1356 case IP_TOS:
1357 case IP_TTL:
1358 case IP_HDRINCL:
1359 case IP_ROUTER_ALERT:
1360 case IP_RECVOPTS:
1361 case IP_RETOPTS:
1362 case IP_PKTINFO:
1363 case IP_MTU_DISCOVER:
1364 case IP_RECVERR:
1365 case IP_RECVTOS:
1366 #ifdef IP_FREEBIND
1367 case IP_FREEBIND:
1368 #endif
1369 case IP_MULTICAST_TTL:
1370 case IP_MULTICAST_LOOP:
1371 val = 0;
1372 if (optlen >= sizeof(uint32_t)) {
1373 if (get_user_u32(val, optval_addr))
1374 return -TARGET_EFAULT;
1375 } else if (optlen >= 1) {
1376 if (get_user_u8(val, optval_addr))
1377 return -TARGET_EFAULT;
1379 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1380 break;
1381 case IP_ADD_MEMBERSHIP:
1382 case IP_DROP_MEMBERSHIP:
1383 if (optlen < sizeof (struct target_ip_mreq) ||
1384 optlen > sizeof (struct target_ip_mreqn))
1385 return -TARGET_EINVAL;
1387 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1388 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1389 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1390 break;
1392 case IP_BLOCK_SOURCE:
1393 case IP_UNBLOCK_SOURCE:
1394 case IP_ADD_SOURCE_MEMBERSHIP:
1395 case IP_DROP_SOURCE_MEMBERSHIP:
1396 if (optlen != sizeof (struct target_ip_mreq_source))
1397 return -TARGET_EINVAL;
1399 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1400 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1401 unlock_user (ip_mreq_source, optval_addr, 0);
1402 break;
1404 default:
1405 goto unimplemented;
1407 break;
1408 case SOL_IPV6:
1409 switch (optname) {
1410 case IPV6_MTU_DISCOVER:
1411 case IPV6_MTU:
1412 case IPV6_V6ONLY:
1413 case IPV6_RECVPKTINFO:
1414 val = 0;
1415 if (optlen < sizeof(uint32_t)) {
1416 return -TARGET_EINVAL;
1418 if (get_user_u32(val, optval_addr)) {
1419 return -TARGET_EFAULT;
1421 ret = get_errno(setsockopt(sockfd, level, optname,
1422 &val, sizeof(val)));
1423 break;
1424 default:
1425 goto unimplemented;
1427 break;
1428 case SOL_RAW:
1429 switch (optname) {
1430 case ICMP_FILTER:
1431 /* struct icmp_filter takes an u32 value */
1432 if (optlen < sizeof(uint32_t)) {
1433 return -TARGET_EINVAL;
1436 if (get_user_u32(val, optval_addr)) {
1437 return -TARGET_EFAULT;
1439 ret = get_errno(setsockopt(sockfd, level, optname,
1440 &val, sizeof(val)));
1441 break;
1443 default:
1444 goto unimplemented;
1446 break;
1447 case TARGET_SOL_SOCKET:
1448 switch (optname) {
1449 case TARGET_SO_RCVTIMEO:
1451 struct timeval tv;
1453 optname = SO_RCVTIMEO;
1455 set_timeout:
1456 if (optlen != sizeof(struct target_timeval)) {
1457 return -TARGET_EINVAL;
1460 if (copy_from_user_timeval(&tv, optval_addr)) {
1461 return -TARGET_EFAULT;
1464 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1465 &tv, sizeof(tv)));
1466 return ret;
1468 case TARGET_SO_SNDTIMEO:
1469 optname = SO_SNDTIMEO;
1470 goto set_timeout;
1471 case TARGET_SO_ATTACH_FILTER:
1473 struct target_sock_fprog *tfprog;
1474 struct target_sock_filter *tfilter;
1475 struct sock_fprog fprog;
1476 struct sock_filter *filter;
1477 int i;
1479 if (optlen != sizeof(*tfprog)) {
1480 return -TARGET_EINVAL;
1482 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1483 return -TARGET_EFAULT;
1485 if (!lock_user_struct(VERIFY_READ, tfilter,
1486 tswapal(tfprog->filter), 0)) {
1487 unlock_user_struct(tfprog, optval_addr, 1);
1488 return -TARGET_EFAULT;
1491 fprog.len = tswap16(tfprog->len);
1492 filter = malloc(fprog.len * sizeof(*filter));
1493 if (filter == NULL) {
1494 unlock_user_struct(tfilter, tfprog->filter, 1);
1495 unlock_user_struct(tfprog, optval_addr, 1);
1496 return -TARGET_ENOMEM;
1498 for (i = 0; i < fprog.len; i++) {
1499 filter[i].code = tswap16(tfilter[i].code);
1500 filter[i].jt = tfilter[i].jt;
1501 filter[i].jf = tfilter[i].jf;
1502 filter[i].k = tswap32(tfilter[i].k);
1504 fprog.filter = filter;
1506 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1507 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1508 free(filter);
1510 unlock_user_struct(tfilter, tfprog->filter, 1);
1511 unlock_user_struct(tfprog, optval_addr, 1);
1512 return ret;
1514 case TARGET_SO_BINDTODEVICE:
1516 char *dev_ifname, *addr_ifname;
1518 if (optlen > IFNAMSIZ - 1) {
1519 optlen = IFNAMSIZ - 1;
1521 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1522 if (!dev_ifname) {
1523 return -TARGET_EFAULT;
1525 optname = SO_BINDTODEVICE;
1526 addr_ifname = alloca(IFNAMSIZ);
1527 memcpy(addr_ifname, dev_ifname, optlen);
1528 addr_ifname[optlen] = 0;
1529 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
1530 unlock_user (dev_ifname, optval_addr, 0);
1531 return ret;
1533 /* Options with 'int' argument. */
1534 case TARGET_SO_DEBUG:
1535 optname = SO_DEBUG;
1536 break;
1537 case TARGET_SO_REUSEADDR:
1538 optname = SO_REUSEADDR;
1539 break;
1540 case TARGET_SO_TYPE:
1541 optname = SO_TYPE;
1542 break;
1543 case TARGET_SO_ERROR:
1544 optname = SO_ERROR;
1545 break;
1546 case TARGET_SO_DONTROUTE:
1547 optname = SO_DONTROUTE;
1548 break;
1549 case TARGET_SO_BROADCAST:
1550 optname = SO_BROADCAST;
1551 break;
1552 case TARGET_SO_SNDBUF:
1553 optname = SO_SNDBUF;
1554 break;
1555 case TARGET_SO_SNDBUFFORCE:
1556 optname = SO_SNDBUFFORCE;
1557 break;
1558 case TARGET_SO_RCVBUF:
1559 optname = SO_RCVBUF;
1560 break;
1561 case TARGET_SO_RCVBUFFORCE:
1562 optname = SO_RCVBUFFORCE;
1563 break;
1564 case TARGET_SO_KEEPALIVE:
1565 optname = SO_KEEPALIVE;
1566 break;
1567 case TARGET_SO_OOBINLINE:
1568 optname = SO_OOBINLINE;
1569 break;
1570 case TARGET_SO_NO_CHECK:
1571 optname = SO_NO_CHECK;
1572 break;
1573 case TARGET_SO_PRIORITY:
1574 optname = SO_PRIORITY;
1575 break;
1576 #ifdef SO_BSDCOMPAT
1577 case TARGET_SO_BSDCOMPAT:
1578 optname = SO_BSDCOMPAT;
1579 break;
1580 #endif
1581 case TARGET_SO_PASSCRED:
1582 optname = SO_PASSCRED;
1583 break;
1584 case TARGET_SO_PASSSEC:
1585 optname = SO_PASSSEC;
1586 break;
1587 case TARGET_SO_TIMESTAMP:
1588 optname = SO_TIMESTAMP;
1589 break;
1590 case TARGET_SO_RCVLOWAT:
1591 optname = SO_RCVLOWAT;
1592 break;
1593 break;
1594 default:
1595 goto unimplemented;
1597 if (optlen < sizeof(uint32_t))
1598 return -TARGET_EINVAL;
1600 if (get_user_u32(val, optval_addr))
1601 return -TARGET_EFAULT;
1602 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1603 break;
1604 default:
1605 unimplemented:
1606 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1607 ret = -TARGET_ENOPROTOOPT;
1609 return ret;
1612 /* do_getsockopt() Must return target values and target errnos. */
1613 static abi_long do_getsockopt(int sockfd, int level, int optname,
1614 abi_ulong optval_addr, abi_ulong optlen)
1616 abi_long ret;
1617 int len, val;
1618 socklen_t lv;
1620 switch(level) {
1621 case TARGET_SOL_SOCKET:
1622 level = SOL_SOCKET;
1623 switch (optname) {
1624 /* These don't just return a single integer */
1625 case TARGET_SO_LINGER:
1626 case TARGET_SO_RCVTIMEO:
1627 case TARGET_SO_SNDTIMEO:
1628 case TARGET_SO_PEERNAME:
1629 goto unimplemented;
1630 case TARGET_SO_PEERCRED: {
1631 struct ucred cr;
1632 socklen_t crlen;
1633 struct target_ucred *tcr;
1635 if (get_user_u32(len, optlen)) {
1636 return -TARGET_EFAULT;
1638 if (len < 0) {
1639 return -TARGET_EINVAL;
1642 crlen = sizeof(cr);
1643 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1644 &cr, &crlen));
1645 if (ret < 0) {
1646 return ret;
1648 if (len > crlen) {
1649 len = crlen;
1651 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1652 return -TARGET_EFAULT;
1654 __put_user(cr.pid, &tcr->pid);
1655 __put_user(cr.uid, &tcr->uid);
1656 __put_user(cr.gid, &tcr->gid);
1657 unlock_user_struct(tcr, optval_addr, 1);
1658 if (put_user_u32(len, optlen)) {
1659 return -TARGET_EFAULT;
1661 break;
1663 /* Options with 'int' argument. */
1664 case TARGET_SO_DEBUG:
1665 optname = SO_DEBUG;
1666 goto int_case;
1667 case TARGET_SO_REUSEADDR:
1668 optname = SO_REUSEADDR;
1669 goto int_case;
1670 case TARGET_SO_TYPE:
1671 optname = SO_TYPE;
1672 goto int_case;
1673 case TARGET_SO_ERROR:
1674 optname = SO_ERROR;
1675 goto int_case;
1676 case TARGET_SO_DONTROUTE:
1677 optname = SO_DONTROUTE;
1678 goto int_case;
1679 case TARGET_SO_BROADCAST:
1680 optname = SO_BROADCAST;
1681 goto int_case;
1682 case TARGET_SO_SNDBUF:
1683 optname = SO_SNDBUF;
1684 goto int_case;
1685 case TARGET_SO_RCVBUF:
1686 optname = SO_RCVBUF;
1687 goto int_case;
1688 case TARGET_SO_KEEPALIVE:
1689 optname = SO_KEEPALIVE;
1690 goto int_case;
1691 case TARGET_SO_OOBINLINE:
1692 optname = SO_OOBINLINE;
1693 goto int_case;
1694 case TARGET_SO_NO_CHECK:
1695 optname = SO_NO_CHECK;
1696 goto int_case;
1697 case TARGET_SO_PRIORITY:
1698 optname = SO_PRIORITY;
1699 goto int_case;
1700 #ifdef SO_BSDCOMPAT
1701 case TARGET_SO_BSDCOMPAT:
1702 optname = SO_BSDCOMPAT;
1703 goto int_case;
1704 #endif
1705 case TARGET_SO_PASSCRED:
1706 optname = SO_PASSCRED;
1707 goto int_case;
1708 case TARGET_SO_TIMESTAMP:
1709 optname = SO_TIMESTAMP;
1710 goto int_case;
1711 case TARGET_SO_RCVLOWAT:
1712 optname = SO_RCVLOWAT;
1713 goto int_case;
1714 case TARGET_SO_ACCEPTCONN:
1715 optname = SO_ACCEPTCONN;
1716 goto int_case;
1717 default:
1718 goto int_case;
1720 break;
1721 case SOL_TCP:
1722 /* TCP options all take an 'int' value. */
1723 int_case:
1724 if (get_user_u32(len, optlen))
1725 return -TARGET_EFAULT;
1726 if (len < 0)
1727 return -TARGET_EINVAL;
1728 lv = sizeof(lv);
1729 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1730 if (ret < 0)
1731 return ret;
1732 if (optname == SO_TYPE) {
1733 val = host_to_target_sock_type(val);
1735 if (len > lv)
1736 len = lv;
1737 if (len == 4) {
1738 if (put_user_u32(val, optval_addr))
1739 return -TARGET_EFAULT;
1740 } else {
1741 if (put_user_u8(val, optval_addr))
1742 return -TARGET_EFAULT;
1744 if (put_user_u32(len, optlen))
1745 return -TARGET_EFAULT;
1746 break;
1747 case SOL_IP:
1748 switch(optname) {
1749 case IP_TOS:
1750 case IP_TTL:
1751 case IP_HDRINCL:
1752 case IP_ROUTER_ALERT:
1753 case IP_RECVOPTS:
1754 case IP_RETOPTS:
1755 case IP_PKTINFO:
1756 case IP_MTU_DISCOVER:
1757 case IP_RECVERR:
1758 case IP_RECVTOS:
1759 #ifdef IP_FREEBIND
1760 case IP_FREEBIND:
1761 #endif
1762 case IP_MULTICAST_TTL:
1763 case IP_MULTICAST_LOOP:
1764 if (get_user_u32(len, optlen))
1765 return -TARGET_EFAULT;
1766 if (len < 0)
1767 return -TARGET_EINVAL;
1768 lv = sizeof(lv);
1769 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1770 if (ret < 0)
1771 return ret;
1772 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1773 len = 1;
1774 if (put_user_u32(len, optlen)
1775 || put_user_u8(val, optval_addr))
1776 return -TARGET_EFAULT;
1777 } else {
1778 if (len > sizeof(int))
1779 len = sizeof(int);
1780 if (put_user_u32(len, optlen)
1781 || put_user_u32(val, optval_addr))
1782 return -TARGET_EFAULT;
1784 break;
1785 default:
1786 ret = -TARGET_ENOPROTOOPT;
1787 break;
1789 break;
1790 default:
1791 unimplemented:
1792 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1793 level, optname);
1794 ret = -TARGET_EOPNOTSUPP;
1795 break;
1797 return ret;
/* Build a host iovec array from a guest iovec array, locking each guest
 * buffer into host memory.  On success the caller owns the returned
 * array and must release it with unlock_iovec().  On failure returns
 * NULL with errno set (host errno, converted by the caller).
 *
 * Partial-write semantics: only a bad FIRST buffer is a hard EFAULT;
 * later bad buffers are replaced with NULL/zero-length entries so the
 * syscall performs a short transfer, matching kernel behaviour. */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    unlock_user(target_vec, target_addr, 0);
 fail2:
    free(vec);
    errno = err;
    return NULL;
}
1883 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1884 int count, int copy)
1886 struct target_iovec *target_vec;
1887 int i;
1889 target_vec = lock_user(VERIFY_READ, target_addr,
1890 count * sizeof(struct target_iovec), 1);
1891 if (target_vec) {
1892 for (i = 0; i < count; i++) {
1893 abi_ulong base = tswapal(target_vec[i].iov_base);
1894 abi_long len = tswapal(target_vec[i].iov_base);
1895 if (len < 0) {
1896 break;
1898 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1900 unlock_user(target_vec, target_addr, 0);
1903 free(vec);
/* Convert a guest socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host encoding, in place.
 * Returns 0 on success or -TARGET_EINVAL when a requested flag cannot
 * be represented on this host at all. */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other type values are assumed to match the host numbering. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Without SOCK_NONBLOCK, sock_flags_fixup() emulates it via
         * O_NONBLOCK; if even that is missing we must fail. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
1940 /* Try to emulate socket type flags after socket creation. */
1941 static int sock_flags_fixup(int fd, int target_type)
1943 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1944 if (target_type & TARGET_SOCK_NONBLOCK) {
1945 int flags = fcntl(fd, F_GETFL);
1946 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1947 close(fd);
1948 return -TARGET_EINVAL;
1951 #endif
1952 return fd;
1955 /* do_socket() Must return target values and target errnos. */
1956 static abi_long do_socket(int domain, int type, int protocol)
1958 int target_type = type;
1959 int ret;
1961 ret = target_to_host_sock_type(&type);
1962 if (ret) {
1963 return ret;
1966 if (domain == PF_NETLINK)
1967 return -TARGET_EAFNOSUPPORT;
1968 ret = get_errno(socket(domain, type, protocol));
1969 if (ret >= 0) {
1970 ret = sock_flags_fixup(ret, target_type);
1972 return ret;
1975 /* do_bind() Must return target values and target errnos. */
1976 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1977 socklen_t addrlen)
1979 void *addr;
1980 abi_long ret;
1982 if ((int)addrlen < 0) {
1983 return -TARGET_EINVAL;
1986 addr = alloca(addrlen+1);
1988 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1989 if (ret)
1990 return ret;
1992 return get_errno(bind(sockfd, addr, addrlen));
1995 /* do_connect() Must return target values and target errnos. */
1996 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1997 socklen_t addrlen)
1999 void *addr;
2000 abi_long ret;
2002 if ((int)addrlen < 0) {
2003 return -TARGET_EINVAL;
2006 addr = alloca(addrlen+1);
2008 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2009 if (ret)
2010 return ret;
2012 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Common worker for sendmsg/recvmsg on an already-locked guest msghdr.
 * Converts name, control data and the iovec; for recv, converts the
 * results back and returns the byte count from the host recvmsg. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* 2x the guest control length: host cmsg headers/alignment can be
     * larger than the guest's, so leave headroom for the conversion. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    /* For send we read guest buffers; for recv we write them. */
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;      /* preserve byte count across conversions */
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2082 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2083 int flags, int send)
2085 abi_long ret;
2086 struct target_msghdr *msgp;
2088 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2089 msgp,
2090 target_msg,
2091 send ? 1 : 0)) {
2092 return -TARGET_EFAULT;
2094 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2095 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2096 return ret;
#ifdef TARGET_NR_sendmmsg
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over the single-message worker.
 * Each message's transferred byte count is stored into msg_len; on any
 * error the loop stops and the count of completed messages (if any) is
 * returned instead of the error, matching kernel semantics. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
#endif
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Guaranteed by the callers (see comment above). */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
/* do_accept4() Must return target values and target errnos.
 * A zero target_addr means the guest doesn't want the peer address,
 * in which case the addrlen pointer is never dereferenced. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Also EINVAL (not EFAULT) for an unwritable result buffer. */
    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
2198 /* do_getpeername() Must return target values and target errnos. */
2199 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2200 abi_ulong target_addrlen_addr)
2202 socklen_t addrlen;
2203 void *addr;
2204 abi_long ret;
2206 if (get_user_u32(addrlen, target_addrlen_addr))
2207 return -TARGET_EFAULT;
2209 if ((int)addrlen < 0) {
2210 return -TARGET_EINVAL;
2213 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2214 return -TARGET_EFAULT;
2216 addr = alloca(addrlen);
2218 ret = get_errno(getpeername(fd, addr, &addrlen));
2219 if (!is_error(ret)) {
2220 host_to_target_sockaddr(target_addr, addr, addrlen);
2221 if (put_user_u32(addrlen, target_addrlen_addr))
2222 ret = -TARGET_EFAULT;
2224 return ret;
2227 /* do_getsockname() Must return target values and target errnos. */
2228 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2229 abi_ulong target_addrlen_addr)
2231 socklen_t addrlen;
2232 void *addr;
2233 abi_long ret;
2235 if (get_user_u32(addrlen, target_addrlen_addr))
2236 return -TARGET_EFAULT;
2238 if ((int)addrlen < 0) {
2239 return -TARGET_EINVAL;
2242 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2243 return -TARGET_EFAULT;
2245 addr = alloca(addrlen);
2247 ret = get_errno(getsockname(fd, addr, &addrlen));
2248 if (!is_error(ret)) {
2249 host_to_target_sockaddr(target_addr, addr, addrlen);
2250 if (put_user_u32(addrlen, target_addrlen_addr))
2251 ret = -TARGET_EFAULT;
2253 return ret;
2256 /* do_socketpair() Must return target values and target errnos. */
2257 static abi_long do_socketpair(int domain, int type, int protocol,
2258 abi_ulong target_tab_addr)
2260 int tab[2];
2261 abi_long ret;
2263 target_to_host_sock_type(&type);
2265 ret = get_errno(socketpair(domain, type, protocol, tab));
2266 if (!is_error(ret)) {
2267 if (put_user_s32(tab[0], target_tab_addr)
2268 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2269 ret = -TARGET_EFAULT;
2271 return ret;
/* do_sendto() Must return target values and target errnos.
 * With a destination address this is sendto(2); with target_addr == 0
 * it degenerates to plain send(2). */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* +1 byte: target_to_host_sockaddr may NUL-extend an AF_UNIX path. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 * With target_addr this is recvfrom(2) (the sender's address is copied
 * out along with the updated length); without it, plain recv(2). */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: write back the received bytes to the guest buffer. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Failure: release the guest buffer without copying back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the legacy socketcall(2) syscall: reads the
 * per-call argument vector from guest memory at vptr, then dispatches
 * to the individual do_* helpers. An out-of-range num skips the
 * argument fetch and falls through to the ENOSYS default.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() attachments: do_shmat() records each
 * mapping's guest address and size here so do_shmdt() can clear the
 * corresponding page flags. A slot with start == 0 is free. */
static struct shm_region {
    abi_ulong start;    /* guest virtual address of the attachment */
    abi_ulong size;     /* segment size in bytes */
} shm_regions[N_SHM_REGIONS];
/* Guest-layout equivalent of struct semid_ds. The __unusedN padding
 * words exist only on ABIs where the kernel splits 64-bit times into
 * two words (hence the TARGET_PPC64 exclusions). */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;    /* ownership and permissions */
    abi_ulong sem_otime;                /* last semop() time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;                /* last change time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;                /* number of semaphores in the set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
/* Convert a guest struct ipc_perm (embedded at the start of a guest
 * semid_ds at target_addr) into host layout, byte-swapping each field.
 * Returns 0 or -TARGET_EFAULT if the guest struct can't be mapped. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some ABIs, 16-bit on others. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm
 * back into the guest semid_ds at target_addr with byte-swapping.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* Field widths vary per ABI; mirror target_to_host_ipc_perm(). */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
2509 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2510 abi_ulong target_addr)
2512 struct target_semid_ds *target_sd;
2514 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2515 return -TARGET_EFAULT;
2516 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2517 return -TARGET_EFAULT;
2518 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2519 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2520 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2521 unlock_user_struct(target_sd, target_addr, 0);
2522 return 0;
2525 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2526 struct semid_ds *host_sd)
2528 struct target_semid_ds *target_sd;
2530 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2531 return -TARGET_EFAULT;
2532 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2533 return -TARGET_EFAULT;
2534 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2535 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2536 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2537 unlock_user_struct(target_sd, target_addr, 1);
2538 return 0;
/* Guest-layout equivalent of struct seminfo (semctl IPC_INFO/SEM_INFO).
 * All fields are plain ints on every supported ABI. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo out to the guest at target_addr,
 * byte-swapping each field via __put_user.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl() argument union (glibc requires the caller to
 * define this). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: the pointer members are guest
 * addresses (abi_ulong), not host pointers. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Read the guest's unsigned short semaphore-value array (for
 * semctl SETALL) into a freshly malloc'd host array. The element count
 * is taken from the host kernel via IPC_STAT, not from the guest.
 * On success *host_array is owned by the caller (freed later by
 * host_to_target_semarray()). Returns 0 or a target errno. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Query the semaphore count for this set. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
2624 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2625 unsigned short **host_array)
2627 int nsems;
2628 unsigned short *array;
2629 union semun semun;
2630 struct semid_ds semid_ds;
2631 int i, ret;
2633 semun.buf = &semid_ds;
2635 ret = semctl(semid, 0, IPC_STAT, semun);
2636 if (ret == -1)
2637 return get_errno(ret);
2639 nsems = semid_ds.sem_nsems;
2641 array = lock_user(VERIFY_WRITE, target_addr,
2642 nsems*sizeof(unsigned short), 0);
2643 if (!array)
2644 return -TARGET_EFAULT;
2646 for(i=0; i<nsems; i++) {
2647 __put_user((*host_array)[i], &array[i]);
2649 free(*host_array);
2650 unlock_user(array, target_addr, 1);
2652 return 0;
/* Emulate semctl(2). Converts the guest's semun argument (and any
 * structs it points at) to host form, performs the host semctl, and
 * copies results back. Returns a target errno; unknown commands fall
 * through to the initial -TARGET_EINVAL. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and friends; command values match the host's. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
	 * a swap of the 4 byte val field. In other cases, the data is
	 * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* This also frees the host array allocated above. */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These take no semun argument at all. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-layout equivalent of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;     /* semaphore index in the set */
    short sem_op;               /* operation: add/subtract/wait-for-zero */
    short sem_flg;              /* IPC_NOWAIT / SEM_UNDO flags */
};
2730 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2731 abi_ulong target_addr,
2732 unsigned nsops)
2734 struct target_sembuf *target_sembuf;
2735 int i;
2737 target_sembuf = lock_user(VERIFY_READ, target_addr,
2738 nsops*sizeof(struct target_sembuf), 1);
2739 if (!target_sembuf)
2740 return -TARGET_EFAULT;
2742 for(i=0; i<nsops; i++) {
2743 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2744 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2745 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2748 unlock_user(target_sembuf, target_addr, 0);
2750 return 0;
2753 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2755 struct sembuf sops[nsops];
2757 if (target_to_host_sembuf(sops, ptr, nsops))
2758 return -TARGET_EFAULT;
2760 return get_errno(semop(semid, sops, nsops));
/* Guest-layout equivalent of struct msqid_ds. On 32-bit ABIs the
 * kernel pads each time field to 64 bits, hence the __unusedN words. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;    /* ownership and permissions */
    abi_ulong msg_stime;                /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;                /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;                /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;             /* current bytes in queue */
    abi_ulong msg_qnum;                 /* current messages in queue */
    abi_ulong msg_qbytes;               /* max bytes allowed in queue */
    abi_ulong msg_lspid;                /* pid of last msgsnd() */
    abi_ulong msg_lrpid;                /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
2787 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2788 abi_ulong target_addr)
2790 struct target_msqid_ds *target_md;
2792 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2793 return -TARGET_EFAULT;
2794 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2795 return -TARGET_EFAULT;
2796 host_md->msg_stime = tswapal(target_md->msg_stime);
2797 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2798 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2799 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2800 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2801 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2802 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2803 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2804 unlock_user_struct(target_md, target_addr, 0);
2805 return 0;
2808 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2809 struct msqid_ds *host_md)
2811 struct target_msqid_ds *target_md;
2813 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2814 return -TARGET_EFAULT;
2815 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2816 return -TARGET_EFAULT;
2817 target_md->msg_stime = tswapal(host_md->msg_stime);
2818 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2819 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2820 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2821 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2822 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2823 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2824 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2825 unlock_user_struct(target_md, target_addr, 1);
2826 return 0;
/* Guest-layout equivalent of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Copy a host struct msginfo out to the guest at target_addr,
 * byte-swapping each field. Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2): convert the guest argument struct as required by
 * the command, call the host msgctl, and copy the result back.
 * Returns a target errno; unknown commands yield -TARGET_EINVAL. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and friends; command values match the host's. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel API overloads the msqid_ds pointer for msginfo. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout message buffer header: an abi_long type tag followed by
 * the message payload (mtext is a variable-length trailer; [1] is the
 * traditional pre-C99 idiom for it). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest message (mtype + msgsz payload
 * bytes) into a temporary host buffer and send it.
 * Returns a target errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding the payload. */
    host_mb = malloc(msgsz+sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    /* NOTE(review): this reads msgsz bytes through target_mb->mtext,
     * past the locked struct header — relies on linear guest mappings,
     * like the rest of this file (see the comment above do_ipc). */
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a temporary host buffer, then copy
 * the mtype tag and up to `ret` payload bytes back to the guest.
 * Returns the number of bytes received or a target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* g_malloc aborts on OOM, so no NULL check is needed here. */
    host_mb = g_malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Payload starts right after the guest's mtype word. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        /* NOTE(review): copies via target_mb->mtext rather than the
         * freshly locked target_mtext pointer — only equivalent under
         * linear mappings; verify if lock_user ever bounces buffers. */
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (checked above), so this
     * condition is effectively always true. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
2957 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2958 abi_ulong target_addr)
2960 struct target_shmid_ds *target_sd;
2962 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2963 return -TARGET_EFAULT;
2964 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2965 return -TARGET_EFAULT;
2966 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2967 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2968 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2969 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2970 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2971 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2972 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2973 unlock_user_struct(target_sd, target_addr, 0);
2974 return 0;
2977 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2978 struct shmid_ds *host_sd)
2980 struct target_shmid_ds *target_sd;
2982 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2983 return -TARGET_EFAULT;
2984 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2985 return -TARGET_EFAULT;
2986 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2987 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2988 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2989 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2990 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2991 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2992 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2993 unlock_user_struct(target_sd, target_addr, 1);
2994 return 0;
/* Guest-layout equivalent of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
/* Copy a host struct shminfo out to the guest at target_addr,
 * byte-swapping each field. Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-layout equivalent of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/* Copy a host struct shm_info out to the guest at target_addr,
 * byte-swapping each field. Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2): convert the guest argument struct as required by
 * the command, call the host shmctl, and copy the result back.
 * Returns a target errno; unknown commands yield -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and friends; command values match the host's. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel API overloads the shmid_ds pointer for shminfo. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These take no buffer argument at all. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2). If the guest supplies an address, attach there
 * directly; otherwise pick a free guest VMA range ourselves and force
 * the attachment with SHM_REMAP. On success the mapping is recorded in
 * shm_regions[] (for do_shmdt) and page flags are set for the range.
 * Returns the guest attach address or a target errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Hold the mmap lock across VMA search + attach so another thread
     * can't grab the chosen range in between. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the range was only reserved, force the attach. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attachment in the first free bookkeeping slot.
     * NOTE(review): if all N_SHM_REGIONS slots are in use the mapping
     * is silently untracked and do_shmdt won't clear its page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
3137 static inline abi_long do_shmdt(abi_ulong shmaddr)
3139 int i;
3141 for (i = 0; i < N_SHM_REGIONS; ++i) {
3142 if (shm_regions[i].start == shmaddr) {
3143 shm_regions[i].start = 0;
3144 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3145 break;
3149 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of
 * `call` select the operation, the high 16 bits carry an ABI version
 * that changes how some operations pass their arguments.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third,
                        (union target_semun) atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: msgp and msgtyp arrive packed in a struct
                 * ("kludge") pointed to by ptr. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            /* The attach address is returned indirectly through the
             * guest pointer in `third`. */
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First expansion of syscall_types.h: generate one STRUCT_<name>
 * enumerator per entry; these tag the thunk type descriptors used for
 * ioctl argument conversion. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit the argtype descriptor array for each struct
 * (STRUCT_SPECIAL entries are hand-converted elsewhere, so no array). */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Signature for custom ioctl handlers that need more than the generic
 * thunk-based argument conversion (e.g. do_ioctl_fs_ioc_fiemap). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

struct IOCTLEntry {
    unsigned int target_cmd;    /* ioctl request number as the guest sees it */
    unsigned int host_cmd;      /* corresponding host request number */
    const char *name;           /* for logging/strace output */
    int access;                 /* IOC_R / IOC_W / IOC_RW direction */
    do_ioctl_fn *do_ioctl;      /* optional custom handler, NULL for generic */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed scratch buffer used for ioctl argument conversion;
 * handlers needing more must allocate their own (see fiemap below). */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by a variable-length array of struct fiemap_extent, so the
 * generic thunk conversion cannot handle it. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the guest-supplied extent count before sizing buffers. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif
3384 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3385 int fd, abi_long cmd, abi_long arg)
3387 const argtype *arg_type = ie->arg_type;
3388 int target_size;
3389 void *argptr;
3390 int ret;
3391 struct ifconf *host_ifconf;
3392 uint32_t outbufsz;
3393 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3394 int target_ifreq_size;
3395 int nb_ifreq;
3396 int free_buf = 0;
3397 int i;
3398 int target_ifc_len;
3399 abi_long target_ifc_buf;
3400 int host_ifc_len;
3401 char *host_ifc_buf;
3403 assert(arg_type[0] == TYPE_PTR);
3404 assert(ie->access == IOC_RW);
3406 arg_type++;
3407 target_size = thunk_type_size(arg_type, 0);
3409 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3410 if (!argptr)
3411 return -TARGET_EFAULT;
3412 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3413 unlock_user(argptr, arg, 0);
3415 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3416 target_ifc_len = host_ifconf->ifc_len;
3417 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3419 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3420 nb_ifreq = target_ifc_len / target_ifreq_size;
3421 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3423 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3424 if (outbufsz > MAX_STRUCT_SIZE) {
3425 /* We can't fit all the extents into the fixed size buffer.
3426 * Allocate one that is large enough and use it instead.
3428 host_ifconf = malloc(outbufsz);
3429 if (!host_ifconf) {
3430 return -TARGET_ENOMEM;
3432 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3433 free_buf = 1;
3435 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3437 host_ifconf->ifc_len = host_ifc_len;
3438 host_ifconf->ifc_buf = host_ifc_buf;
3440 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3441 if (!is_error(ret)) {
3442 /* convert host ifc_len to target ifc_len */
3444 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3445 target_ifc_len = nb_ifreq * target_ifreq_size;
3446 host_ifconf->ifc_len = target_ifc_len;
3448 /* restore target ifc_buf */
3450 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3452 /* copy struct ifconf to target user */
3454 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3455 if (!argptr)
3456 return -TARGET_EFAULT;
3457 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3458 unlock_user(argptr, arg, target_size);
3460 /* copy ifreq[] to target user */
3462 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3463 for (i = 0; i < nb_ifreq ; i++) {
3464 thunk_convert(argptr + i * target_ifreq_size,
3465 host_ifc_buf + i * sizeof(struct ifreq),
3466 ifreq_arg_type, THUNK_TARGET);
3468 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3471 if (free_buf) {
3472 free(host_ifconf);
3475 return ret;
3478 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3479 abi_long cmd, abi_long arg)
3481 void *argptr;
3482 struct dm_ioctl *host_dm;
3483 abi_long guest_data;
3484 uint32_t guest_data_size;
3485 int target_size;
3486 const argtype *arg_type = ie->arg_type;
3487 abi_long ret;
3488 void *big_buf = NULL;
3489 char *host_data;
3491 arg_type++;
3492 target_size = thunk_type_size(arg_type, 0);
3493 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3494 if (!argptr) {
3495 ret = -TARGET_EFAULT;
3496 goto out;
3498 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3499 unlock_user(argptr, arg, 0);
3501 /* buf_temp is too small, so fetch things into a bigger buffer */
3502 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3503 memcpy(big_buf, buf_temp, target_size);
3504 buf_temp = big_buf;
3505 host_dm = big_buf;
3507 guest_data = arg + host_dm->data_start;
3508 if ((guest_data - arg) < 0) {
3509 ret = -EINVAL;
3510 goto out;
3512 guest_data_size = host_dm->data_size - host_dm->data_start;
3513 host_data = (char*)host_dm + host_dm->data_start;
3515 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3516 switch (ie->host_cmd) {
3517 case DM_REMOVE_ALL:
3518 case DM_LIST_DEVICES:
3519 case DM_DEV_CREATE:
3520 case DM_DEV_REMOVE:
3521 case DM_DEV_SUSPEND:
3522 case DM_DEV_STATUS:
3523 case DM_DEV_WAIT:
3524 case DM_TABLE_STATUS:
3525 case DM_TABLE_CLEAR:
3526 case DM_TABLE_DEPS:
3527 case DM_LIST_VERSIONS:
3528 /* no input data */
3529 break;
3530 case DM_DEV_RENAME:
3531 case DM_DEV_SET_GEOMETRY:
3532 /* data contains only strings */
3533 memcpy(host_data, argptr, guest_data_size);
3534 break;
3535 case DM_TARGET_MSG:
3536 memcpy(host_data, argptr, guest_data_size);
3537 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3538 break;
3539 case DM_TABLE_LOAD:
3541 void *gspec = argptr;
3542 void *cur_data = host_data;
3543 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3544 int spec_size = thunk_type_size(arg_type, 0);
3545 int i;
3547 for (i = 0; i < host_dm->target_count; i++) {
3548 struct dm_target_spec *spec = cur_data;
3549 uint32_t next;
3550 int slen;
3552 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3553 slen = strlen((char*)gspec + spec_size) + 1;
3554 next = spec->next;
3555 spec->next = sizeof(*spec) + slen;
3556 strcpy((char*)&spec[1], gspec + spec_size);
3557 gspec += next;
3558 cur_data += spec->next;
3560 break;
3562 default:
3563 ret = -TARGET_EINVAL;
3564 goto out;
3566 unlock_user(argptr, guest_data, 0);
3568 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3569 if (!is_error(ret)) {
3570 guest_data = arg + host_dm->data_start;
3571 guest_data_size = host_dm->data_size - host_dm->data_start;
3572 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3573 switch (ie->host_cmd) {
3574 case DM_REMOVE_ALL:
3575 case DM_DEV_CREATE:
3576 case DM_DEV_REMOVE:
3577 case DM_DEV_RENAME:
3578 case DM_DEV_SUSPEND:
3579 case DM_DEV_STATUS:
3580 case DM_TABLE_LOAD:
3581 case DM_TABLE_CLEAR:
3582 case DM_TARGET_MSG:
3583 case DM_DEV_SET_GEOMETRY:
3584 /* no return data */
3585 break;
3586 case DM_LIST_DEVICES:
3588 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3589 uint32_t remaining_data = guest_data_size;
3590 void *cur_data = argptr;
3591 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3592 int nl_size = 12; /* can't use thunk_size due to alignment */
3594 while (1) {
3595 uint32_t next = nl->next;
3596 if (next) {
3597 nl->next = nl_size + (strlen(nl->name) + 1);
3599 if (remaining_data < nl->next) {
3600 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3601 break;
3603 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3604 strcpy(cur_data + nl_size, nl->name);
3605 cur_data += nl->next;
3606 remaining_data -= nl->next;
3607 if (!next) {
3608 break;
3610 nl = (void*)nl + next;
3612 break;
3614 case DM_DEV_WAIT:
3615 case DM_TABLE_STATUS:
3617 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3618 void *cur_data = argptr;
3619 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3620 int spec_size = thunk_type_size(arg_type, 0);
3621 int i;
3623 for (i = 0; i < host_dm->target_count; i++) {
3624 uint32_t next = spec->next;
3625 int slen = strlen((char*)&spec[1]) + 1;
3626 spec->next = (cur_data - argptr) + spec_size + slen;
3627 if (guest_data_size < spec->next) {
3628 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3629 break;
3631 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3632 strcpy(cur_data + spec_size, (char*)&spec[1]);
3633 cur_data = argptr + spec->next;
3634 spec = (void*)host_dm + host_dm->data_start + next;
3636 break;
3638 case DM_TABLE_DEPS:
3640 void *hdata = (void*)host_dm + host_dm->data_start;
3641 int count = *(uint32_t*)hdata;
3642 uint64_t *hdev = hdata + 8;
3643 uint64_t *gdev = argptr + 8;
3644 int i;
3646 *(uint32_t*)argptr = tswap32(count);
3647 for (i = 0; i < count; i++) {
3648 *gdev = tswap64(*hdev);
3649 gdev++;
3650 hdev++;
3652 break;
3654 case DM_LIST_VERSIONS:
3656 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3657 uint32_t remaining_data = guest_data_size;
3658 void *cur_data = argptr;
3659 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3660 int vers_size = thunk_type_size(arg_type, 0);
3662 while (1) {
3663 uint32_t next = vers->next;
3664 if (next) {
3665 vers->next = vers_size + (strlen(vers->name) + 1);
3667 if (remaining_data < vers->next) {
3668 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3669 break;
3671 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3672 strcpy(cur_data + vers_size, vers->name);
3673 cur_data += vers->next;
3674 remaining_data -= vers->next;
3675 if (!next) {
3676 break;
3678 vers = (void*)vers + next;
3680 break;
3682 default:
3683 ret = -TARGET_EINVAL;
3684 goto out;
3686 unlock_user(argptr, guest_data, guest_data_size);
3688 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3689 if (!argptr) {
3690 ret = -TARGET_EFAULT;
3691 goto out;
3693 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3694 unlock_user(argptr, arg, target_size);
3696 out:
3697 g_free(big_buf);
3698 return ret;
/* Handle BLKPG (block device partitioning): the argument is a
 * struct blkpg_ioctl_arg whose `data` member points at a second
 * guest struct (blkpg_partition), so both levels must be fetched
 * and converted before calling the host ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               abi_long cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (the nested guest pointer) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
3754 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3755 int fd, abi_long cmd, abi_long arg)
3757 const argtype *arg_type = ie->arg_type;
3758 const StructEntry *se;
3759 const argtype *field_types;
3760 const int *dst_offsets, *src_offsets;
3761 int target_size;
3762 void *argptr;
3763 abi_ulong *target_rt_dev_ptr;
3764 unsigned long *host_rt_dev_ptr;
3765 abi_long ret;
3766 int i;
3768 assert(ie->access == IOC_W);
3769 assert(*arg_type == TYPE_PTR);
3770 arg_type++;
3771 assert(*arg_type == TYPE_STRUCT);
3772 target_size = thunk_type_size(arg_type, 0);
3773 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3774 if (!argptr) {
3775 return -TARGET_EFAULT;
3777 arg_type++;
3778 assert(*arg_type == (int)STRUCT_rtentry);
3779 se = struct_entries + *arg_type++;
3780 assert(se->convert[0] == NULL);
3781 /* convert struct here to be able to catch rt_dev string */
3782 field_types = se->field_types;
3783 dst_offsets = se->field_offsets[THUNK_HOST];
3784 src_offsets = se->field_offsets[THUNK_TARGET];
3785 for (i = 0; i < se->nb_fields; i++) {
3786 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3787 assert(*field_types == TYPE_PTRVOID);
3788 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3789 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3790 if (*target_rt_dev_ptr != 0) {
3791 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3792 tswapal(*target_rt_dev_ptr));
3793 if (!*host_rt_dev_ptr) {
3794 unlock_user(argptr, arg, 0);
3795 return -TARGET_EFAULT;
3797 } else {
3798 *host_rt_dev_ptr = 0;
3800 field_types++;
3801 continue;
3803 field_types = thunk_convert(buf_temp + dst_offsets[i],
3804 argptr + src_offsets[i],
3805 field_types, THUNK_HOST);
3807 unlock_user(argptr, arg, 0);
3809 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3810 if (*host_rt_dev_ptr != 0) {
3811 unlock_user((void *)*host_rt_dev_ptr,
3812 *target_rt_dev_ptr, 0);
3814 return ret;
3817 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3818 int fd, abi_long cmd, abi_long arg)
3820 int sig = target_to_host_signal(arg);
3821 return get_errno(ioctl(fd, ie->host_cmd, sig));
/* Table of all supported ioctls, generated from ioctls.h.  IOCTL maps a
 * target command to the host command with automatic argument conversion;
 * IOCTL_SPECIAL additionally supplies a do_ioctl_* handler for commands
 * that need hand-written translation.  Terminated by a zero entry.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
3833 /* ??? Implement proper locking for ioctls. */
3834 /* do_ioctl() Must return target values and target errnos. */
3835 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3837 const IOCTLEntry *ie;
3838 const argtype *arg_type;
3839 abi_long ret;
3840 uint8_t buf_temp[MAX_STRUCT_SIZE];
3841 int target_size;
3842 void *argptr;
3844 ie = ioctl_entries;
3845 for(;;) {
3846 if (ie->target_cmd == 0) {
3847 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3848 return -TARGET_ENOSYS;
3850 if (ie->target_cmd == cmd)
3851 break;
3852 ie++;
3854 arg_type = ie->arg_type;
3855 #if defined(DEBUG)
3856 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3857 #endif
3858 if (ie->do_ioctl) {
3859 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3862 switch(arg_type[0]) {
3863 case TYPE_NULL:
3864 /* no argument */
3865 ret = get_errno(ioctl(fd, ie->host_cmd));
3866 break;
3867 case TYPE_PTRVOID:
3868 case TYPE_INT:
3869 /* int argment */
3870 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3871 break;
3872 case TYPE_PTR:
3873 arg_type++;
3874 target_size = thunk_type_size(arg_type, 0);
3875 switch(ie->access) {
3876 case IOC_R:
3877 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3878 if (!is_error(ret)) {
3879 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3880 if (!argptr)
3881 return -TARGET_EFAULT;
3882 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3883 unlock_user(argptr, arg, target_size);
3885 break;
3886 case IOC_W:
3887 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3888 if (!argptr)
3889 return -TARGET_EFAULT;
3890 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3891 unlock_user(argptr, arg, 0);
3892 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3893 break;
3894 default:
3895 case IOC_RW:
3896 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3897 if (!argptr)
3898 return -TARGET_EFAULT;
3899 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3900 unlock_user(argptr, arg, 0);
3901 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3902 if (!is_error(ret)) {
3903 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3904 if (!argptr)
3905 return -TARGET_EFAULT;
3906 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3907 unlock_user(argptr, arg, target_size);
3909 break;
3911 break;
3912 default:
3913 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3914 (long)cmd, arg_type[0]);
3915 ret = -TARGET_ENOSYS;
3916 break;
3918 return ret;
/* termios c_iflag (input mode) target <-> host bit translation table:
 * { target_mask, target_bits, host_mask, host_bits }, zero-terminated.
 */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* termios c_oflag (output mode) translation table.  The delay fields
 * (NLDLY, CRDLY, TABDLY, ...) are multi-bit, so each value gets its own
 * entry under the shared field mask.
 */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios c_cflag (control mode) translation table.  Baud rates are
 * values within the CBAUD field mask; CSIZE likewise selects the
 * character-size value.
 */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* termios c_lflag (local mode) translation table. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/* Convert a target_termios (guest layout/byte order) into a
 * host_termios: flag words go through the bitmask tables, the control
 * characters are remapped index by index.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* zero first: host may define c_cc slots the target does not */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios: convert a host_termios into the
 * guest's target_termios layout and byte order.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* zero first: target may define c_cc slots the host does not */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: custom converters in both
 * directions (index 0 = host->target, 1 = target->host) plus the size
 * and alignment of the target and host layouts.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* mmap() flags target <-> host translation table. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
	  MAP_NORESERVE },
	{ 0, 0, 0, 0 }
};
4111 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
/* Host-side view of the emulated LDT; allocated lazily by write_ldt(). */
static uint8_t *ldt_table;
4116 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4118 int size;
4119 void *p;
4121 if (!ldt_table)
4122 return 0;
4123 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4124 if (size > bytecount)
4125 size = bytecount;
4126 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4127 if (!p)
4128 return -TARGET_EFAULT;
4129 /* ??? Should this by byteswapped? */
4130 memcpy(p, ldt_table, size);
4131 unlock_user(p, ptr, size);
4132 return size;
/* XXX: add locking support */
/* Install one LDT descriptor (modify_ldt funcs 1 and 0x11).
 * `oldmode` selects the legacy modify_ldt(1) semantics (no `useable`
 * bit, empty descriptors allowed unconditionally).  The flags word is
 * unpacked bit by bit exactly as the Linux kernel does.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the flags word (same layout as the kernel's user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT lazily on first write */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the descriptor words in hardware segment-descriptor format */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4227 /* specific and weird i386 syscalls */
4228 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4229 unsigned long bytecount)
4231 abi_long ret;
4233 switch (func) {
4234 case 0:
4235 ret = read_ldt(ptr, bytecount);
4236 break;
4237 case 1:
4238 ret = write_ldt(env, ptr, bytecount, 1);
4239 break;
4240 case 0x11:
4241 ret = write_ldt(env, ptr, bytecount, 0);
4242 break;
4243 default:
4244 ret = -TARGET_ENOSYS;
4245 break;
4247 return ret;
4250 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the GDT.
 * If the guest passes entry_number == -1 a free TLS slot is chosen and
 * written back to the guest struct.  Descriptor packing mirrors
 * write_ldt() above (same layout as the Linux kernel's code).
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* pick the first free TLS slot and report it back to the guest */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* unpack the flags word (same layout as the kernel's user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit descriptor words */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* Emulate get_thread_area(2): read back a TLS descriptor from the GDT
 * and unpack it into the guest's modify_ldt struct (the inverse of the
 * packing done in do_set_thread_area).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* unpack the descriptor bits back into user_desc-style flags */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
4382 #endif /* TARGET_I386 && TARGET_ABI32 */
4384 #ifndef TARGET_ABI32
/* Emulate x86-64 arch_prctl(2): set or get the FS/GS segment base.
 * Returns 0, or -TARGET_EFAULT / -TARGET_EINVAL. */
4385 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4387 abi_long ret = 0;
4388 abi_ulong val;
4389 int idx;
4391 switch(code) {
4392 case TARGET_ARCH_SET_GS:
4393 case TARGET_ARCH_SET_FS:
4394 if (code == TARGET_ARCH_SET_GS)
4395 idx = R_GS;
4396 else
4397 idx = R_FS;
/* Load a null selector, then install the requested base directly. */
4398 cpu_x86_load_seg(env, idx, 0);
4399 env->segs[idx].base = addr;
4400 break;
4401 case TARGET_ARCH_GET_GS:
4402 case TARGET_ARCH_GET_FS:
4403 if (code == TARGET_ARCH_GET_GS)
4404 idx = R_GS;
4405 else
4406 idx = R_FS;
/* Copy the current base out to the guest pointer 'addr'. */
4407 val = env->segs[idx].base;
4408 if (put_user(val, addr, abi_ulong))
4409 ret = -TARGET_EFAULT;
4410 break;
4411 default:
4412 ret = -TARGET_EINVAL;
4413 break;
4415 return ret;
4417 #endif
4419 #endif /* defined(TARGET_I386) */
/* Host stack size for emulated guest threads created via clone(). */
4421 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so parent-side TLS setup completes before
 * the child enters cpu_loop() (see clone_func / do_fork). */
4424 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the new thread's clone_func(). */
4425 typedef struct {
4426 CPUArchState *env;
4427 pthread_mutex_t mutex;
4428 pthread_cond_t cond;
4429 pthread_t thread;
4430 uint32_t tid;
4431 abi_ulong child_tidptr;
4432 abi_ulong parent_tidptr;
4433 sigset_t sigmask;
4434 } new_thread_info;
/* Entry point of a host pthread backing an emulated guest thread.
 * Publishes the new TID, signals readiness to the parent, waits for the
 * parent to finish TLS setup, then enters the CPU emulation loop. */
4436 static void *clone_func(void *arg)
4438 new_thread_info *info = arg;
4439 CPUArchState *env;
4440 CPUState *cpu;
4441 TaskState *ts;
4443 env = info->env;
4444 cpu = ENV_GET_CPU(env);
4445 thread_cpu = cpu;
4446 ts = (TaskState *)cpu->opaque;
4447 info->tid = gettid();
4448 cpu->host_tid = info->tid;
4449 task_settid(ts);
/* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID requests. */
4450 if (info->child_tidptr)
4451 put_user_u32(info->tid, info->child_tidptr);
4452 if (info->parent_tidptr)
4453 put_user_u32(info->tid, info->parent_tidptr);
4454 /* Enable signals. */
4455 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4456 /* Signal to the parent that we're ready. */
4457 pthread_mutex_lock(&info->mutex);
4458 pthread_cond_broadcast(&info->cond);
4459 pthread_mutex_unlock(&info->mutex);
4460 /* Wait until the parent has finished initializing the tls state. */
4461 pthread_mutex_lock(&clone_lock);
4462 pthread_mutex_unlock(&clone_lock);
4463 cpu_loop(env);
4464 /* never exits */
4465 return NULL;
4468 /* do_fork() Must return host values and target errnos (unlike most
4469 do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a host
 * pthread sharing this process; anything else becomes a host fork(). */
4470 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4471 abi_ulong parent_tidptr, target_ulong newtls,
4472 abi_ulong child_tidptr)
4474 CPUState *cpu = ENV_GET_CPU(env);
4475 int ret;
4476 TaskState *ts;
4477 CPUState *new_cpu;
4478 CPUArchState *new_env;
4479 unsigned int nptl_flags;
4480 sigset_t sigmask;
4482 /* Emulate vfork() with fork() */
4483 if (flags & CLONE_VFORK)
4484 flags &= ~(CLONE_VFORK | CLONE_VM);
4486 if (flags & CLONE_VM) {
4487 TaskState *parent_ts = (TaskState *)cpu->opaque;
4488 new_thread_info info;
4489 pthread_attr_t attr;
4491 ts = g_malloc0(sizeof(TaskState));
4492 init_task_state(ts);
4493 /* we create a new CPU instance. */
4494 new_env = cpu_copy(env);
4495 /* Init regs that differ from the parent. */
4496 cpu_clone_regs(new_env, newsp);
4497 new_cpu = ENV_GET_CPU(new_env);
4498 new_cpu->opaque = ts;
4499 ts->bprm = parent_ts->bprm;
4500 ts->info = parent_ts->info;
/* Remember the NPTL-specific bits, then strip them before use. */
4501 nptl_flags = flags;
4502 flags &= ~CLONE_NPTL_FLAGS2;
4504 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4505 ts->child_tidptr = child_tidptr;
4508 if (nptl_flags & CLONE_SETTLS)
4509 cpu_set_tls (new_env, newtls);
4511 /* Grab a mutex so that thread setup appears atomic. */
4512 pthread_mutex_lock(&clone_lock);
4514 memset(&info, 0, sizeof(info));
4515 pthread_mutex_init(&info.mutex, NULL);
4516 pthread_mutex_lock(&info.mutex);
4517 pthread_cond_init(&info.cond, NULL);
4518 info.env = new_env;
4519 if (nptl_flags & CLONE_CHILD_SETTID)
4520 info.child_tidptr = child_tidptr;
4521 if (nptl_flags & CLONE_PARENT_SETTID)
4522 info.parent_tidptr = parent_tidptr;
4524 ret = pthread_attr_init(&attr);
4525 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4526 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4527 /* It is not safe to deliver signals until the child has finished
4528 initializing, so temporarily block all signals. */
4529 sigfillset(&sigmask);
4530 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4532 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4533 /* TODO: Free new CPU state if thread creation failed. */
4535 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4536 pthread_attr_destroy(&attr);
4537 if (ret == 0) {
4538 /* Wait for the child to initialize. */
4539 pthread_cond_wait(&info.cond, &info.mutex);
4540 ret = info.tid;
4541 if (flags & CLONE_PARENT_SETTID)
4542 put_user_u32(ret, parent_tidptr);
4543 } else {
4544 ret = -1;
4546 pthread_mutex_unlock(&info.mutex);
4547 pthread_cond_destroy(&info.cond);
4548 pthread_mutex_destroy(&info.mutex);
/* Releasing clone_lock lets the child proceed past its TLS wait. */
4549 pthread_mutex_unlock(&clone_lock);
4550 } else {
4551 /* if no CLONE_VM, we consider it is a fork */
4552 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4553 return -EINVAL;
4554 fork_start();
4555 ret = fork();
4556 if (ret == 0) {
4557 /* Child Process. */
4558 cpu_clone_regs(env, newsp);
4559 fork_end(1);
4560 /* There is a race condition here. The parent process could
4561 theoretically read the TID in the child process before the child
4562 tid is set. This would require using either ptrace
4563 (not implemented) or having *_tidptr to point at a shared memory
4564 mapping. We can't repeat the spinlock hack used above because
4565 the child process gets its own copy of the lock. */
4566 if (flags & CLONE_CHILD_SETTID)
4567 put_user_u32(gettid(), child_tidptr);
4568 if (flags & CLONE_PARENT_SETTID)
4569 put_user_u32(gettid(), parent_tidptr);
4570 ts = (TaskState *)cpu->opaque;
4571 if (flags & CLONE_SETTLS)
4572 cpu_set_tls (env, newtls);
4573 if (flags & CLONE_CHILD_CLEARTID)
4574 ts->child_tidptr = child_tidptr;
4575 } else {
4576 fork_end(0);
4579 return ret;
4582 /* warning : doesn't handle linux specific flags... */
4583 static int target_to_host_fcntl_cmd(int cmd)
4585 switch(cmd) {
4586 case TARGET_F_DUPFD:
4587 case TARGET_F_GETFD:
4588 case TARGET_F_SETFD:
4589 case TARGET_F_GETFL:
4590 case TARGET_F_SETFL:
4591 return cmd;
4592 case TARGET_F_GETLK:
4593 return F_GETLK;
4594 case TARGET_F_SETLK:
4595 return F_SETLK;
4596 case TARGET_F_SETLKW:
4597 return F_SETLKW;
4598 case TARGET_F_GETOWN:
4599 return F_GETOWN;
4600 case TARGET_F_SETOWN:
4601 return F_SETOWN;
4602 case TARGET_F_GETSIG:
4603 return F_GETSIG;
4604 case TARGET_F_SETSIG:
4605 return F_SETSIG;
4606 #if TARGET_ABI_BITS == 32
4607 case TARGET_F_GETLK64:
4608 return F_GETLK64;
4609 case TARGET_F_SETLK64:
4610 return F_SETLK64;
4611 case TARGET_F_SETLKW64:
4612 return F_SETLKW64;
4613 #endif
4614 case TARGET_F_SETLEASE:
4615 return F_SETLEASE;
4616 case TARGET_F_GETLEASE:
4617 return F_GETLEASE;
4618 #ifdef F_DUPFD_CLOEXEC
4619 case TARGET_F_DUPFD_CLOEXEC:
4620 return F_DUPFD_CLOEXEC;
4621 #endif
4622 case TARGET_F_NOTIFY:
4623 return F_NOTIFY;
4624 #ifdef F_GETOWN_EX
4625 case TARGET_F_GETOWN_EX:
4626 return F_GETOWN_EX;
4627 #endif
4628 #ifdef F_SETOWN_EX
4629 case TARGET_F_SETOWN_EX:
4630 return F_SETOWN_EX;
4631 #endif
4632 default:
4633 return -TARGET_EINVAL;
4635 return -TARGET_EINVAL;
/* Bitmask translation table for struct flock l_type values; the -1 masks
 * mean "match any bits" for target_to_host_bitmask()/host_to_target_bitmask(). */
4638 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4639 static const bitmask_transtbl flock_tbl[] = {
4640 TRANSTBL_CONVERT(F_RDLCK),
4641 TRANSTBL_CONVERT(F_WRLCK),
4642 TRANSTBL_CONVERT(F_UNLCK),
4643 TRANSTBL_CONVERT(F_EXLCK),
4644 TRANSTBL_CONVERT(F_SHLCK),
4645 { 0, 0, 0, 0 }
/* Emulate fcntl(2): translate the command and, for lock/owner commands,
 * convert the argument struct between guest and host layouts. */
4648 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4650 struct flock fl;
4651 struct target_flock *target_fl;
4652 struct flock64 fl64;
4653 struct target_flock64 *target_fl64;
4654 #ifdef F_GETOWN_EX
4655 struct f_owner_ex fox;
4656 struct target_f_owner_ex *target_fox;
4657 #endif
4658 abi_long ret;
4659 int host_cmd = target_to_host_fcntl_cmd(cmd);
4661 if (host_cmd == -TARGET_EINVAL)
4662 return host_cmd;
4664 switch(cmd) {
4665 case TARGET_F_GETLK:
/* Copy the guest flock in, call the host, copy the result back out. */
4666 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4667 return -TARGET_EFAULT;
4668 fl.l_type =
4669 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4670 fl.l_whence = tswap16(target_fl->l_whence);
4671 fl.l_start = tswapal(target_fl->l_start);
4672 fl.l_len = tswapal(target_fl->l_len);
4673 fl.l_pid = tswap32(target_fl->l_pid);
4674 unlock_user_struct(target_fl, arg, 0);
4675 ret = get_errno(fcntl(fd, host_cmd, &fl));
4676 if (ret == 0) {
4677 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4678 return -TARGET_EFAULT;
4679 target_fl->l_type =
4680 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4681 target_fl->l_whence = tswap16(fl.l_whence);
4682 target_fl->l_start = tswapal(fl.l_start);
4683 target_fl->l_len = tswapal(fl.l_len);
4684 target_fl->l_pid = tswap32(fl.l_pid);
4685 unlock_user_struct(target_fl, arg, 1);
4687 break;
4689 case TARGET_F_SETLK:
4690 case TARGET_F_SETLKW:
4691 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4692 return -TARGET_EFAULT;
4693 fl.l_type =
4694 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4695 fl.l_whence = tswap16(target_fl->l_whence);
4696 fl.l_start = tswapal(target_fl->l_start);
4697 fl.l_len = tswapal(target_fl->l_len);
4698 fl.l_pid = tswap32(target_fl->l_pid);
4699 unlock_user_struct(target_fl, arg, 0);
4700 ret = get_errno(fcntl(fd, host_cmd, &fl));
4701 break;
4703 case TARGET_F_GETLK64:
4704 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4705 return -TARGET_EFAULT;
/* NOTE(review): the ">> 1" applied to the translated l_type in the
 * 64-bit lock cases (and only there) looks suspicious — confirm it
 * against the kernel's flock64 l_type encoding before touching it. */
4706 fl64.l_type =
4707 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4708 fl64.l_whence = tswap16(target_fl64->l_whence);
4709 fl64.l_start = tswap64(target_fl64->l_start);
4710 fl64.l_len = tswap64(target_fl64->l_len);
4711 fl64.l_pid = tswap32(target_fl64->l_pid);
4712 unlock_user_struct(target_fl64, arg, 0);
4713 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4714 if (ret == 0) {
4715 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4716 return -TARGET_EFAULT;
4717 target_fl64->l_type =
4718 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4719 target_fl64->l_whence = tswap16(fl64.l_whence);
4720 target_fl64->l_start = tswap64(fl64.l_start);
4721 target_fl64->l_len = tswap64(fl64.l_len);
4722 target_fl64->l_pid = tswap32(fl64.l_pid);
4723 unlock_user_struct(target_fl64, arg, 1);
4725 break;
4726 case TARGET_F_SETLK64:
4727 case TARGET_F_SETLKW64:
4728 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4729 return -TARGET_EFAULT;
4730 fl64.l_type =
4731 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4732 fl64.l_whence = tswap16(target_fl64->l_whence);
4733 fl64.l_start = tswap64(target_fl64->l_start);
4734 fl64.l_len = tswap64(target_fl64->l_len);
4735 fl64.l_pid = tswap32(target_fl64->l_pid);
4736 unlock_user_struct(target_fl64, arg, 0);
4737 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4738 break;
4740 case TARGET_F_GETFL:
/* File-status flags need bitmask translation in both directions. */
4741 ret = get_errno(fcntl(fd, host_cmd, arg));
4742 if (ret >= 0) {
4743 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4745 break;
4747 case TARGET_F_SETFL:
4748 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4749 break;
4751 #ifdef F_GETOWN_EX
4752 case TARGET_F_GETOWN_EX:
4753 ret = get_errno(fcntl(fd, host_cmd, &fox));
4754 if (ret >= 0) {
4755 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4756 return -TARGET_EFAULT;
4757 target_fox->type = tswap32(fox.type);
4758 target_fox->pid = tswap32(fox.pid);
4759 unlock_user_struct(target_fox, arg, 1);
4761 break;
4762 #endif
4764 #ifdef F_SETOWN_EX
4765 case TARGET_F_SETOWN_EX:
4766 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4767 return -TARGET_EFAULT;
4768 fox.type = tswap32(target_fox->type);
4769 fox.pid = tswap32(target_fox->pid);
4770 unlock_user_struct(target_fox, arg, 0);
4771 ret = get_errno(fcntl(fd, host_cmd, &fox));
4772 break;
4773 #endif
/* Integer-argument commands pass the value straight through. */
4775 case TARGET_F_SETOWN:
4776 case TARGET_F_GETOWN:
4777 case TARGET_F_SETSIG:
4778 case TARGET_F_GETSIG:
4779 case TARGET_F_SETLEASE:
4780 case TARGET_F_GETLEASE:
4781 ret = get_errno(fcntl(fd, host_cmd, arg));
4782 break;
4784 default:
4785 ret = get_errno(fcntl(fd, cmd, arg));
4786 break;
4788 return ret;
4791 #ifdef USE_UID16
/* Legacy 16-bit uid/gid ABIs: clamp 32-bit ids into 16 bits and widen
 * them back, preserving the -1 "no change" sentinel. */
4793 static inline int high2lowuid(int uid)
4795 if (uid > 65535)
4796 return 65534;
4797 else
4798 return uid;
4801 static inline int high2lowgid(int gid)
4803 if (gid > 65535)
4804 return 65534;
4805 else
4806 return gid;
/* -1 (as a 16-bit value) must stay -1 after widening. */
4809 static inline int low2highuid(int uid)
4811 if ((int16_t)uid == -1)
4812 return -1;
4813 else
4814 return uid;
4817 static inline int low2highgid(int gid)
4819 if ((int16_t)gid == -1)
4820 return -1;
4821 else
4822 return gid;
4824 static inline int tswapid(int id)
4826 return tswap16(id);
4829 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4831 #else /* !USE_UID16 */
/* 32-bit uid/gid ABIs: the conversions are identity functions. */
4832 static inline int high2lowuid(int uid)
4834 return uid;
4836 static inline int high2lowgid(int gid)
4838 return gid;
4840 static inline int low2highuid(int uid)
4842 return uid;
4844 static inline int low2highgid(int gid)
4846 return gid;
4848 static inline int tswapid(int id)
4850 return tswap32(id);
4853 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4855 #endif /* USE_UID16 */
/* One-time syscall-layer initialization: register thunk struct layouts,
 * build the errno reverse-translation table, and patch ioctl size fields. */
4857 void syscall_init(void)
4859 IOCTLEntry *ie;
4860 const argtype *arg_type;
4861 int size;
4862 int i;
4864 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4865 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4866 #include "syscall_types.h"
4867 #undef STRUCT
4868 #undef STRUCT_SPECIAL
4870 /* Build target_to_host_errno_table[] table from
4871 * host_to_target_errno_table[]. */
4872 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4873 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4876 /* we patch the ioctl size if necessary. We rely on the fact that
4877 no ioctl has all the bits at '1' in the size field */
4878 ie = ioctl_entries;
4879 while (ie->target_cmd != 0) {
4880 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4881 TARGET_IOC_SIZEMASK) {
4882 arg_type = ie->arg_type;
/* A size to patch only makes sense for pointer arguments. */
4883 if (arg_type[0] != TYPE_PTR) {
4884 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4885 ie->target_cmd);
4886 exit(1);
4888 arg_type++;
4889 size = thunk_type_size(arg_type, 0);
4890 ie->target_cmd = (ie->target_cmd &
4891 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4892 (size << TARGET_IOC_SIZESHIFT);
4895 /* automatic consistency check if same arch */
4896 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4897 (defined(__x86_64__) && defined(TARGET_X86_64))
4898 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4899 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4900 ie->name, ie->target_cmd, ie->host_cmd);
4902 #endif
4903 ie++;
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit guest file offset.
 * Which register carries the high word depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    /* Big-endian guests pass the high half first. */
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
4923 #ifdef TARGET_NR_truncate64
/* Emulate truncate64(2): reassemble the 64-bit length from the register
 * pair (arg2,arg3), skipping a padding register on ABIs that align pairs. */
4924 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4925 abi_long arg2,
4926 abi_long arg3,
4927 abi_long arg4)
/* Some ABIs (e.g. with even/odd register pairing) insert a pad register;
 * shift the pair down by one in that case. */
4929 if (regpairs_aligned(cpu_env)) {
4930 arg2 = arg3;
4931 arg3 = arg4;
4933 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4935 #endif
4937 #ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(2); same register-pair handling as target_truncate64. */
4938 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4939 abi_long arg2,
4940 abi_long arg3,
4941 abi_long arg4)
4943 if (regpairs_aligned(cpu_env)) {
4944 arg2 = arg3;
4945 arg3 = arg4;
4947 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4949 #endif
/* Copy a struct timespec from guest memory at target_addr into *host_ts,
 * byte-swapping as needed.  Returns 0 or -TARGET_EFAULT. */
4951 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4952 abi_ulong target_addr)
4954 struct target_timespec *target_ts;
4956 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4957 return -TARGET_EFAULT;
4958 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4959 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4960 unlock_user_struct(target_ts, target_addr, 0);
4961 return 0;
/* Copy *host_ts out to guest memory at target_addr, byte-swapping as
 * needed.  Returns 0 or -TARGET_EFAULT. */
4964 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4965 struct timespec *host_ts)
4967 struct target_timespec *target_ts;
4969 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4970 return -TARGET_EFAULT;
4971 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4972 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
/* copy=1 commits the written struct back to guest memory. */
4973 unlock_user_struct(target_ts, target_addr, 1);
4974 return 0;
/* Copy a struct itimerspec from guest memory into *host_itspec.
 * Returns 0 or -TARGET_EFAULT. */
4977 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4978 abi_ulong target_addr)
4980 struct target_itimerspec *target_itspec;
4982 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4983 return -TARGET_EFAULT;
4986 host_itspec->it_interval.tv_sec =
4987 tswapal(target_itspec->it_interval.tv_sec);
4988 host_itspec->it_interval.tv_nsec =
4989 tswapal(target_itspec->it_interval.tv_nsec);
4990 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
4991 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
/* NOTE(review): unlocking with copy=1 after a VERIFY_READ lock writes the
 * (unmodified) guest struct back; the sibling target_to_host_timespec()
 * uses copy=0 here — confirm whether the 1 is intentional. */
4993 unlock_user_struct(target_itspec, target_addr, 1);
4994 return 0;
4997 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
4998 struct itimerspec *host_its)
5000 struct target_itimerspec *target_itspec;
5002 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5003 return -TARGET_EFAULT;
5006 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5007 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5009 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5010 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5012 unlock_user_struct(target_itspec, target_addr, 0);
5013 return 0;
/* Copy a struct sigevent from guest memory into *host_sevp, translating
 * the signal number.  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): only the fields below are filled in — any other members
 * of *host_sevp keep their previous contents; callers should pre-zero it. */
5016 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5017 abi_ulong target_addr)
5019 struct target_sigevent *target_sevp;
5021 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5022 return -TARGET_EFAULT;
5025 /* This union is awkward on 64 bit systems because it has a 32 bit
5026 * integer and a pointer in it; we follow the conversion approach
5027 * used for handling sigval types in signal.c so the guest should get
5028 * the correct value back even if we did a 64 bit byteswap and it's
5029 * using the 32 bit integer.
5031 host_sevp->sigev_value.sival_ptr =
5032 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5033 host_sevp->sigev_signo =
5034 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5035 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5036 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5038 unlock_user_struct(target_sevp, target_addr, 1);
5039 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall(2) flag bits into the host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    return ((arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0) |
           ((arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0);
}
#endif
5057 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Convert a host struct stat into the guest's stat64 layout at target_addr.
 * ARM EABI guests use a distinct layout and are handled separately.
 * Returns 0 or -TARGET_EFAULT. */
5058 static inline abi_long host_to_target_stat64(void *cpu_env,
5059 abi_ulong target_addr,
5060 struct stat *host_st)
5062 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5063 if (((CPUARMState *)cpu_env)->eabi) {
5064 struct target_eabi_stat64 *target_st;
5066 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5067 return -TARGET_EFAULT;
5068 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5069 __put_user(host_st->st_dev, &target_st->st_dev);
5070 __put_user(host_st->st_ino, &target_st->st_ino);
5071 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some layouts carry the inode twice; keep both copies in sync. */
5072 __put_user(host_st->st_ino, &target_st->__st_ino);
5073 #endif
5074 __put_user(host_st->st_mode, &target_st->st_mode);
5075 __put_user(host_st->st_nlink, &target_st->st_nlink);
5076 __put_user(host_st->st_uid, &target_st->st_uid);
5077 __put_user(host_st->st_gid, &target_st->st_gid);
5078 __put_user(host_st->st_rdev, &target_st->st_rdev);
5079 __put_user(host_st->st_size, &target_st->st_size);
5080 __put_user(host_st->st_blksize, &target_st->st_blksize);
5081 __put_user(host_st->st_blocks, &target_st->st_blocks);
5082 __put_user(host_st->st_atime, &target_st->target_st_atime);
5083 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5084 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5085 unlock_user_struct(target_st, target_addr, 1);
5086 } else
5087 #endif
5089 #if defined(TARGET_HAS_STRUCT_STAT64)
5090 struct target_stat64 *target_st;
5091 #else
5092 struct target_stat *target_st;
5093 #endif
5095 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5096 return -TARGET_EFAULT;
5097 memset(target_st, 0, sizeof(*target_st));
5098 __put_user(host_st->st_dev, &target_st->st_dev);
5099 __put_user(host_st->st_ino, &target_st->st_ino);
5100 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5101 __put_user(host_st->st_ino, &target_st->__st_ino);
5102 #endif
5103 __put_user(host_st->st_mode, &target_st->st_mode);
5104 __put_user(host_st->st_nlink, &target_st->st_nlink);
5105 __put_user(host_st->st_uid, &target_st->st_uid);
5106 __put_user(host_st->st_gid, &target_st->st_gid);
5107 __put_user(host_st->st_rdev, &target_st->st_rdev);
5108 /* XXX: better use of kernel struct */
5109 __put_user(host_st->st_size, &target_st->st_size);
5110 __put_user(host_st->st_blksize, &target_st->st_blksize);
5111 __put_user(host_st->st_blocks, &target_st->st_blocks);
5112 __put_user(host_st->st_atime, &target_st->target_st_atime);
5113 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5114 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5115 unlock_user_struct(target_st, target_addr, 1);
5118 return 0;
5120 #endif
5122 /* ??? Using host futex calls even when target atomic operations
5123 are not really atomic probably breaks things. However implementing
5124 futexes locally would make futexes shared between multiple processes
5125 tricky. However they're probably useless because guest atomic
5126 operations won't work either. */
/* Emulate futex(2) by forwarding to the host futex syscall on the
 * host address backing the guest word. */
5127 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5128 target_ulong uaddr2, int val3)
5130 struct timespec ts, *pts;
5131 int base_op;
5133 /* ??? We assume FUTEX_* constants are the same on both host
5134 and target. */
5135 #ifdef FUTEX_CMD_MASK
5136 base_op = op & FUTEX_CMD_MASK;
5137 #else
5138 base_op = op;
5139 #endif
5140 switch (base_op) {
5141 case FUTEX_WAIT:
5142 case FUTEX_WAIT_BITSET:
5143 if (timeout) {
5144 pts = &ts;
/* NOTE(review): the return value of this conversion is ignored, so a
 * faulting guest timeout pointer is silently treated as valid. */
5145 target_to_host_timespec(pts, timeout);
5146 } else {
5147 pts = NULL;
/* val is the expected guest value, so it must be byte-swapped. */
5149 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5150 pts, NULL, val3));
5151 case FUTEX_WAKE:
5152 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5153 case FUTEX_FD:
5154 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5155 case FUTEX_REQUEUE:
5156 case FUTEX_CMP_REQUEUE:
5157 case FUTEX_WAKE_OP:
5158 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5159 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5160 But the prototype takes a `struct timespec *'; insert casts
5161 to satisfy the compiler. We do not need to tswap TIMEOUT
5162 since it's not compared to guest memory. */
5163 pts = (struct timespec *)(uintptr_t) timeout;
5164 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5165 g2h(uaddr2),
5166 (base_op == FUTEX_CMP_REQUEUE
5167 ? tswap32(val3)
5168 : val3)));
5169 default:
5170 return -TARGET_ENOSYS;
5174 /* Map host to target signal numbers for the wait family of syscalls.
5175 Assume all other status bits are the same. */
5176 int host_to_target_waitstatus(int status)
/* Termination by signal: the signal number lives in the low 7 bits. */
5178 if (WIFSIGNALED(status)) {
5179 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
/* Stopped: the stop signal lives in bits 8-15. */
5181 if (WIFSTOPPED(status)) {
5182 return (host_to_target_signal(WSTOPSIG(status)) << 8)
5183 | (status & 0xff);
/* Normal exit statuses need no translation. */
5185 return status;
/* Fake /proc/self/cmdline for the guest: copy the host's cmdline into fd,
 * dropping the first NUL-terminated word (the path to the qemu interpreter)
 * so the guest sees its own argv.  cpu_env is unused.
 * Returns 0 on success, a negative value on open/read/write failure. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command.  Only scan the nb_read bytes
               actually read: scanning the whole buffer (as before) could
               match a stale NUL past the valid data and drive the
               remaining length negative. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5234 static int open_self_maps(void *cpu_env, int fd)
5236 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5237 TaskState *ts = cpu->opaque;
5238 FILE *fp;
5239 char *line = NULL;
5240 size_t len = 0;
5241 ssize_t read;
5243 fp = fopen("/proc/self/maps", "r");
5244 if (fp == NULL) {
5245 return -EACCES;
5248 while ((read = getline(&line, &len, fp)) != -1) {
5249 int fields, dev_maj, dev_min, inode;
5250 uint64_t min, max, offset;
5251 char flag_r, flag_w, flag_x, flag_p;
5252 char path[512] = "";
5253 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5254 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5255 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5257 if ((fields < 10) || (fields > 11)) {
5258 continue;
5260 if (h2g_valid(min)) {
5261 int flags = page_get_flags(h2g(min));
5262 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5263 if (page_check_range(h2g(min), max - min, flags) == -1) {
5264 continue;
5266 if (h2g(min) == ts->info->stack_limit) {
5267 pstrcpy(path, sizeof(path), " [stack]");
5269 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5270 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5271 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5272 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5273 path[0] ? " " : "", path);
5277 free(line);
5278 fclose(fp);
5280 return 0;
/* Fake /proc/self/stat for the guest: emit real values for the fields
 * qemu can supply (pid, comm, stack start) and zeros for the rest.
 * Returns 0 on success, -1 on a short write. */
5283 static int open_self_stat(void *cpu_env, int fd)
5285 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5286 TaskState *ts = cpu->opaque;
5287 abi_ulong start_stack = ts->info->start_stack;
5288 int i;
/* /proc/<pid>/stat has 44 space-separated fields; emit them one by one. */
5290 for (i = 0; i < 44; i++) {
5291 char buf[128];
5292 int len;
5293 uint64_t val = 0;
5295 if (i == 0) {
5296 /* pid */
5297 val = getpid();
5298 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5299 } else if (i == 1) {
5300 /* app name */
5301 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5302 } else if (i == 27) {
5303 /* stack bottom */
5304 val = start_stack;
5305 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5306 } else {
5307 /* for the rest, there is MasterCard */
5308 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5311 len = strlen(buf);
5312 if (write(fd, buf, len) != len) {
5313 return -1;
5317 return 0;
/* Fake /proc/self/auxv for the guest: copy the saved guest auxiliary
 * vector (stored on the guest stack at exec time) into fd.  Always
 * returns 0; write errors just truncate the output. */
5320 static int open_self_auxv(void *cpu_env, int fd)
5322 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5323 TaskState *ts = cpu->opaque;
5324 abi_ulong auxv = ts->info->saved_auxv;
5325 abi_ulong len = ts->info->auxv_len;
5326 char *ptr;
5329 * Auxiliary vector is stored in target process stack.
5330 * read in whole auxv vector and copy it to file
5332 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5333 if (ptr != NULL) {
5334 while (len > 0) {
5335 ssize_t r;
5336 r = write(fd, ptr, len);
5337 if (r <= 0) {
5338 break;
5340 len -= r;
5341 ptr += r;
/* Rewind so the guest reads the file from the beginning. */
5343 lseek(fd, 0, SEEK_SET);
/* NOTE(review): len has been decremented (and ptr advanced) by the loop,
 * so this unlock does not cover the originally locked range — confirm
 * the intended lock_user/unlock_user pairing. */
5344 unlock_user(ptr, auxv, len);
5347 return 0;
/* Return 1 if 'filename' names the entry 'entry' of this process's own
 * /proc directory, i.e. "/proc/self/<entry>" or "/proc/<ourpid>/<entry>";
 * return 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric form: only our own pid counts as "myself". */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
5374 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparison used for absolute /proc entries (1 on match). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/* Fake /proc/net/route for cross-endian guests: copy the host file but
 * byte-swap the address columns (dest/gateway/mask) so the guest sees
 * them in its own endianness.  Only compiled when host and guest
 * endianness differ.  Returns 0, or -EACCES if the host file is absent. */
5380 static int open_net_route(void *cpu_env, int fd)
5382 FILE *fp;
5383 char *line = NULL;
5384 size_t len = 0;
5385 ssize_t read;
5387 fp = fopen("/proc/net/route", "r");
5388 if (fp == NULL) {
5389 return -EACCES;
5392 /* read header */
5394 read = getline(&line, &len, fp);
5395 dprintf(fd, "%s", line);
5397 /* read routes */
5399 while ((read = getline(&line, &len, fp)) != -1) {
5400 char iface[16];
5401 uint32_t dest, gw, mask;
5402 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5403 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5404 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5405 &mask, &mtu, &window, &irtt);
/* Swap only the three network-address fields; counters stay as-is. */
5406 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5407 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5408 metric, tswap32(mask), mtu, window, irtt);
5411 free(line);
5412 fclose(fp);
5414 return 0;
5416 #endif
/* Emulate openat(2) with /proc interception: certain /proc paths get a
 * synthesized temp-file image (see 'fakes' table) so the guest sees
 * guest-appropriate contents instead of the host's. */
5418 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5420 struct fake_open {
5421 const char *filename;
5422 int (*fill)(void *cpu_env, int fd);
5423 int (*cmp)(const char *s1, const char *s2);
5425 const struct fake_open *fake_open;
5426 static const struct fake_open fakes[] = {
5427 { "maps", open_self_maps, is_proc_myself },
5428 { "stat", open_self_stat, is_proc_myself },
5429 { "auxv", open_self_auxv, is_proc_myself },
5430 { "cmdline", open_self_cmdline, is_proc_myself },
5431 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5432 { "/proc/net/route", open_net_route, is_proc },
5433 #endif
5434 { NULL, NULL, NULL }
/* /proc/self/exe: reuse the exec fd if the loader kept one, else open
 * the recorded executable path. */
5437 if (is_proc_myself(pathname, "exe")) {
5438 int execfd = qemu_getauxval(AT_EXECFD);
5439 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5442 for (fake_open = fakes; fake_open->filename; fake_open++) {
5443 if (fake_open->cmp(pathname, fake_open->filename)) {
5444 break;
5448 if (fake_open->filename) {
5449 const char *tmpdir;
5450 char filename[PATH_MAX];
5451 int fd, r;
5453 /* create temporary file to map stat to */
5454 tmpdir = getenv("TMPDIR");
5455 if (!tmpdir)
5456 tmpdir = "/tmp";
5457 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5458 fd = mkstemp(filename);
5459 if (fd < 0) {
5460 return fd;
/* Unlink immediately: the fd keeps the anonymous file alive. */
5462 unlink(filename);
5464 if ((r = fake_open->fill(cpu_env, fd))) {
5465 close(fd);
5466 return r;
5468 lseek(fd, 0, SEEK_SET);
5470 return fd;
/* Not intercepted: forward to the host, remapping the path if needed. */
5473 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5476 /* do_syscall() should always have a single exit point at the end so
5477 that actions, such as logging of syscall results, can be performed.
5478 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5479 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5480 abi_long arg2, abi_long arg3, abi_long arg4,
5481 abi_long arg5, abi_long arg6, abi_long arg7,
5482 abi_long arg8)
5484 CPUState *cpu = ENV_GET_CPU(cpu_env);
5485 abi_long ret;
5486 struct stat st;
5487 struct statfs stfs;
5488 void *p;
5490 #ifdef DEBUG
5491 gemu_log("syscall %d", num);
5492 #endif
5493 if(do_strace)
5494 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5496 switch(num) {
5497 case TARGET_NR_exit:
5498 /* In old applications this may be used to implement _exit(2).
5499 However in threaded applictions it is used for thread termination,
5500 and _exit_group is used for application termination.
5501 Do thread termination if we have more then one thread. */
5502 /* FIXME: This probably breaks if a signal arrives. We should probably
5503 be disabling signals. */
5504 if (CPU_NEXT(first_cpu)) {
5505 TaskState *ts;
5507 cpu_list_lock();
5508 /* Remove the CPU from the list. */
5509 QTAILQ_REMOVE(&cpus, cpu, node);
5510 cpu_list_unlock();
5511 ts = cpu->opaque;
5512 if (ts->child_tidptr) {
5513 put_user_u32(0, ts->child_tidptr);
5514 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5515 NULL, NULL, 0);
5517 thread_cpu = NULL;
5518 object_unref(OBJECT(cpu));
5519 g_free(ts);
5520 pthread_exit(NULL);
5522 #ifdef TARGET_GPROF
5523 _mcleanup();
5524 #endif
5525 gdb_exit(cpu_env, arg1);
5526 _exit(arg1);
5527 ret = 0; /* avoid warning */
5528 break;
5529 case TARGET_NR_read:
5530 if (arg3 == 0)
5531 ret = 0;
5532 else {
5533 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5534 goto efault;
5535 ret = get_errno(read(arg1, p, arg3));
5536 unlock_user(p, arg2, ret);
5538 break;
5539 case TARGET_NR_write:
5540 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5541 goto efault;
5542 ret = get_errno(write(arg1, p, arg3));
5543 unlock_user(p, arg2, 0);
5544 break;
5545 case TARGET_NR_open:
5546 if (!(p = lock_user_string(arg1)))
5547 goto efault;
5548 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5549 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5550 arg3));
5551 unlock_user(p, arg1, 0);
5552 break;
5553 case TARGET_NR_openat:
5554 if (!(p = lock_user_string(arg2)))
5555 goto efault;
5556 ret = get_errno(do_openat(cpu_env, arg1, p,
5557 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5558 arg4));
5559 unlock_user(p, arg2, 0);
5560 break;
5561 case TARGET_NR_close:
5562 ret = get_errno(close(arg1));
5563 break;
5564 case TARGET_NR_brk:
5565 ret = do_brk(arg1);
5566 break;
5567 case TARGET_NR_fork:
5568 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5569 break;
5570 #ifdef TARGET_NR_waitpid
5571 case TARGET_NR_waitpid:
5573 int status;
5574 ret = get_errno(waitpid(arg1, &status, arg3));
5575 if (!is_error(ret) && arg2 && ret
5576 && put_user_s32(host_to_target_waitstatus(status), arg2))
5577 goto efault;
5579 break;
5580 #endif
5581 #ifdef TARGET_NR_waitid
5582 case TARGET_NR_waitid:
5584 siginfo_t info;
5585 info.si_pid = 0;
5586 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5587 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5588 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5589 goto efault;
5590 host_to_target_siginfo(p, &info);
5591 unlock_user(p, arg3, sizeof(target_siginfo_t));
5594 break;
5595 #endif
5596 #ifdef TARGET_NR_creat /* not on alpha */
5597 case TARGET_NR_creat:
5598 if (!(p = lock_user_string(arg1)))
5599 goto efault;
5600 ret = get_errno(creat(p, arg2));
5601 unlock_user(p, arg1, 0);
5602 break;
5603 #endif
5604 case TARGET_NR_link:
5606 void * p2;
5607 p = lock_user_string(arg1);
5608 p2 = lock_user_string(arg2);
5609 if (!p || !p2)
5610 ret = -TARGET_EFAULT;
5611 else
5612 ret = get_errno(link(p, p2));
5613 unlock_user(p2, arg2, 0);
5614 unlock_user(p, arg1, 0);
5616 break;
5617 #if defined(TARGET_NR_linkat)
5618 case TARGET_NR_linkat:
5620 void * p2 = NULL;
5621 if (!arg2 || !arg4)
5622 goto efault;
5623 p = lock_user_string(arg2);
5624 p2 = lock_user_string(arg4);
5625 if (!p || !p2)
5626 ret = -TARGET_EFAULT;
5627 else
5628 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5629 unlock_user(p, arg2, 0);
5630 unlock_user(p2, arg4, 0);
5632 break;
5633 #endif
5634 case TARGET_NR_unlink:
5635 if (!(p = lock_user_string(arg1)))
5636 goto efault;
5637 ret = get_errno(unlink(p));
5638 unlock_user(p, arg1, 0);
5639 break;
5640 #if defined(TARGET_NR_unlinkat)
5641 case TARGET_NR_unlinkat:
5642 if (!(p = lock_user_string(arg2)))
5643 goto efault;
5644 ret = get_errno(unlinkat(arg1, p, arg3));
5645 unlock_user(p, arg2, 0);
5646 break;
5647 #endif
5648 case TARGET_NR_execve:
5650 char **argp, **envp;
5651 int argc, envc;
5652 abi_ulong gp;
5653 abi_ulong guest_argp;
5654 abi_ulong guest_envp;
5655 abi_ulong addr;
5656 char **q;
5657 int total_size = 0;
5659 argc = 0;
5660 guest_argp = arg2;
5661 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5662 if (get_user_ual(addr, gp))
5663 goto efault;
5664 if (!addr)
5665 break;
5666 argc++;
5668 envc = 0;
5669 guest_envp = arg3;
5670 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5671 if (get_user_ual(addr, gp))
5672 goto efault;
5673 if (!addr)
5674 break;
5675 envc++;
5678 argp = alloca((argc + 1) * sizeof(void *));
5679 envp = alloca((envc + 1) * sizeof(void *));
5681 for (gp = guest_argp, q = argp; gp;
5682 gp += sizeof(abi_ulong), q++) {
5683 if (get_user_ual(addr, gp))
5684 goto execve_efault;
5685 if (!addr)
5686 break;
5687 if (!(*q = lock_user_string(addr)))
5688 goto execve_efault;
5689 total_size += strlen(*q) + 1;
5691 *q = NULL;
5693 for (gp = guest_envp, q = envp; gp;
5694 gp += sizeof(abi_ulong), q++) {
5695 if (get_user_ual(addr, gp))
5696 goto execve_efault;
5697 if (!addr)
5698 break;
5699 if (!(*q = lock_user_string(addr)))
5700 goto execve_efault;
5701 total_size += strlen(*q) + 1;
5703 *q = NULL;
5705 /* This case will not be caught by the host's execve() if its
5706 page size is bigger than the target's. */
5707 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5708 ret = -TARGET_E2BIG;
5709 goto execve_end;
5711 if (!(p = lock_user_string(arg1)))
5712 goto execve_efault;
5713 ret = get_errno(execve(p, argp, envp));
5714 unlock_user(p, arg1, 0);
5716 goto execve_end;
5718 execve_efault:
5719 ret = -TARGET_EFAULT;
5721 execve_end:
5722 for (gp = guest_argp, q = argp; *q;
5723 gp += sizeof(abi_ulong), q++) {
5724 if (get_user_ual(addr, gp)
5725 || !addr)
5726 break;
5727 unlock_user(*q, addr, 0);
5729 for (gp = guest_envp, q = envp; *q;
5730 gp += sizeof(abi_ulong), q++) {
5731 if (get_user_ual(addr, gp)
5732 || !addr)
5733 break;
5734 unlock_user(*q, addr, 0);
5737 break;
5738 case TARGET_NR_chdir:
5739 if (!(p = lock_user_string(arg1)))
5740 goto efault;
5741 ret = get_errno(chdir(p));
5742 unlock_user(p, arg1, 0);
5743 break;
5744 #ifdef TARGET_NR_time
5745 case TARGET_NR_time:
5747 time_t host_time;
5748 ret = get_errno(time(&host_time));
5749 if (!is_error(ret)
5750 && arg1
5751 && put_user_sal(host_time, arg1))
5752 goto efault;
5754 break;
5755 #endif
5756 case TARGET_NR_mknod:
5757 if (!(p = lock_user_string(arg1)))
5758 goto efault;
5759 ret = get_errno(mknod(p, arg2, arg3));
5760 unlock_user(p, arg1, 0);
5761 break;
5762 #if defined(TARGET_NR_mknodat)
5763 case TARGET_NR_mknodat:
5764 if (!(p = lock_user_string(arg2)))
5765 goto efault;
5766 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5767 unlock_user(p, arg2, 0);
5768 break;
5769 #endif
5770 case TARGET_NR_chmod:
5771 if (!(p = lock_user_string(arg1)))
5772 goto efault;
5773 ret = get_errno(chmod(p, arg2));
5774 unlock_user(p, arg1, 0);
5775 break;
5776 #ifdef TARGET_NR_break
5777 case TARGET_NR_break:
5778 goto unimplemented;
5779 #endif
5780 #ifdef TARGET_NR_oldstat
5781 case TARGET_NR_oldstat:
5782 goto unimplemented;
5783 #endif
5784 case TARGET_NR_lseek:
5785 ret = get_errno(lseek(arg1, arg2, arg3));
5786 break;
5787 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5788 /* Alpha specific */
5789 case TARGET_NR_getxpid:
5790 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5791 ret = get_errno(getpid());
5792 break;
5793 #endif
5794 #ifdef TARGET_NR_getpid
5795 case TARGET_NR_getpid:
5796 ret = get_errno(getpid());
5797 break;
5798 #endif
5799 case TARGET_NR_mount:
5801 /* need to look at the data field */
5802 void *p2, *p3;
5804 if (arg1) {
5805 p = lock_user_string(arg1);
5806 if (!p) {
5807 goto efault;
5809 } else {
5810 p = NULL;
5813 p2 = lock_user_string(arg2);
5814 if (!p2) {
5815 if (arg1) {
5816 unlock_user(p, arg1, 0);
5818 goto efault;
5821 if (arg3) {
5822 p3 = lock_user_string(arg3);
5823 if (!p3) {
5824 if (arg1) {
5825 unlock_user(p, arg1, 0);
5827 unlock_user(p2, arg2, 0);
5828 goto efault;
5830 } else {
5831 p3 = NULL;
5834 /* FIXME - arg5 should be locked, but it isn't clear how to
5835 * do that since it's not guaranteed to be a NULL-terminated
5836 * string.
5838 if (!arg5) {
5839 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
5840 } else {
5841 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
5843 ret = get_errno(ret);
5845 if (arg1) {
5846 unlock_user(p, arg1, 0);
5848 unlock_user(p2, arg2, 0);
5849 if (arg3) {
5850 unlock_user(p3, arg3, 0);
5853 break;
5854 #ifdef TARGET_NR_umount
5855 case TARGET_NR_umount:
5856 if (!(p = lock_user_string(arg1)))
5857 goto efault;
5858 ret = get_errno(umount(p));
5859 unlock_user(p, arg1, 0);
5860 break;
5861 #endif
5862 #ifdef TARGET_NR_stime /* not on alpha */
5863 case TARGET_NR_stime:
5865 time_t host_time;
5866 if (get_user_sal(host_time, arg1))
5867 goto efault;
5868 ret = get_errno(stime(&host_time));
5870 break;
5871 #endif
5872 case TARGET_NR_ptrace:
5873 goto unimplemented;
5874 #ifdef TARGET_NR_alarm /* not on alpha */
5875 case TARGET_NR_alarm:
5876 ret = alarm(arg1);
5877 break;
5878 #endif
5879 #ifdef TARGET_NR_oldfstat
5880 case TARGET_NR_oldfstat:
5881 goto unimplemented;
5882 #endif
5883 #ifdef TARGET_NR_pause /* not on alpha */
5884 case TARGET_NR_pause:
5885 ret = get_errno(pause());
5886 break;
5887 #endif
5888 #ifdef TARGET_NR_utime
5889 case TARGET_NR_utime:
5891 struct utimbuf tbuf, *host_tbuf;
5892 struct target_utimbuf *target_tbuf;
5893 if (arg2) {
5894 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5895 goto efault;
5896 tbuf.actime = tswapal(target_tbuf->actime);
5897 tbuf.modtime = tswapal(target_tbuf->modtime);
5898 unlock_user_struct(target_tbuf, arg2, 0);
5899 host_tbuf = &tbuf;
5900 } else {
5901 host_tbuf = NULL;
5903 if (!(p = lock_user_string(arg1)))
5904 goto efault;
5905 ret = get_errno(utime(p, host_tbuf));
5906 unlock_user(p, arg1, 0);
5908 break;
5909 #endif
5910 case TARGET_NR_utimes:
5912 struct timeval *tvp, tv[2];
5913 if (arg2) {
5914 if (copy_from_user_timeval(&tv[0], arg2)
5915 || copy_from_user_timeval(&tv[1],
5916 arg2 + sizeof(struct target_timeval)))
5917 goto efault;
5918 tvp = tv;
5919 } else {
5920 tvp = NULL;
5922 if (!(p = lock_user_string(arg1)))
5923 goto efault;
5924 ret = get_errno(utimes(p, tvp));
5925 unlock_user(p, arg1, 0);
5927 break;
5928 #if defined(TARGET_NR_futimesat)
5929 case TARGET_NR_futimesat:
5931 struct timeval *tvp, tv[2];
5932 if (arg3) {
5933 if (copy_from_user_timeval(&tv[0], arg3)
5934 || copy_from_user_timeval(&tv[1],
5935 arg3 + sizeof(struct target_timeval)))
5936 goto efault;
5937 tvp = tv;
5938 } else {
5939 tvp = NULL;
5941 if (!(p = lock_user_string(arg2)))
5942 goto efault;
5943 ret = get_errno(futimesat(arg1, path(p), tvp));
5944 unlock_user(p, arg2, 0);
5946 break;
5947 #endif
5948 #ifdef TARGET_NR_stty
5949 case TARGET_NR_stty:
5950 goto unimplemented;
5951 #endif
5952 #ifdef TARGET_NR_gtty
5953 case TARGET_NR_gtty:
5954 goto unimplemented;
5955 #endif
5956 case TARGET_NR_access:
5957 if (!(p = lock_user_string(arg1)))
5958 goto efault;
5959 ret = get_errno(access(path(p), arg2));
5960 unlock_user(p, arg1, 0);
5961 break;
5962 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5963 case TARGET_NR_faccessat:
5964 if (!(p = lock_user_string(arg2)))
5965 goto efault;
5966 ret = get_errno(faccessat(arg1, p, arg3, 0));
5967 unlock_user(p, arg2, 0);
5968 break;
5969 #endif
5970 #ifdef TARGET_NR_nice /* not on alpha */
5971 case TARGET_NR_nice:
5972 ret = get_errno(nice(arg1));
5973 break;
5974 #endif
5975 #ifdef TARGET_NR_ftime
5976 case TARGET_NR_ftime:
5977 goto unimplemented;
5978 #endif
5979 case TARGET_NR_sync:
5980 sync();
5981 ret = 0;
5982 break;
5983 case TARGET_NR_kill:
5984 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5985 break;
5986 case TARGET_NR_rename:
5988 void *p2;
5989 p = lock_user_string(arg1);
5990 p2 = lock_user_string(arg2);
5991 if (!p || !p2)
5992 ret = -TARGET_EFAULT;
5993 else
5994 ret = get_errno(rename(p, p2));
5995 unlock_user(p2, arg2, 0);
5996 unlock_user(p, arg1, 0);
5998 break;
5999 #if defined(TARGET_NR_renameat)
6000 case TARGET_NR_renameat:
6002 void *p2;
6003 p = lock_user_string(arg2);
6004 p2 = lock_user_string(arg4);
6005 if (!p || !p2)
6006 ret = -TARGET_EFAULT;
6007 else
6008 ret = get_errno(renameat(arg1, p, arg3, p2));
6009 unlock_user(p2, arg4, 0);
6010 unlock_user(p, arg2, 0);
6012 break;
6013 #endif
6014 case TARGET_NR_mkdir:
6015 if (!(p = lock_user_string(arg1)))
6016 goto efault;
6017 ret = get_errno(mkdir(p, arg2));
6018 unlock_user(p, arg1, 0);
6019 break;
6020 #if defined(TARGET_NR_mkdirat)
6021 case TARGET_NR_mkdirat:
6022 if (!(p = lock_user_string(arg2)))
6023 goto efault;
6024 ret = get_errno(mkdirat(arg1, p, arg3));
6025 unlock_user(p, arg2, 0);
6026 break;
6027 #endif
6028 case TARGET_NR_rmdir:
6029 if (!(p = lock_user_string(arg1)))
6030 goto efault;
6031 ret = get_errno(rmdir(p));
6032 unlock_user(p, arg1, 0);
6033 break;
6034 case TARGET_NR_dup:
6035 ret = get_errno(dup(arg1));
6036 break;
6037 case TARGET_NR_pipe:
6038 ret = do_pipe(cpu_env, arg1, 0, 0);
6039 break;
6040 #ifdef TARGET_NR_pipe2
6041 case TARGET_NR_pipe2:
6042 ret = do_pipe(cpu_env, arg1,
6043 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6044 break;
6045 #endif
6046 case TARGET_NR_times:
6048 struct target_tms *tmsp;
6049 struct tms tms;
6050 ret = get_errno(times(&tms));
6051 if (arg1) {
6052 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6053 if (!tmsp)
6054 goto efault;
6055 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6056 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6057 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6058 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6060 if (!is_error(ret))
6061 ret = host_to_target_clock_t(ret);
6063 break;
6064 #ifdef TARGET_NR_prof
6065 case TARGET_NR_prof:
6066 goto unimplemented;
6067 #endif
6068 #ifdef TARGET_NR_signal
6069 case TARGET_NR_signal:
6070 goto unimplemented;
6071 #endif
6072 case TARGET_NR_acct:
6073 if (arg1 == 0) {
6074 ret = get_errno(acct(NULL));
6075 } else {
6076 if (!(p = lock_user_string(arg1)))
6077 goto efault;
6078 ret = get_errno(acct(path(p)));
6079 unlock_user(p, arg1, 0);
6081 break;
6082 #ifdef TARGET_NR_umount2
6083 case TARGET_NR_umount2:
6084 if (!(p = lock_user_string(arg1)))
6085 goto efault;
6086 ret = get_errno(umount2(p, arg2));
6087 unlock_user(p, arg1, 0);
6088 break;
6089 #endif
6090 #ifdef TARGET_NR_lock
6091 case TARGET_NR_lock:
6092 goto unimplemented;
6093 #endif
6094 case TARGET_NR_ioctl:
6095 ret = do_ioctl(arg1, arg2, arg3);
6096 break;
6097 case TARGET_NR_fcntl:
6098 ret = do_fcntl(arg1, arg2, arg3);
6099 break;
6100 #ifdef TARGET_NR_mpx
6101 case TARGET_NR_mpx:
6102 goto unimplemented;
6103 #endif
6104 case TARGET_NR_setpgid:
6105 ret = get_errno(setpgid(arg1, arg2));
6106 break;
6107 #ifdef TARGET_NR_ulimit
6108 case TARGET_NR_ulimit:
6109 goto unimplemented;
6110 #endif
6111 #ifdef TARGET_NR_oldolduname
6112 case TARGET_NR_oldolduname:
6113 goto unimplemented;
6114 #endif
6115 case TARGET_NR_umask:
6116 ret = get_errno(umask(arg1));
6117 break;
6118 case TARGET_NR_chroot:
6119 if (!(p = lock_user_string(arg1)))
6120 goto efault;
6121 ret = get_errno(chroot(p));
6122 unlock_user(p, arg1, 0);
6123 break;
6124 case TARGET_NR_ustat:
6125 goto unimplemented;
6126 case TARGET_NR_dup2:
6127 ret = get_errno(dup2(arg1, arg2));
6128 break;
6129 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6130 case TARGET_NR_dup3:
6131 ret = get_errno(dup3(arg1, arg2, arg3));
6132 break;
6133 #endif
6134 #ifdef TARGET_NR_getppid /* not on alpha */
6135 case TARGET_NR_getppid:
6136 ret = get_errno(getppid());
6137 break;
6138 #endif
6139 case TARGET_NR_getpgrp:
6140 ret = get_errno(getpgrp());
6141 break;
6142 case TARGET_NR_setsid:
6143 ret = get_errno(setsid());
6144 break;
6145 #ifdef TARGET_NR_sigaction
6146 case TARGET_NR_sigaction:
6148 #if defined(TARGET_ALPHA)
6149 struct target_sigaction act, oact, *pact = 0;
6150 struct target_old_sigaction *old_act;
6151 if (arg2) {
6152 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6153 goto efault;
6154 act._sa_handler = old_act->_sa_handler;
6155 target_siginitset(&act.sa_mask, old_act->sa_mask);
6156 act.sa_flags = old_act->sa_flags;
6157 act.sa_restorer = 0;
6158 unlock_user_struct(old_act, arg2, 0);
6159 pact = &act;
6161 ret = get_errno(do_sigaction(arg1, pact, &oact));
6162 if (!is_error(ret) && arg3) {
6163 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6164 goto efault;
6165 old_act->_sa_handler = oact._sa_handler;
6166 old_act->sa_mask = oact.sa_mask.sig[0];
6167 old_act->sa_flags = oact.sa_flags;
6168 unlock_user_struct(old_act, arg3, 1);
6170 #elif defined(TARGET_MIPS)
6171 struct target_sigaction act, oact, *pact, *old_act;
6173 if (arg2) {
6174 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6175 goto efault;
6176 act._sa_handler = old_act->_sa_handler;
6177 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6178 act.sa_flags = old_act->sa_flags;
6179 unlock_user_struct(old_act, arg2, 0);
6180 pact = &act;
6181 } else {
6182 pact = NULL;
6185 ret = get_errno(do_sigaction(arg1, pact, &oact));
6187 if (!is_error(ret) && arg3) {
6188 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6189 goto efault;
6190 old_act->_sa_handler = oact._sa_handler;
6191 old_act->sa_flags = oact.sa_flags;
6192 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6193 old_act->sa_mask.sig[1] = 0;
6194 old_act->sa_mask.sig[2] = 0;
6195 old_act->sa_mask.sig[3] = 0;
6196 unlock_user_struct(old_act, arg3, 1);
6198 #else
6199 struct target_old_sigaction *old_act;
6200 struct target_sigaction act, oact, *pact;
6201 if (arg2) {
6202 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6203 goto efault;
6204 act._sa_handler = old_act->_sa_handler;
6205 target_siginitset(&act.sa_mask, old_act->sa_mask);
6206 act.sa_flags = old_act->sa_flags;
6207 act.sa_restorer = old_act->sa_restorer;
6208 unlock_user_struct(old_act, arg2, 0);
6209 pact = &act;
6210 } else {
6211 pact = NULL;
6213 ret = get_errno(do_sigaction(arg1, pact, &oact));
6214 if (!is_error(ret) && arg3) {
6215 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6216 goto efault;
6217 old_act->_sa_handler = oact._sa_handler;
6218 old_act->sa_mask = oact.sa_mask.sig[0];
6219 old_act->sa_flags = oact.sa_flags;
6220 old_act->sa_restorer = oact.sa_restorer;
6221 unlock_user_struct(old_act, arg3, 1);
6223 #endif
6225 break;
6226 #endif
6227 case TARGET_NR_rt_sigaction:
6229 #if defined(TARGET_ALPHA)
6230 struct target_sigaction act, oact, *pact = 0;
6231 struct target_rt_sigaction *rt_act;
6232 /* ??? arg4 == sizeof(sigset_t). */
6233 if (arg2) {
6234 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6235 goto efault;
6236 act._sa_handler = rt_act->_sa_handler;
6237 act.sa_mask = rt_act->sa_mask;
6238 act.sa_flags = rt_act->sa_flags;
6239 act.sa_restorer = arg5;
6240 unlock_user_struct(rt_act, arg2, 0);
6241 pact = &act;
6243 ret = get_errno(do_sigaction(arg1, pact, &oact));
6244 if (!is_error(ret) && arg3) {
6245 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6246 goto efault;
6247 rt_act->_sa_handler = oact._sa_handler;
6248 rt_act->sa_mask = oact.sa_mask;
6249 rt_act->sa_flags = oact.sa_flags;
6250 unlock_user_struct(rt_act, arg3, 1);
6252 #else
6253 struct target_sigaction *act;
6254 struct target_sigaction *oact;
6256 if (arg2) {
6257 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6258 goto efault;
6259 } else
6260 act = NULL;
6261 if (arg3) {
6262 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6263 ret = -TARGET_EFAULT;
6264 goto rt_sigaction_fail;
6266 } else
6267 oact = NULL;
6268 ret = get_errno(do_sigaction(arg1, act, oact));
6269 rt_sigaction_fail:
6270 if (act)
6271 unlock_user_struct(act, arg2, 0);
6272 if (oact)
6273 unlock_user_struct(oact, arg3, 1);
6274 #endif
6276 break;
6277 #ifdef TARGET_NR_sgetmask /* not on alpha */
6278 case TARGET_NR_sgetmask:
6280 sigset_t cur_set;
6281 abi_ulong target_set;
6282 do_sigprocmask(0, NULL, &cur_set);
6283 host_to_target_old_sigset(&target_set, &cur_set);
6284 ret = target_set;
6286 break;
6287 #endif
6288 #ifdef TARGET_NR_ssetmask /* not on alpha */
6289 case TARGET_NR_ssetmask:
6291 sigset_t set, oset, cur_set;
6292 abi_ulong target_set = arg1;
6293 do_sigprocmask(0, NULL, &cur_set);
6294 target_to_host_old_sigset(&set, &target_set);
6295 sigorset(&set, &set, &cur_set);
6296 do_sigprocmask(SIG_SETMASK, &set, &oset);
6297 host_to_target_old_sigset(&target_set, &oset);
6298 ret = target_set;
6300 break;
6301 #endif
6302 #ifdef TARGET_NR_sigprocmask
6303 case TARGET_NR_sigprocmask:
6305 #if defined(TARGET_ALPHA)
6306 sigset_t set, oldset;
6307 abi_ulong mask;
6308 int how;
6310 switch (arg1) {
6311 case TARGET_SIG_BLOCK:
6312 how = SIG_BLOCK;
6313 break;
6314 case TARGET_SIG_UNBLOCK:
6315 how = SIG_UNBLOCK;
6316 break;
6317 case TARGET_SIG_SETMASK:
6318 how = SIG_SETMASK;
6319 break;
6320 default:
6321 ret = -TARGET_EINVAL;
6322 goto fail;
6324 mask = arg2;
6325 target_to_host_old_sigset(&set, &mask);
6327 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6328 if (!is_error(ret)) {
6329 host_to_target_old_sigset(&mask, &oldset);
6330 ret = mask;
6331 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6333 #else
6334 sigset_t set, oldset, *set_ptr;
6335 int how;
6337 if (arg2) {
6338 switch (arg1) {
6339 case TARGET_SIG_BLOCK:
6340 how = SIG_BLOCK;
6341 break;
6342 case TARGET_SIG_UNBLOCK:
6343 how = SIG_UNBLOCK;
6344 break;
6345 case TARGET_SIG_SETMASK:
6346 how = SIG_SETMASK;
6347 break;
6348 default:
6349 ret = -TARGET_EINVAL;
6350 goto fail;
6352 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6353 goto efault;
6354 target_to_host_old_sigset(&set, p);
6355 unlock_user(p, arg2, 0);
6356 set_ptr = &set;
6357 } else {
6358 how = 0;
6359 set_ptr = NULL;
6361 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6362 if (!is_error(ret) && arg3) {
6363 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6364 goto efault;
6365 host_to_target_old_sigset(p, &oldset);
6366 unlock_user(p, arg3, sizeof(target_sigset_t));
6368 #endif
6370 break;
6371 #endif
6372 case TARGET_NR_rt_sigprocmask:
6374 int how = arg1;
6375 sigset_t set, oldset, *set_ptr;
6377 if (arg2) {
6378 switch(how) {
6379 case TARGET_SIG_BLOCK:
6380 how = SIG_BLOCK;
6381 break;
6382 case TARGET_SIG_UNBLOCK:
6383 how = SIG_UNBLOCK;
6384 break;
6385 case TARGET_SIG_SETMASK:
6386 how = SIG_SETMASK;
6387 break;
6388 default:
6389 ret = -TARGET_EINVAL;
6390 goto fail;
6392 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6393 goto efault;
6394 target_to_host_sigset(&set, p);
6395 unlock_user(p, arg2, 0);
6396 set_ptr = &set;
6397 } else {
6398 how = 0;
6399 set_ptr = NULL;
6401 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6402 if (!is_error(ret) && arg3) {
6403 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6404 goto efault;
6405 host_to_target_sigset(p, &oldset);
6406 unlock_user(p, arg3, sizeof(target_sigset_t));
6409 break;
6410 #ifdef TARGET_NR_sigpending
6411 case TARGET_NR_sigpending:
6413 sigset_t set;
6414 ret = get_errno(sigpending(&set));
6415 if (!is_error(ret)) {
6416 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6417 goto efault;
6418 host_to_target_old_sigset(p, &set);
6419 unlock_user(p, arg1, sizeof(target_sigset_t));
6422 break;
6423 #endif
6424 case TARGET_NR_rt_sigpending:
6426 sigset_t set;
6427 ret = get_errno(sigpending(&set));
6428 if (!is_error(ret)) {
6429 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6430 goto efault;
6431 host_to_target_sigset(p, &set);
6432 unlock_user(p, arg1, sizeof(target_sigset_t));
6435 break;
6436 #ifdef TARGET_NR_sigsuspend
6437 case TARGET_NR_sigsuspend:
6439 sigset_t set;
6440 #if defined(TARGET_ALPHA)
6441 abi_ulong mask = arg1;
6442 target_to_host_old_sigset(&set, &mask);
6443 #else
6444 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6445 goto efault;
6446 target_to_host_old_sigset(&set, p);
6447 unlock_user(p, arg1, 0);
6448 #endif
6449 ret = get_errno(sigsuspend(&set));
6451 break;
6452 #endif
6453 case TARGET_NR_rt_sigsuspend:
6455 sigset_t set;
6456 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6457 goto efault;
6458 target_to_host_sigset(&set, p);
6459 unlock_user(p, arg1, 0);
6460 ret = get_errno(sigsuspend(&set));
6462 break;
6463 case TARGET_NR_rt_sigtimedwait:
6465 sigset_t set;
6466 struct timespec uts, *puts;
6467 siginfo_t uinfo;
6469 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6470 goto efault;
6471 target_to_host_sigset(&set, p);
6472 unlock_user(p, arg1, 0);
6473 if (arg3) {
6474 puts = &uts;
6475 target_to_host_timespec(puts, arg3);
6476 } else {
6477 puts = NULL;
6479 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6480 if (!is_error(ret)) {
6481 if (arg2) {
6482 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6484 if (!p) {
6485 goto efault;
6487 host_to_target_siginfo(p, &uinfo);
6488 unlock_user(p, arg2, sizeof(target_siginfo_t));
6490 ret = host_to_target_signal(ret);
6493 break;
6494 case TARGET_NR_rt_sigqueueinfo:
6496 siginfo_t uinfo;
6497 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6498 goto efault;
6499 target_to_host_siginfo(&uinfo, p);
6500 unlock_user(p, arg1, 0);
6501 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6503 break;
6504 #ifdef TARGET_NR_sigreturn
6505 case TARGET_NR_sigreturn:
 6506 /* NOTE: ret is eax, so no transcoding must be done */
6507 ret = do_sigreturn(cpu_env);
6508 break;
6509 #endif
6510 case TARGET_NR_rt_sigreturn:
 6511 /* NOTE: ret is eax, so no transcoding must be done */
6512 ret = do_rt_sigreturn(cpu_env);
6513 break;
6514 case TARGET_NR_sethostname:
6515 if (!(p = lock_user_string(arg1)))
6516 goto efault;
6517 ret = get_errno(sethostname(p, arg2));
6518 unlock_user(p, arg1, 0);
6519 break;
6520 case TARGET_NR_setrlimit:
6522 int resource = target_to_host_resource(arg1);
6523 struct target_rlimit *target_rlim;
6524 struct rlimit rlim;
6525 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6526 goto efault;
6527 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6528 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6529 unlock_user_struct(target_rlim, arg2, 0);
6530 ret = get_errno(setrlimit(resource, &rlim));
6532 break;
6533 case TARGET_NR_getrlimit:
6535 int resource = target_to_host_resource(arg1);
6536 struct target_rlimit *target_rlim;
6537 struct rlimit rlim;
6539 ret = get_errno(getrlimit(resource, &rlim));
6540 if (!is_error(ret)) {
6541 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6542 goto efault;
6543 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6544 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6545 unlock_user_struct(target_rlim, arg2, 1);
6548 break;
6549 case TARGET_NR_getrusage:
6551 struct rusage rusage;
6552 ret = get_errno(getrusage(arg1, &rusage));
6553 if (!is_error(ret)) {
6554 ret = host_to_target_rusage(arg2, &rusage);
6557 break;
6558 case TARGET_NR_gettimeofday:
6560 struct timeval tv;
6561 ret = get_errno(gettimeofday(&tv, NULL));
6562 if (!is_error(ret)) {
6563 if (copy_to_user_timeval(arg1, &tv))
6564 goto efault;
6567 break;
6568 case TARGET_NR_settimeofday:
6570 struct timeval tv, *ptv = NULL;
6571 struct timezone tz, *ptz = NULL;
6573 if (arg1) {
6574 if (copy_from_user_timeval(&tv, arg1)) {
6575 goto efault;
6577 ptv = &tv;
6580 if (arg2) {
6581 if (copy_from_user_timezone(&tz, arg2)) {
6582 goto efault;
6584 ptz = &tz;
6587 ret = get_errno(settimeofday(ptv, ptz));
6589 break;
6590 #if defined(TARGET_NR_select)
6591 case TARGET_NR_select:
6592 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6593 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6594 #else
6596 struct target_sel_arg_struct *sel;
6597 abi_ulong inp, outp, exp, tvp;
6598 long nsel;
6600 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6601 goto efault;
6602 nsel = tswapal(sel->n);
6603 inp = tswapal(sel->inp);
6604 outp = tswapal(sel->outp);
6605 exp = tswapal(sel->exp);
6606 tvp = tswapal(sel->tvp);
6607 unlock_user_struct(sel, arg1, 0);
6608 ret = do_select(nsel, inp, outp, exp, tvp);
6610 #endif
6611 break;
6612 #endif
6613 #ifdef TARGET_NR_pselect6
6614 case TARGET_NR_pselect6:
6616 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6617 fd_set rfds, wfds, efds;
6618 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6619 struct timespec ts, *ts_ptr;
6622 * The 6th arg is actually two args smashed together,
6623 * so we cannot use the C library.
6625 sigset_t set;
6626 struct {
6627 sigset_t *set;
6628 size_t size;
6629 } sig, *sig_ptr;
6631 abi_ulong arg_sigset, arg_sigsize, *arg7;
6632 target_sigset_t *target_sigset;
6634 n = arg1;
6635 rfd_addr = arg2;
6636 wfd_addr = arg3;
6637 efd_addr = arg4;
6638 ts_addr = arg5;
6640 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6641 if (ret) {
6642 goto fail;
6644 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6645 if (ret) {
6646 goto fail;
6648 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6649 if (ret) {
6650 goto fail;
6654 * This takes a timespec, and not a timeval, so we cannot
6655 * use the do_select() helper ...
6657 if (ts_addr) {
6658 if (target_to_host_timespec(&ts, ts_addr)) {
6659 goto efault;
6661 ts_ptr = &ts;
6662 } else {
6663 ts_ptr = NULL;
6666 /* Extract the two packed args for the sigset */
6667 if (arg6) {
6668 sig_ptr = &sig;
6669 sig.size = _NSIG / 8;
6671 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6672 if (!arg7) {
6673 goto efault;
6675 arg_sigset = tswapal(arg7[0]);
6676 arg_sigsize = tswapal(arg7[1]);
6677 unlock_user(arg7, arg6, 0);
6679 if (arg_sigset) {
6680 sig.set = &set;
6681 if (arg_sigsize != sizeof(*target_sigset)) {
6682 /* Like the kernel, we enforce correct size sigsets */
6683 ret = -TARGET_EINVAL;
6684 goto fail;
6686 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6687 sizeof(*target_sigset), 1);
6688 if (!target_sigset) {
6689 goto efault;
6691 target_to_host_sigset(&set, target_sigset);
6692 unlock_user(target_sigset, arg_sigset, 0);
6693 } else {
6694 sig.set = NULL;
6696 } else {
6697 sig_ptr = NULL;
6700 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6701 ts_ptr, sig_ptr));
6703 if (!is_error(ret)) {
6704 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6705 goto efault;
6706 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6707 goto efault;
6708 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6709 goto efault;
6711 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6712 goto efault;
6715 break;
6716 #endif
6717 case TARGET_NR_symlink:
6719 void *p2;
6720 p = lock_user_string(arg1);
6721 p2 = lock_user_string(arg2);
6722 if (!p || !p2)
6723 ret = -TARGET_EFAULT;
6724 else
6725 ret = get_errno(symlink(p, p2));
6726 unlock_user(p2, arg2, 0);
6727 unlock_user(p, arg1, 0);
6729 break;
6730 #if defined(TARGET_NR_symlinkat)
6731 case TARGET_NR_symlinkat:
6733 void *p2;
6734 p = lock_user_string(arg1);
6735 p2 = lock_user_string(arg3);
6736 if (!p || !p2)
6737 ret = -TARGET_EFAULT;
6738 else
6739 ret = get_errno(symlinkat(p, arg2, p2));
6740 unlock_user(p2, arg3, 0);
6741 unlock_user(p, arg1, 0);
6743 break;
6744 #endif
6745 #ifdef TARGET_NR_oldlstat
6746 case TARGET_NR_oldlstat:
6747 goto unimplemented;
6748 #endif
6749 case TARGET_NR_readlink:
6751 void *p2;
6752 p = lock_user_string(arg1);
6753 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6754 if (!p || !p2) {
6755 ret = -TARGET_EFAULT;
6756 } else if (!arg3) {
6757 /* Short circuit this for the magic exe check. */
6758 ret = -TARGET_EINVAL;
6759 } else if (is_proc_myself((const char *)p, "exe")) {
6760 char real[PATH_MAX], *temp;
6761 temp = realpath(exec_path, real);
6762 /* Return value is # of bytes that we wrote to the buffer. */
6763 if (temp == NULL) {
6764 ret = get_errno(-1);
6765 } else {
6766 /* Don't worry about sign mismatch as earlier mapping
6767 * logic would have thrown a bad address error. */
6768 ret = MIN(strlen(real), arg3);
6769 /* We cannot NUL terminate the string. */
6770 memcpy(p2, real, ret);
6772 } else {
6773 ret = get_errno(readlink(path(p), p2, arg3));
6775 unlock_user(p2, arg2, ret);
6776 unlock_user(p, arg1, 0);
6778 break;
6779 #if defined(TARGET_NR_readlinkat)
6780 case TARGET_NR_readlinkat:
6782 void *p2;
6783 p = lock_user_string(arg2);
6784 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6785 if (!p || !p2) {
6786 ret = -TARGET_EFAULT;
6787 } else if (is_proc_myself((const char *)p, "exe")) {
6788 char real[PATH_MAX], *temp;
6789 temp = realpath(exec_path, real);
6790 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6791 snprintf((char *)p2, arg4, "%s", real);
6792 } else {
6793 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6795 unlock_user(p2, arg3, ret);
6796 unlock_user(p, arg2, 0);
6798 break;
6799 #endif
6800 #ifdef TARGET_NR_uselib
6801 case TARGET_NR_uselib:
6802 goto unimplemented;
6803 #endif
6804 #ifdef TARGET_NR_swapon
6805 case TARGET_NR_swapon:
6806 if (!(p = lock_user_string(arg1)))
6807 goto efault;
6808 ret = get_errno(swapon(p, arg2));
6809 unlock_user(p, arg1, 0);
6810 break;
6811 #endif
6812 case TARGET_NR_reboot:
6813 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6814 /* arg4 must be ignored in all other cases */
6815 p = lock_user_string(arg4);
6816 if (!p) {
6817 goto efault;
6819 ret = get_errno(reboot(arg1, arg2, arg3, p));
6820 unlock_user(p, arg4, 0);
6821 } else {
6822 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6824 break;
6825 #ifdef TARGET_NR_readdir
6826 case TARGET_NR_readdir:
6827 goto unimplemented;
6828 #endif
6829 #ifdef TARGET_NR_mmap
6830 case TARGET_NR_mmap:
6831 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6832 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6833 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6834 || defined(TARGET_S390X)
6836 abi_ulong *v;
6837 abi_ulong v1, v2, v3, v4, v5, v6;
6838 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6839 goto efault;
6840 v1 = tswapal(v[0]);
6841 v2 = tswapal(v[1]);
6842 v3 = tswapal(v[2]);
6843 v4 = tswapal(v[3]);
6844 v5 = tswapal(v[4]);
6845 v6 = tswapal(v[5]);
6846 unlock_user(v, arg1, 0);
6847 ret = get_errno(target_mmap(v1, v2, v3,
6848 target_to_host_bitmask(v4, mmap_flags_tbl),
6849 v5, v6));
6851 #else
6852 ret = get_errno(target_mmap(arg1, arg2, arg3,
6853 target_to_host_bitmask(arg4, mmap_flags_tbl),
6854 arg5,
6855 arg6));
6856 #endif
6857 break;
6858 #endif
6859 #ifdef TARGET_NR_mmap2
6860 case TARGET_NR_mmap2:
6861 #ifndef MMAP_SHIFT
6862 #define MMAP_SHIFT 12
6863 #endif
6864 ret = get_errno(target_mmap(arg1, arg2, arg3,
6865 target_to_host_bitmask(arg4, mmap_flags_tbl),
6866 arg5,
6867 arg6 << MMAP_SHIFT));
6868 break;
6869 #endif
6870 case TARGET_NR_munmap:
6871 ret = get_errno(target_munmap(arg1, arg2));
6872 break;
6873 case TARGET_NR_mprotect:
6875 TaskState *ts = cpu->opaque;
6876 /* Special hack to detect libc making the stack executable. */
6877 if ((arg3 & PROT_GROWSDOWN)
6878 && arg1 >= ts->info->stack_limit
6879 && arg1 <= ts->info->start_stack) {
6880 arg3 &= ~PROT_GROWSDOWN;
6881 arg2 = arg2 + arg1 - ts->info->stack_limit;
6882 arg1 = ts->info->stack_limit;
6885 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6886 break;
6887 #ifdef TARGET_NR_mremap
6888 case TARGET_NR_mremap:
6889 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6890 break;
6891 #endif
6892 /* ??? msync/mlock/munlock are broken for softmmu. */
6893 #ifdef TARGET_NR_msync
6894 case TARGET_NR_msync:
6895 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6896 break;
6897 #endif
6898 #ifdef TARGET_NR_mlock
6899 case TARGET_NR_mlock:
6900 ret = get_errno(mlock(g2h(arg1), arg2));
6901 break;
6902 #endif
6903 #ifdef TARGET_NR_munlock
6904 case TARGET_NR_munlock:
6905 ret = get_errno(munlock(g2h(arg1), arg2));
6906 break;
6907 #endif
6908 #ifdef TARGET_NR_mlockall
6909 case TARGET_NR_mlockall:
6910 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
6911 break;
6912 #endif
6913 #ifdef TARGET_NR_munlockall
6914 case TARGET_NR_munlockall:
6915 ret = get_errno(munlockall());
6916 break;
6917 #endif
6918 case TARGET_NR_truncate:
6919 if (!(p = lock_user_string(arg1)))
6920 goto efault;
6921 ret = get_errno(truncate(p, arg2));
6922 unlock_user(p, arg1, 0);
6923 break;
6924 case TARGET_NR_ftruncate:
6925 ret = get_errno(ftruncate(arg1, arg2));
6926 break;
6927 case TARGET_NR_fchmod:
6928 ret = get_errno(fchmod(arg1, arg2));
6929 break;
6930 #if defined(TARGET_NR_fchmodat)
6931 case TARGET_NR_fchmodat:
6932 if (!(p = lock_user_string(arg2)))
6933 goto efault;
6934 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6935 unlock_user(p, arg2, 0);
6936 break;
6937 #endif
6938 case TARGET_NR_getpriority:
6939 /* Note that negative values are valid for getpriority, so we must
6940 differentiate based on errno settings. */
6941 errno = 0;
6942 ret = getpriority(arg1, arg2);
6943 if (ret == -1 && errno != 0) {
6944 ret = -host_to_target_errno(errno);
6945 break;
6947 #ifdef TARGET_ALPHA
6948 /* Return value is the unbiased priority. Signal no error. */
6949 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6950 #else
6951 /* Return value is a biased priority to avoid negative numbers. */
6952 ret = 20 - ret;
6953 #endif
6954 break;
6955 case TARGET_NR_setpriority:
6956 ret = get_errno(setpriority(arg1, arg2, arg3));
6957 break;
6958 #ifdef TARGET_NR_profil
6959 case TARGET_NR_profil:
6960 goto unimplemented;
6961 #endif
6962 case TARGET_NR_statfs:
6963 if (!(p = lock_user_string(arg1)))
6964 goto efault;
6965 ret = get_errno(statfs(path(p), &stfs));
6966 unlock_user(p, arg1, 0);
6967 convert_statfs:
6968 if (!is_error(ret)) {
6969 struct target_statfs *target_stfs;
6971 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6972 goto efault;
6973 __put_user(stfs.f_type, &target_stfs->f_type);
6974 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6975 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6976 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6977 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6978 __put_user(stfs.f_files, &target_stfs->f_files);
6979 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6980 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6981 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6982 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6983 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6984 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6985 unlock_user_struct(target_stfs, arg2, 1);
6987 break;
6988 case TARGET_NR_fstatfs:
6989 ret = get_errno(fstatfs(arg1, &stfs));
6990 goto convert_statfs;
6991 #ifdef TARGET_NR_statfs64
6992 case TARGET_NR_statfs64:
6993 if (!(p = lock_user_string(arg1)))
6994 goto efault;
6995 ret = get_errno(statfs(path(p), &stfs));
6996 unlock_user(p, arg1, 0);
6997 convert_statfs64:
6998 if (!is_error(ret)) {
6999 struct target_statfs64 *target_stfs;
7001 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7002 goto efault;
7003 __put_user(stfs.f_type, &target_stfs->f_type);
7004 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7005 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7006 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7007 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7008 __put_user(stfs.f_files, &target_stfs->f_files);
7009 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7010 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7011 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7012 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7013 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7014 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7015 unlock_user_struct(target_stfs, arg3, 1);
7017 break;
7018 case TARGET_NR_fstatfs64:
7019 ret = get_errno(fstatfs(arg1, &stfs));
7020 goto convert_statfs64;
7021 #endif
7022 #ifdef TARGET_NR_ioperm
7023 case TARGET_NR_ioperm:
7024 goto unimplemented;
7025 #endif
7026 #ifdef TARGET_NR_socketcall
7027 case TARGET_NR_socketcall:
7028 ret = do_socketcall(arg1, arg2);
7029 break;
7030 #endif
7031 #ifdef TARGET_NR_accept
7032 case TARGET_NR_accept:
7033 ret = do_accept4(arg1, arg2, arg3, 0);
7034 break;
7035 #endif
7036 #ifdef TARGET_NR_accept4
7037 case TARGET_NR_accept4:
7038 #ifdef CONFIG_ACCEPT4
7039 ret = do_accept4(arg1, arg2, arg3, arg4);
7040 #else
7041 goto unimplemented;
7042 #endif
7043 break;
7044 #endif
7045 #ifdef TARGET_NR_bind
7046 case TARGET_NR_bind:
7047 ret = do_bind(arg1, arg2, arg3);
7048 break;
7049 #endif
7050 #ifdef TARGET_NR_connect
7051 case TARGET_NR_connect:
7052 ret = do_connect(arg1, arg2, arg3);
7053 break;
7054 #endif
7055 #ifdef TARGET_NR_getpeername
7056 case TARGET_NR_getpeername:
7057 ret = do_getpeername(arg1, arg2, arg3);
7058 break;
7059 #endif
7060 #ifdef TARGET_NR_getsockname
7061 case TARGET_NR_getsockname:
7062 ret = do_getsockname(arg1, arg2, arg3);
7063 break;
7064 #endif
7065 #ifdef TARGET_NR_getsockopt
7066 case TARGET_NR_getsockopt:
7067 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7068 break;
7069 #endif
7070 #ifdef TARGET_NR_listen
7071 case TARGET_NR_listen:
7072 ret = get_errno(listen(arg1, arg2));
7073 break;
7074 #endif
7075 #ifdef TARGET_NR_recv
7076 case TARGET_NR_recv:
7077 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7078 break;
7079 #endif
7080 #ifdef TARGET_NR_recvfrom
7081 case TARGET_NR_recvfrom:
7082 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7083 break;
7084 #endif
7085 #ifdef TARGET_NR_recvmsg
7086 case TARGET_NR_recvmsg:
7087 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7088 break;
7089 #endif
7090 #ifdef TARGET_NR_send
7091 case TARGET_NR_send:
7092 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7093 break;
7094 #endif
7095 #ifdef TARGET_NR_sendmsg
7096 case TARGET_NR_sendmsg:
7097 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7098 break;
7099 #endif
7100 #ifdef TARGET_NR_sendmmsg
7101 case TARGET_NR_sendmmsg:
7102 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7103 break;
7104 case TARGET_NR_recvmmsg:
7105 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7106 break;
7107 #endif
7108 #ifdef TARGET_NR_sendto
7109 case TARGET_NR_sendto:
7110 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7111 break;
7112 #endif
7113 #ifdef TARGET_NR_shutdown
7114 case TARGET_NR_shutdown:
7115 ret = get_errno(shutdown(arg1, arg2));
7116 break;
7117 #endif
7118 #ifdef TARGET_NR_socket
7119 case TARGET_NR_socket:
7120 ret = do_socket(arg1, arg2, arg3);
7121 break;
7122 #endif
7123 #ifdef TARGET_NR_socketpair
7124 case TARGET_NR_socketpair:
7125 ret = do_socketpair(arg1, arg2, arg3, arg4);
7126 break;
7127 #endif
7128 #ifdef TARGET_NR_setsockopt
7129 case TARGET_NR_setsockopt:
7130 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7131 break;
7132 #endif
7134 case TARGET_NR_syslog:
7135 if (!(p = lock_user_string(arg2)))
7136 goto efault;
7137 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7138 unlock_user(p, arg2, 0);
7139 break;
7141 case TARGET_NR_setitimer:
7143 struct itimerval value, ovalue, *pvalue;
7145 if (arg2) {
7146 pvalue = &value;
7147 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7148 || copy_from_user_timeval(&pvalue->it_value,
7149 arg2 + sizeof(struct target_timeval)))
7150 goto efault;
7151 } else {
7152 pvalue = NULL;
7154 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7155 if (!is_error(ret) && arg3) {
7156 if (copy_to_user_timeval(arg3,
7157 &ovalue.it_interval)
7158 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7159 &ovalue.it_value))
7160 goto efault;
7163 break;
7164 case TARGET_NR_getitimer:
7166 struct itimerval value;
7168 ret = get_errno(getitimer(arg1, &value));
7169 if (!is_error(ret) && arg2) {
7170 if (copy_to_user_timeval(arg2,
7171 &value.it_interval)
7172 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7173 &value.it_value))
7174 goto efault;
7177 break;
7178 case TARGET_NR_stat:
7179 if (!(p = lock_user_string(arg1)))
7180 goto efault;
7181 ret = get_errno(stat(path(p), &st));
7182 unlock_user(p, arg1, 0);
7183 goto do_stat;
7184 case TARGET_NR_lstat:
7185 if (!(p = lock_user_string(arg1)))
7186 goto efault;
7187 ret = get_errno(lstat(path(p), &st));
7188 unlock_user(p, arg1, 0);
7189 goto do_stat;
7190 case TARGET_NR_fstat:
7192 ret = get_errno(fstat(arg1, &st));
7193 do_stat:
7194 if (!is_error(ret)) {
7195 struct target_stat *target_st;
7197 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7198 goto efault;
7199 memset(target_st, 0, sizeof(*target_st));
7200 __put_user(st.st_dev, &target_st->st_dev);
7201 __put_user(st.st_ino, &target_st->st_ino);
7202 __put_user(st.st_mode, &target_st->st_mode);
7203 __put_user(st.st_uid, &target_st->st_uid);
7204 __put_user(st.st_gid, &target_st->st_gid);
7205 __put_user(st.st_nlink, &target_st->st_nlink);
7206 __put_user(st.st_rdev, &target_st->st_rdev);
7207 __put_user(st.st_size, &target_st->st_size);
7208 __put_user(st.st_blksize, &target_st->st_blksize);
7209 __put_user(st.st_blocks, &target_st->st_blocks);
7210 __put_user(st.st_atime, &target_st->target_st_atime);
7211 __put_user(st.st_mtime, &target_st->target_st_mtime);
7212 __put_user(st.st_ctime, &target_st->target_st_ctime);
7213 unlock_user_struct(target_st, arg2, 1);
7216 break;
7217 #ifdef TARGET_NR_olduname
7218 case TARGET_NR_olduname:
7219 goto unimplemented;
7220 #endif
7221 #ifdef TARGET_NR_iopl
7222 case TARGET_NR_iopl:
7223 goto unimplemented;
7224 #endif
7225 case TARGET_NR_vhangup:
7226 ret = get_errno(vhangup());
7227 break;
7228 #ifdef TARGET_NR_idle
7229 case TARGET_NR_idle:
7230 goto unimplemented;
7231 #endif
7232 #ifdef TARGET_NR_syscall
7233 case TARGET_NR_syscall:
7234 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7235 arg6, arg7, arg8, 0);
7236 break;
7237 #endif
7238 case TARGET_NR_wait4:
7240 int status;
7241 abi_long status_ptr = arg2;
7242 struct rusage rusage, *rusage_ptr;
7243 abi_ulong target_rusage = arg4;
7244 abi_long rusage_err;
7245 if (target_rusage)
7246 rusage_ptr = &rusage;
7247 else
7248 rusage_ptr = NULL;
7249 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7250 if (!is_error(ret)) {
7251 if (status_ptr && ret) {
7252 status = host_to_target_waitstatus(status);
7253 if (put_user_s32(status, status_ptr))
7254 goto efault;
7256 if (target_rusage) {
7257 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7258 if (rusage_err) {
7259 ret = rusage_err;
7264 break;
7265 #ifdef TARGET_NR_swapoff
7266 case TARGET_NR_swapoff:
7267 if (!(p = lock_user_string(arg1)))
7268 goto efault;
7269 ret = get_errno(swapoff(p));
7270 unlock_user(p, arg1, 0);
7271 break;
7272 #endif
7273 case TARGET_NR_sysinfo:
7275 struct target_sysinfo *target_value;
7276 struct sysinfo value;
7277 ret = get_errno(sysinfo(&value));
7278 if (!is_error(ret) && arg1)
7280 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7281 goto efault;
7282 __put_user(value.uptime, &target_value->uptime);
7283 __put_user(value.loads[0], &target_value->loads[0]);
7284 __put_user(value.loads[1], &target_value->loads[1]);
7285 __put_user(value.loads[2], &target_value->loads[2]);
7286 __put_user(value.totalram, &target_value->totalram);
7287 __put_user(value.freeram, &target_value->freeram);
7288 __put_user(value.sharedram, &target_value->sharedram);
7289 __put_user(value.bufferram, &target_value->bufferram);
7290 __put_user(value.totalswap, &target_value->totalswap);
7291 __put_user(value.freeswap, &target_value->freeswap);
7292 __put_user(value.procs, &target_value->procs);
7293 __put_user(value.totalhigh, &target_value->totalhigh);
7294 __put_user(value.freehigh, &target_value->freehigh);
7295 __put_user(value.mem_unit, &target_value->mem_unit);
7296 unlock_user_struct(target_value, arg1, 1);
7299 break;
7300 #ifdef TARGET_NR_ipc
7301 case TARGET_NR_ipc:
7302 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7303 break;
7304 #endif
7305 #ifdef TARGET_NR_semget
7306 case TARGET_NR_semget:
7307 ret = get_errno(semget(arg1, arg2, arg3));
7308 break;
7309 #endif
7310 #ifdef TARGET_NR_semop
7311 case TARGET_NR_semop:
7312 ret = do_semop(arg1, arg2, arg3);
7313 break;
7314 #endif
7315 #ifdef TARGET_NR_semctl
7316 case TARGET_NR_semctl:
7317 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7318 break;
7319 #endif
7320 #ifdef TARGET_NR_msgctl
7321 case TARGET_NR_msgctl:
7322 ret = do_msgctl(arg1, arg2, arg3);
7323 break;
7324 #endif
7325 #ifdef TARGET_NR_msgget
7326 case TARGET_NR_msgget:
7327 ret = get_errno(msgget(arg1, arg2));
7328 break;
7329 #endif
7330 #ifdef TARGET_NR_msgrcv
7331 case TARGET_NR_msgrcv:
7332 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7333 break;
7334 #endif
7335 #ifdef TARGET_NR_msgsnd
7336 case TARGET_NR_msgsnd:
7337 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7338 break;
7339 #endif
7340 #ifdef TARGET_NR_shmget
7341 case TARGET_NR_shmget:
7342 ret = get_errno(shmget(arg1, arg2, arg3));
7343 break;
7344 #endif
7345 #ifdef TARGET_NR_shmctl
7346 case TARGET_NR_shmctl:
7347 ret = do_shmctl(arg1, arg2, arg3);
7348 break;
7349 #endif
7350 #ifdef TARGET_NR_shmat
7351 case TARGET_NR_shmat:
7352 ret = do_shmat(arg1, arg2, arg3);
7353 break;
7354 #endif
7355 #ifdef TARGET_NR_shmdt
7356 case TARGET_NR_shmdt:
7357 ret = do_shmdt(arg1);
7358 break;
7359 #endif
7360 case TARGET_NR_fsync:
7361 ret = get_errno(fsync(arg1));
7362 break;
7363 case TARGET_NR_clone:
7364 /* Linux manages to have three different orderings for its
7365 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7366 * match the kernel's CONFIG_CLONE_* settings.
7367 * Microblaze is further special in that it uses a sixth
7368 * implicit argument to clone for the TLS pointer.
7370 #if defined(TARGET_MICROBLAZE)
7371 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7372 #elif defined(TARGET_CLONE_BACKWARDS)
7373 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7374 #elif defined(TARGET_CLONE_BACKWARDS2)
7375 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7376 #else
7377 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7378 #endif
7379 break;
7380 #ifdef __NR_exit_group
7381 /* new thread calls */
7382 case TARGET_NR_exit_group:
7383 #ifdef TARGET_GPROF
7384 _mcleanup();
7385 #endif
7386 gdb_exit(cpu_env, arg1);
7387 ret = get_errno(exit_group(arg1));
7388 break;
7389 #endif
7390 case TARGET_NR_setdomainname:
7391 if (!(p = lock_user_string(arg1)))
7392 goto efault;
7393 ret = get_errno(setdomainname(p, arg2));
7394 unlock_user(p, arg1, 0);
7395 break;
7396 case TARGET_NR_uname:
7397 /* no need to transcode because we use the linux syscall */
7399 struct new_utsname * buf;
7401 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7402 goto efault;
7403 ret = get_errno(sys_uname(buf));
7404 if (!is_error(ret)) {
7405 /* Overrite the native machine name with whatever is being
7406 emulated. */
7407 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7408 /* Allow the user to override the reported release. */
7409 if (qemu_uname_release && *qemu_uname_release)
7410 strcpy (buf->release, qemu_uname_release);
7412 unlock_user_struct(buf, arg1, 1);
7414 break;
7415 #ifdef TARGET_I386
7416 case TARGET_NR_modify_ldt:
7417 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7418 break;
7419 #if !defined(TARGET_X86_64)
7420 case TARGET_NR_vm86old:
7421 goto unimplemented;
7422 case TARGET_NR_vm86:
7423 ret = do_vm86(cpu_env, arg1, arg2);
7424 break;
7425 #endif
7426 #endif
7427 case TARGET_NR_adjtimex:
7428 goto unimplemented;
7429 #ifdef TARGET_NR_create_module
7430 case TARGET_NR_create_module:
7431 #endif
7432 case TARGET_NR_init_module:
7433 case TARGET_NR_delete_module:
7434 #ifdef TARGET_NR_get_kernel_syms
7435 case TARGET_NR_get_kernel_syms:
7436 #endif
7437 goto unimplemented;
7438 case TARGET_NR_quotactl:
7439 goto unimplemented;
7440 case TARGET_NR_getpgid:
7441 ret = get_errno(getpgid(arg1));
7442 break;
7443 case TARGET_NR_fchdir:
7444 ret = get_errno(fchdir(arg1));
7445 break;
7446 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7447 case TARGET_NR_bdflush:
7448 goto unimplemented;
7449 #endif
7450 #ifdef TARGET_NR_sysfs
7451 case TARGET_NR_sysfs:
7452 goto unimplemented;
7453 #endif
7454 case TARGET_NR_personality:
7455 ret = get_errno(personality(arg1));
7456 break;
7457 #ifdef TARGET_NR_afs_syscall
7458 case TARGET_NR_afs_syscall:
7459 goto unimplemented;
7460 #endif
7461 #ifdef TARGET_NR__llseek /* Not on alpha */
7462 case TARGET_NR__llseek:
7464 int64_t res;
7465 #if !defined(__NR_llseek)
7466 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7467 if (res == -1) {
7468 ret = get_errno(res);
7469 } else {
7470 ret = 0;
7472 #else
7473 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7474 #endif
7475 if ((ret == 0) && put_user_s64(res, arg4)) {
7476 goto efault;
7479 break;
7480 #endif
7481 case TARGET_NR_getdents:
7482 #ifdef __NR_getdents
7483 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7485 struct target_dirent *target_dirp;
7486 struct linux_dirent *dirp;
7487 abi_long count = arg3;
7489 dirp = malloc(count);
7490 if (!dirp) {
7491 ret = -TARGET_ENOMEM;
7492 goto fail;
7495 ret = get_errno(sys_getdents(arg1, dirp, count));
7496 if (!is_error(ret)) {
7497 struct linux_dirent *de;
7498 struct target_dirent *tde;
7499 int len = ret;
7500 int reclen, treclen;
7501 int count1, tnamelen;
7503 count1 = 0;
7504 de = dirp;
7505 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7506 goto efault;
7507 tde = target_dirp;
7508 while (len > 0) {
7509 reclen = de->d_reclen;
7510 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7511 assert(tnamelen >= 0);
7512 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7513 assert(count1 + treclen <= count);
7514 tde->d_reclen = tswap16(treclen);
7515 tde->d_ino = tswapal(de->d_ino);
7516 tde->d_off = tswapal(de->d_off);
7517 memcpy(tde->d_name, de->d_name, tnamelen);
7518 de = (struct linux_dirent *)((char *)de + reclen);
7519 len -= reclen;
7520 tde = (struct target_dirent *)((char *)tde + treclen);
7521 count1 += treclen;
7523 ret = count1;
7524 unlock_user(target_dirp, arg2, ret);
7526 free(dirp);
7528 #else
7530 struct linux_dirent *dirp;
7531 abi_long count = arg3;
7533 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7534 goto efault;
7535 ret = get_errno(sys_getdents(arg1, dirp, count));
7536 if (!is_error(ret)) {
7537 struct linux_dirent *de;
7538 int len = ret;
7539 int reclen;
7540 de = dirp;
7541 while (len > 0) {
7542 reclen = de->d_reclen;
7543 if (reclen > len)
7544 break;
7545 de->d_reclen = tswap16(reclen);
7546 tswapls(&de->d_ino);
7547 tswapls(&de->d_off);
7548 de = (struct linux_dirent *)((char *)de + reclen);
7549 len -= reclen;
7552 unlock_user(dirp, arg2, ret);
7554 #endif
7555 #else
7556 /* Implement getdents in terms of getdents64 */
7558 struct linux_dirent64 *dirp;
7559 abi_long count = arg3;
7561 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7562 if (!dirp) {
7563 goto efault;
7565 ret = get_errno(sys_getdents64(arg1, dirp, count));
7566 if (!is_error(ret)) {
7567 /* Convert the dirent64 structs to target dirent. We do this
7568 * in-place, since we can guarantee that a target_dirent is no
7569 * larger than a dirent64; however this means we have to be
7570 * careful to read everything before writing in the new format.
7572 struct linux_dirent64 *de;
7573 struct target_dirent *tde;
7574 int len = ret;
7575 int tlen = 0;
7577 de = dirp;
7578 tde = (struct target_dirent *)dirp;
7579 while (len > 0) {
7580 int namelen, treclen;
7581 int reclen = de->d_reclen;
7582 uint64_t ino = de->d_ino;
7583 int64_t off = de->d_off;
7584 uint8_t type = de->d_type;
7586 namelen = strlen(de->d_name);
7587 treclen = offsetof(struct target_dirent, d_name)
7588 + namelen + 2;
7589 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7591 memmove(tde->d_name, de->d_name, namelen + 1);
7592 tde->d_ino = tswapal(ino);
7593 tde->d_off = tswapal(off);
7594 tde->d_reclen = tswap16(treclen);
7595 /* The target_dirent type is in what was formerly a padding
7596 * byte at the end of the structure:
7598 *(((char *)tde) + treclen - 1) = type;
7600 de = (struct linux_dirent64 *)((char *)de + reclen);
7601 tde = (struct target_dirent *)((char *)tde + treclen);
7602 len -= reclen;
7603 tlen += treclen;
7605 ret = tlen;
7607 unlock_user(dirp, arg2, ret);
7609 #endif
7610 break;
7611 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7612 case TARGET_NR_getdents64:
7614 struct linux_dirent64 *dirp;
7615 abi_long count = arg3;
7616 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7617 goto efault;
7618 ret = get_errno(sys_getdents64(arg1, dirp, count));
7619 if (!is_error(ret)) {
7620 struct linux_dirent64 *de;
7621 int len = ret;
7622 int reclen;
7623 de = dirp;
7624 while (len > 0) {
7625 reclen = de->d_reclen;
7626 if (reclen > len)
7627 break;
7628 de->d_reclen = tswap16(reclen);
7629 tswap64s((uint64_t *)&de->d_ino);
7630 tswap64s((uint64_t *)&de->d_off);
7631 de = (struct linux_dirent64 *)((char *)de + reclen);
7632 len -= reclen;
7635 unlock_user(dirp, arg2, ret);
7637 break;
7638 #endif /* TARGET_NR_getdents64 */
7639 #if defined(TARGET_NR__newselect)
7640 case TARGET_NR__newselect:
7641 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7642 break;
7643 #endif
7644 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7645 # ifdef TARGET_NR_poll
7646 case TARGET_NR_poll:
7647 # endif
7648 # ifdef TARGET_NR_ppoll
7649 case TARGET_NR_ppoll:
7650 # endif
7652 struct target_pollfd *target_pfd;
7653 unsigned int nfds = arg2;
7654 int timeout = arg3;
7655 struct pollfd *pfd;
7656 unsigned int i;
7658 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7659 if (!target_pfd)
7660 goto efault;
7662 pfd = alloca(sizeof(struct pollfd) * nfds);
7663 for(i = 0; i < nfds; i++) {
7664 pfd[i].fd = tswap32(target_pfd[i].fd);
7665 pfd[i].events = tswap16(target_pfd[i].events);
7668 # ifdef TARGET_NR_ppoll
7669 if (num == TARGET_NR_ppoll) {
7670 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7671 target_sigset_t *target_set;
7672 sigset_t _set, *set = &_set;
7674 if (arg3) {
7675 if (target_to_host_timespec(timeout_ts, arg3)) {
7676 unlock_user(target_pfd, arg1, 0);
7677 goto efault;
7679 } else {
7680 timeout_ts = NULL;
7683 if (arg4) {
7684 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7685 if (!target_set) {
7686 unlock_user(target_pfd, arg1, 0);
7687 goto efault;
7689 target_to_host_sigset(set, target_set);
7690 } else {
7691 set = NULL;
7694 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7696 if (!is_error(ret) && arg3) {
7697 host_to_target_timespec(arg3, timeout_ts);
7699 if (arg4) {
7700 unlock_user(target_set, arg4, 0);
7702 } else
7703 # endif
7704 ret = get_errno(poll(pfd, nfds, timeout));
7706 if (!is_error(ret)) {
7707 for(i = 0; i < nfds; i++) {
7708 target_pfd[i].revents = tswap16(pfd[i].revents);
7711 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7713 break;
7714 #endif
7715 case TARGET_NR_flock:
7716 /* NOTE: the flock constant seems to be the same for every
7717 Linux platform */
7718 ret = get_errno(flock(arg1, arg2));
7719 break;
7720 case TARGET_NR_readv:
7722 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7723 if (vec != NULL) {
7724 ret = get_errno(readv(arg1, vec, arg3));
7725 unlock_iovec(vec, arg2, arg3, 1);
7726 } else {
7727 ret = -host_to_target_errno(errno);
7730 break;
7731 case TARGET_NR_writev:
7733 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7734 if (vec != NULL) {
7735 ret = get_errno(writev(arg1, vec, arg3));
7736 unlock_iovec(vec, arg2, arg3, 0);
7737 } else {
7738 ret = -host_to_target_errno(errno);
7741 break;
7742 case TARGET_NR_getsid:
7743 ret = get_errno(getsid(arg1));
7744 break;
7745 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7746 case TARGET_NR_fdatasync:
7747 ret = get_errno(fdatasync(arg1));
7748 break;
7749 #endif
7750 case TARGET_NR__sysctl:
7751 /* We don't implement this, but ENOTDIR is always a safe
7752 return value. */
7753 ret = -TARGET_ENOTDIR;
7754 break;
7755 case TARGET_NR_sched_getaffinity:
7757 unsigned int mask_size;
7758 unsigned long *mask;
7761 * sched_getaffinity needs multiples of ulong, so need to take
7762 * care of mismatches between target ulong and host ulong sizes.
7764 if (arg2 & (sizeof(abi_ulong) - 1)) {
7765 ret = -TARGET_EINVAL;
7766 break;
7768 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7770 mask = alloca(mask_size);
7771 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7773 if (!is_error(ret)) {
7774 if (ret > arg2) {
7775 /* More data returned than the caller's buffer will fit.
7776 * This only happens if sizeof(abi_long) < sizeof(long)
7777 * and the caller passed us a buffer holding an odd number
7778 * of abi_longs. If the host kernel is actually using the
7779 * extra 4 bytes then fail EINVAL; otherwise we can just
7780 * ignore them and only copy the interesting part.
7782 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
7783 if (numcpus > arg2 * 8) {
7784 ret = -TARGET_EINVAL;
7785 break;
7787 ret = arg2;
7790 if (copy_to_user(arg3, mask, ret)) {
7791 goto efault;
7795 break;
7796 case TARGET_NR_sched_setaffinity:
7798 unsigned int mask_size;
7799 unsigned long *mask;
7802 * sched_setaffinity needs multiples of ulong, so need to take
7803 * care of mismatches between target ulong and host ulong sizes.
7805 if (arg2 & (sizeof(abi_ulong) - 1)) {
7806 ret = -TARGET_EINVAL;
7807 break;
7809 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7811 mask = alloca(mask_size);
7812 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7813 goto efault;
7815 memcpy(mask, p, arg2);
7816 unlock_user_struct(p, arg2, 0);
7818 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7820 break;
7821 case TARGET_NR_sched_setparam:
7823 struct sched_param *target_schp;
7824 struct sched_param schp;
7826 if (arg2 == 0) {
7827 return -TARGET_EINVAL;
7829 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7830 goto efault;
7831 schp.sched_priority = tswap32(target_schp->sched_priority);
7832 unlock_user_struct(target_schp, arg2, 0);
7833 ret = get_errno(sched_setparam(arg1, &schp));
7835 break;
7836 case TARGET_NR_sched_getparam:
7838 struct sched_param *target_schp;
7839 struct sched_param schp;
7841 if (arg2 == 0) {
7842 return -TARGET_EINVAL;
7844 ret = get_errno(sched_getparam(arg1, &schp));
7845 if (!is_error(ret)) {
7846 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7847 goto efault;
7848 target_schp->sched_priority = tswap32(schp.sched_priority);
7849 unlock_user_struct(target_schp, arg2, 1);
7852 break;
7853 case TARGET_NR_sched_setscheduler:
7855 struct sched_param *target_schp;
7856 struct sched_param schp;
7857 if (arg3 == 0) {
7858 return -TARGET_EINVAL;
7860 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7861 goto efault;
7862 schp.sched_priority = tswap32(target_schp->sched_priority);
7863 unlock_user_struct(target_schp, arg3, 0);
7864 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7866 break;
7867 case TARGET_NR_sched_getscheduler:
7868 ret = get_errno(sched_getscheduler(arg1));
7869 break;
7870 case TARGET_NR_sched_yield:
7871 ret = get_errno(sched_yield());
7872 break;
7873 case TARGET_NR_sched_get_priority_max:
7874 ret = get_errno(sched_get_priority_max(arg1));
7875 break;
7876 case TARGET_NR_sched_get_priority_min:
7877 ret = get_errno(sched_get_priority_min(arg1));
7878 break;
7879 case TARGET_NR_sched_rr_get_interval:
7881 struct timespec ts;
7882 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7883 if (!is_error(ret)) {
7884 ret = host_to_target_timespec(arg2, &ts);
7887 break;
7888 case TARGET_NR_nanosleep:
7890 struct timespec req, rem;
7891 target_to_host_timespec(&req, arg1);
7892 ret = get_errno(nanosleep(&req, &rem));
7893 if (is_error(ret) && arg2) {
7894 host_to_target_timespec(arg2, &rem);
7897 break;
7898 #ifdef TARGET_NR_query_module
7899 case TARGET_NR_query_module:
7900 goto unimplemented;
7901 #endif
7902 #ifdef TARGET_NR_nfsservctl
7903 case TARGET_NR_nfsservctl:
7904 goto unimplemented;
7905 #endif
7906 case TARGET_NR_prctl:
7907 switch (arg1) {
7908 case PR_GET_PDEATHSIG:
7910 int deathsig;
7911 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7912 if (!is_error(ret) && arg2
7913 && put_user_ual(deathsig, arg2)) {
7914 goto efault;
7916 break;
7918 #ifdef PR_GET_NAME
7919 case PR_GET_NAME:
7921 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7922 if (!name) {
7923 goto efault;
7925 ret = get_errno(prctl(arg1, (unsigned long)name,
7926 arg3, arg4, arg5));
7927 unlock_user(name, arg2, 16);
7928 break;
7930 case PR_SET_NAME:
7932 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7933 if (!name) {
7934 goto efault;
7936 ret = get_errno(prctl(arg1, (unsigned long)name,
7937 arg3, arg4, arg5));
7938 unlock_user(name, arg2, 0);
7939 break;
7941 #endif
7942 default:
7943 /* Most prctl options have no pointer arguments */
7944 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7945 break;
7947 break;
7948 #ifdef TARGET_NR_arch_prctl
7949 case TARGET_NR_arch_prctl:
7950 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7951 ret = do_arch_prctl(cpu_env, arg1, arg2);
7952 break;
7953 #else
7954 goto unimplemented;
7955 #endif
7956 #endif
7957 #ifdef TARGET_NR_pread64
7958 case TARGET_NR_pread64:
7959 if (regpairs_aligned(cpu_env)) {
7960 arg4 = arg5;
7961 arg5 = arg6;
7963 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7964 goto efault;
7965 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7966 unlock_user(p, arg2, ret);
7967 break;
7968 case TARGET_NR_pwrite64:
7969 if (regpairs_aligned(cpu_env)) {
7970 arg4 = arg5;
7971 arg5 = arg6;
7973 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7974 goto efault;
7975 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7976 unlock_user(p, arg2, 0);
7977 break;
7978 #endif
7979 case TARGET_NR_getcwd:
7980 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7981 goto efault;
7982 ret = get_errno(sys_getcwd1(p, arg2));
7983 unlock_user(p, arg1, ret);
7984 break;
7985 case TARGET_NR_capget:
7986 case TARGET_NR_capset:
7988 struct target_user_cap_header *target_header;
7989 struct target_user_cap_data *target_data = NULL;
7990 struct __user_cap_header_struct header;
7991 struct __user_cap_data_struct data[2];
7992 struct __user_cap_data_struct *dataptr = NULL;
7993 int i, target_datalen;
7994 int data_items = 1;
7996 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
7997 goto efault;
7999 header.version = tswap32(target_header->version);
8000 header.pid = tswap32(target_header->pid);
8002 if (header.version != _LINUX_CAPABILITY_VERSION) {
8003 /* Version 2 and up takes pointer to two user_data structs */
8004 data_items = 2;
8007 target_datalen = sizeof(*target_data) * data_items;
8009 if (arg2) {
8010 if (num == TARGET_NR_capget) {
8011 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8012 } else {
8013 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8015 if (!target_data) {
8016 unlock_user_struct(target_header, arg1, 0);
8017 goto efault;
8020 if (num == TARGET_NR_capset) {
8021 for (i = 0; i < data_items; i++) {
8022 data[i].effective = tswap32(target_data[i].effective);
8023 data[i].permitted = tswap32(target_data[i].permitted);
8024 data[i].inheritable = tswap32(target_data[i].inheritable);
8028 dataptr = data;
8031 if (num == TARGET_NR_capget) {
8032 ret = get_errno(capget(&header, dataptr));
8033 } else {
8034 ret = get_errno(capset(&header, dataptr));
8037 /* The kernel always updates version for both capget and capset */
8038 target_header->version = tswap32(header.version);
8039 unlock_user_struct(target_header, arg1, 1);
8041 if (arg2) {
8042 if (num == TARGET_NR_capget) {
8043 for (i = 0; i < data_items; i++) {
8044 target_data[i].effective = tswap32(data[i].effective);
8045 target_data[i].permitted = tswap32(data[i].permitted);
8046 target_data[i].inheritable = tswap32(data[i].inheritable);
8048 unlock_user(target_data, arg2, target_datalen);
8049 } else {
8050 unlock_user(target_data, arg2, 0);
8053 break;
8055 case TARGET_NR_sigaltstack:
8056 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8057 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8058 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8059 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8060 break;
8061 #else
8062 goto unimplemented;
8063 #endif
8065 #ifdef CONFIG_SENDFILE
8066 case TARGET_NR_sendfile:
8068 off_t *offp = NULL;
8069 off_t off;
8070 if (arg3) {
8071 ret = get_user_sal(off, arg3);
8072 if (is_error(ret)) {
8073 break;
8075 offp = &off;
8077 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8078 if (!is_error(ret) && arg3) {
8079 abi_long ret2 = put_user_sal(off, arg3);
8080 if (is_error(ret2)) {
8081 ret = ret2;
8084 break;
8086 #ifdef TARGET_NR_sendfile64
8087 case TARGET_NR_sendfile64:
8089 off_t *offp = NULL;
8090 off_t off;
8091 if (arg3) {
8092 ret = get_user_s64(off, arg3);
8093 if (is_error(ret)) {
8094 break;
8096 offp = &off;
8098 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8099 if (!is_error(ret) && arg3) {
8100 abi_long ret2 = put_user_s64(off, arg3);
8101 if (is_error(ret2)) {
8102 ret = ret2;
8105 break;
8107 #endif
8108 #else
8109 case TARGET_NR_sendfile:
8110 #ifdef TARGET_NR_sendfile64
8111 case TARGET_NR_sendfile64:
8112 #endif
8113 goto unimplemented;
8114 #endif
8116 #ifdef TARGET_NR_getpmsg
8117 case TARGET_NR_getpmsg:
8118 goto unimplemented;
8119 #endif
8120 #ifdef TARGET_NR_putpmsg
8121 case TARGET_NR_putpmsg:
8122 goto unimplemented;
8123 #endif
8124 #ifdef TARGET_NR_vfork
8125 case TARGET_NR_vfork:
8126 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8127 0, 0, 0, 0));
8128 break;
8129 #endif
8130 #ifdef TARGET_NR_ugetrlimit
8131 case TARGET_NR_ugetrlimit:
8133 struct rlimit rlim;
8134 int resource = target_to_host_resource(arg1);
8135 ret = get_errno(getrlimit(resource, &rlim));
8136 if (!is_error(ret)) {
8137 struct target_rlimit *target_rlim;
8138 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8139 goto efault;
8140 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8141 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8142 unlock_user_struct(target_rlim, arg2, 1);
8144 break;
8146 #endif
8147 #ifdef TARGET_NR_truncate64
8148 case TARGET_NR_truncate64:
8149 if (!(p = lock_user_string(arg1)))
8150 goto efault;
8151 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8152 unlock_user(p, arg1, 0);
8153 break;
8154 #endif
8155 #ifdef TARGET_NR_ftruncate64
8156 case TARGET_NR_ftruncate64:
8157 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8158 break;
8159 #endif
8160 #ifdef TARGET_NR_stat64
8161 case TARGET_NR_stat64:
8162 if (!(p = lock_user_string(arg1)))
8163 goto efault;
8164 ret = get_errno(stat(path(p), &st));
8165 unlock_user(p, arg1, 0);
8166 if (!is_error(ret))
8167 ret = host_to_target_stat64(cpu_env, arg2, &st);
8168 break;
8169 #endif
8170 #ifdef TARGET_NR_lstat64
8171 case TARGET_NR_lstat64:
8172 if (!(p = lock_user_string(arg1)))
8173 goto efault;
8174 ret = get_errno(lstat(path(p), &st));
8175 unlock_user(p, arg1, 0);
8176 if (!is_error(ret))
8177 ret = host_to_target_stat64(cpu_env, arg2, &st);
8178 break;
8179 #endif
8180 #ifdef TARGET_NR_fstat64
8181 case TARGET_NR_fstat64:
8182 ret = get_errno(fstat(arg1, &st));
8183 if (!is_error(ret))
8184 ret = host_to_target_stat64(cpu_env, arg2, &st);
8185 break;
8186 #endif
8187 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8188 #ifdef TARGET_NR_fstatat64
8189 case TARGET_NR_fstatat64:
8190 #endif
8191 #ifdef TARGET_NR_newfstatat
8192 case TARGET_NR_newfstatat:
8193 #endif
8194 if (!(p = lock_user_string(arg2)))
8195 goto efault;
8196 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8197 if (!is_error(ret))
8198 ret = host_to_target_stat64(cpu_env, arg3, &st);
8199 break;
8200 #endif
8201 case TARGET_NR_lchown:
8202 if (!(p = lock_user_string(arg1)))
8203 goto efault;
8204 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8205 unlock_user(p, arg1, 0);
8206 break;
8207 #ifdef TARGET_NR_getuid
8208 case TARGET_NR_getuid:
8209 ret = get_errno(high2lowuid(getuid()));
8210 break;
8211 #endif
8212 #ifdef TARGET_NR_getgid
8213 case TARGET_NR_getgid:
8214 ret = get_errno(high2lowgid(getgid()));
8215 break;
8216 #endif
8217 #ifdef TARGET_NR_geteuid
8218 case TARGET_NR_geteuid:
8219 ret = get_errno(high2lowuid(geteuid()));
8220 break;
8221 #endif
8222 #ifdef TARGET_NR_getegid
8223 case TARGET_NR_getegid:
8224 ret = get_errno(high2lowgid(getegid()));
8225 break;
8226 #endif
8227 case TARGET_NR_setreuid:
8228 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8229 break;
8230 case TARGET_NR_setregid:
8231 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8232 break;
8233 case TARGET_NR_getgroups:
8235 int gidsetsize = arg1;
8236 target_id *target_grouplist;
8237 gid_t *grouplist;
8238 int i;
8240 grouplist = alloca(gidsetsize * sizeof(gid_t));
8241 ret = get_errno(getgroups(gidsetsize, grouplist));
8242 if (gidsetsize == 0)
8243 break;
8244 if (!is_error(ret)) {
8245 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8246 if (!target_grouplist)
8247 goto efault;
8248 for(i = 0;i < ret; i++)
8249 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8250 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8253 break;
8254 case TARGET_NR_setgroups:
8256 int gidsetsize = arg1;
8257 target_id *target_grouplist;
8258 gid_t *grouplist = NULL;
8259 int i;
8260 if (gidsetsize) {
8261 grouplist = alloca(gidsetsize * sizeof(gid_t));
8262 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8263 if (!target_grouplist) {
8264 ret = -TARGET_EFAULT;
8265 goto fail;
8267 for (i = 0; i < gidsetsize; i++) {
8268 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8270 unlock_user(target_grouplist, arg2, 0);
8272 ret = get_errno(setgroups(gidsetsize, grouplist));
8274 break;
8275 case TARGET_NR_fchown:
8276 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8277 break;
8278 #if defined(TARGET_NR_fchownat)
8279 case TARGET_NR_fchownat:
8280 if (!(p = lock_user_string(arg2)))
8281 goto efault;
8282 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8283 low2highgid(arg4), arg5));
8284 unlock_user(p, arg2, 0);
8285 break;
8286 #endif
8287 #ifdef TARGET_NR_setresuid
8288 case TARGET_NR_setresuid:
8289 ret = get_errno(setresuid(low2highuid(arg1),
8290 low2highuid(arg2),
8291 low2highuid(arg3)));
8292 break;
8293 #endif
8294 #ifdef TARGET_NR_getresuid
8295 case TARGET_NR_getresuid:
8297 uid_t ruid, euid, suid;
8298 ret = get_errno(getresuid(&ruid, &euid, &suid));
8299 if (!is_error(ret)) {
8300 if (put_user_id(high2lowuid(ruid), arg1)
8301 || put_user_id(high2lowuid(euid), arg2)
8302 || put_user_id(high2lowuid(suid), arg3))
8303 goto efault;
8306 break;
8307 #endif
8308 #ifdef TARGET_NR_getresgid
8309 case TARGET_NR_setresgid:
8310 ret = get_errno(setresgid(low2highgid(arg1),
8311 low2highgid(arg2),
8312 low2highgid(arg3)));
8313 break;
8314 #endif
8315 #ifdef TARGET_NR_getresgid
8316 case TARGET_NR_getresgid:
8318 gid_t rgid, egid, sgid;
8319 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8320 if (!is_error(ret)) {
8321 if (put_user_id(high2lowgid(rgid), arg1)
8322 || put_user_id(high2lowgid(egid), arg2)
8323 || put_user_id(high2lowgid(sgid), arg3))
8324 goto efault;
8327 break;
8328 #endif
8329 case TARGET_NR_chown:
8330 if (!(p = lock_user_string(arg1)))
8331 goto efault;
8332 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8333 unlock_user(p, arg1, 0);
8334 break;
8335 case TARGET_NR_setuid:
8336 ret = get_errno(setuid(low2highuid(arg1)));
8337 break;
8338 case TARGET_NR_setgid:
8339 ret = get_errno(setgid(low2highgid(arg1)));
8340 break;
8341 case TARGET_NR_setfsuid:
8342 ret = get_errno(setfsuid(arg1));
8343 break;
8344 case TARGET_NR_setfsgid:
8345 ret = get_errno(setfsgid(arg1));
8346 break;
8348 #ifdef TARGET_NR_lchown32
8349 case TARGET_NR_lchown32:
8350 if (!(p = lock_user_string(arg1)))
8351 goto efault;
8352 ret = get_errno(lchown(p, arg2, arg3));
8353 unlock_user(p, arg1, 0);
8354 break;
8355 #endif
8356 #ifdef TARGET_NR_getuid32
8357 case TARGET_NR_getuid32:
8358 ret = get_errno(getuid());
8359 break;
8360 #endif
8362 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8363 /* Alpha specific */
8364 case TARGET_NR_getxuid:
8366 uid_t euid;
8367 euid=geteuid();
8368 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8370 ret = get_errno(getuid());
8371 break;
8372 #endif
8373 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8374 /* Alpha specific */
8375 case TARGET_NR_getxgid:
8377 uid_t egid;
8378 egid=getegid();
8379 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8381 ret = get_errno(getgid());
8382 break;
8383 #endif
8384 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8385 /* Alpha specific */
8386 case TARGET_NR_osf_getsysinfo:
8387 ret = -TARGET_EOPNOTSUPP;
8388 switch (arg1) {
8389 case TARGET_GSI_IEEE_FP_CONTROL:
8391 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8393 /* Copied from linux ieee_fpcr_to_swcr. */
8394 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8395 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8396 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8397 | SWCR_TRAP_ENABLE_DZE
8398 | SWCR_TRAP_ENABLE_OVF);
8399 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8400 | SWCR_TRAP_ENABLE_INE);
8401 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8402 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8404 if (put_user_u64 (swcr, arg2))
8405 goto efault;
8406 ret = 0;
8408 break;
8410 /* case GSI_IEEE_STATE_AT_SIGNAL:
8411 -- Not implemented in linux kernel.
8412 case GSI_UACPROC:
8413 -- Retrieves current unaligned access state; not much used.
8414 case GSI_PROC_TYPE:
8415 -- Retrieves implver information; surely not used.
8416 case GSI_GET_HWRPB:
8417 -- Grabs a copy of the HWRPB; surely not used.
8420 break;
8421 #endif
8422 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8423 /* Alpha specific */
8424 case TARGET_NR_osf_setsysinfo:
8425 ret = -TARGET_EOPNOTSUPP;
8426 switch (arg1) {
8427 case TARGET_SSI_IEEE_FP_CONTROL:
8429 uint64_t swcr, fpcr, orig_fpcr;
8431 if (get_user_u64 (swcr, arg2)) {
8432 goto efault;
8434 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8435 fpcr = orig_fpcr & FPCR_DYN_MASK;
8437 /* Copied from linux ieee_swcr_to_fpcr. */
8438 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8439 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8440 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8441 | SWCR_TRAP_ENABLE_DZE
8442 | SWCR_TRAP_ENABLE_OVF)) << 48;
8443 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8444 | SWCR_TRAP_ENABLE_INE)) << 57;
8445 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8446 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8448 cpu_alpha_store_fpcr(cpu_env, fpcr);
8449 ret = 0;
8451 break;
8453 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8455 uint64_t exc, fpcr, orig_fpcr;
8456 int si_code;
8458 if (get_user_u64(exc, arg2)) {
8459 goto efault;
8462 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8464 /* We only add to the exception status here. */
8465 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8467 cpu_alpha_store_fpcr(cpu_env, fpcr);
8468 ret = 0;
8470 /* Old exceptions are not signaled. */
8471 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8473 /* If any exceptions set by this call,
8474 and are unmasked, send a signal. */
8475 si_code = 0;
8476 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8477 si_code = TARGET_FPE_FLTRES;
8479 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8480 si_code = TARGET_FPE_FLTUND;
8482 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8483 si_code = TARGET_FPE_FLTOVF;
8485 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8486 si_code = TARGET_FPE_FLTDIV;
8488 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8489 si_code = TARGET_FPE_FLTINV;
8491 if (si_code != 0) {
8492 target_siginfo_t info;
8493 info.si_signo = SIGFPE;
8494 info.si_errno = 0;
8495 info.si_code = si_code;
8496 info._sifields._sigfault._addr
8497 = ((CPUArchState *)cpu_env)->pc;
8498 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8501 break;
8503 /* case SSI_NVPAIRS:
8504 -- Used with SSIN_UACPROC to enable unaligned accesses.
8505 case SSI_IEEE_STATE_AT_SIGNAL:
8506 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8507 -- Not implemented in linux kernel
8510 break;
8511 #endif
8512 #ifdef TARGET_NR_osf_sigprocmask
8513 /* Alpha specific. */
8514 case TARGET_NR_osf_sigprocmask:
8516 abi_ulong mask;
8517 int how;
8518 sigset_t set, oldset;
8520 switch(arg1) {
8521 case TARGET_SIG_BLOCK:
8522 how = SIG_BLOCK;
8523 break;
8524 case TARGET_SIG_UNBLOCK:
8525 how = SIG_UNBLOCK;
8526 break;
8527 case TARGET_SIG_SETMASK:
8528 how = SIG_SETMASK;
8529 break;
8530 default:
8531 ret = -TARGET_EINVAL;
8532 goto fail;
8534 mask = arg2;
8535 target_to_host_old_sigset(&set, &mask);
8536 do_sigprocmask(how, &set, &oldset);
8537 host_to_target_old_sigset(&mask, &oldset);
8538 ret = mask;
8540 break;
8541 #endif
8543 #ifdef TARGET_NR_getgid32
8544 case TARGET_NR_getgid32:
8545 ret = get_errno(getgid());
8546 break;
8547 #endif
8548 #ifdef TARGET_NR_geteuid32
8549 case TARGET_NR_geteuid32:
8550 ret = get_errno(geteuid());
8551 break;
8552 #endif
8553 #ifdef TARGET_NR_getegid32
8554 case TARGET_NR_getegid32:
8555 ret = get_errno(getegid());
8556 break;
8557 #endif
8558 #ifdef TARGET_NR_setreuid32
8559 case TARGET_NR_setreuid32:
8560 ret = get_errno(setreuid(arg1, arg2));
8561 break;
8562 #endif
8563 #ifdef TARGET_NR_setregid32
8564 case TARGET_NR_setregid32:
8565 ret = get_errno(setregid(arg1, arg2));
8566 break;
8567 #endif
8568 #ifdef TARGET_NR_getgroups32
8569 case TARGET_NR_getgroups32:
8571 int gidsetsize = arg1;
8572 uint32_t *target_grouplist;
8573 gid_t *grouplist;
8574 int i;
8576 grouplist = alloca(gidsetsize * sizeof(gid_t));
8577 ret = get_errno(getgroups(gidsetsize, grouplist));
8578 if (gidsetsize == 0)
8579 break;
8580 if (!is_error(ret)) {
8581 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8582 if (!target_grouplist) {
8583 ret = -TARGET_EFAULT;
8584 goto fail;
8586 for(i = 0;i < ret; i++)
8587 target_grouplist[i] = tswap32(grouplist[i]);
8588 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8591 break;
8592 #endif
8593 #ifdef TARGET_NR_setgroups32
8594 case TARGET_NR_setgroups32:
8596 int gidsetsize = arg1;
8597 uint32_t *target_grouplist;
8598 gid_t *grouplist;
8599 int i;
8601 grouplist = alloca(gidsetsize * sizeof(gid_t));
8602 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8603 if (!target_grouplist) {
8604 ret = -TARGET_EFAULT;
8605 goto fail;
8607 for(i = 0;i < gidsetsize; i++)
8608 grouplist[i] = tswap32(target_grouplist[i]);
8609 unlock_user(target_grouplist, arg2, 0);
8610 ret = get_errno(setgroups(gidsetsize, grouplist));
8612 break;
8613 #endif
8614 #ifdef TARGET_NR_fchown32
8615 case TARGET_NR_fchown32:
8616 ret = get_errno(fchown(arg1, arg2, arg3));
8617 break;
8618 #endif
8619 #ifdef TARGET_NR_setresuid32
8620 case TARGET_NR_setresuid32:
8621 ret = get_errno(setresuid(arg1, arg2, arg3));
8622 break;
8623 #endif
8624 #ifdef TARGET_NR_getresuid32
8625 case TARGET_NR_getresuid32:
8627 uid_t ruid, euid, suid;
8628 ret = get_errno(getresuid(&ruid, &euid, &suid));
8629 if (!is_error(ret)) {
8630 if (put_user_u32(ruid, arg1)
8631 || put_user_u32(euid, arg2)
8632 || put_user_u32(suid, arg3))
8633 goto efault;
8636 break;
8637 #endif
8638 #ifdef TARGET_NR_setresgid32
8639 case TARGET_NR_setresgid32:
8640 ret = get_errno(setresgid(arg1, arg2, arg3));
8641 break;
8642 #endif
8643 #ifdef TARGET_NR_getresgid32
8644 case TARGET_NR_getresgid32:
8646 gid_t rgid, egid, sgid;
8647 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8648 if (!is_error(ret)) {
8649 if (put_user_u32(rgid, arg1)
8650 || put_user_u32(egid, arg2)
8651 || put_user_u32(sgid, arg3))
8652 goto efault;
8655 break;
8656 #endif
8657 #ifdef TARGET_NR_chown32
8658 case TARGET_NR_chown32:
8659 if (!(p = lock_user_string(arg1)))
8660 goto efault;
8661 ret = get_errno(chown(p, arg2, arg3));
8662 unlock_user(p, arg1, 0);
8663 break;
8664 #endif
8665 #ifdef TARGET_NR_setuid32
8666 case TARGET_NR_setuid32:
8667 ret = get_errno(setuid(arg1));
8668 break;
8669 #endif
8670 #ifdef TARGET_NR_setgid32
8671 case TARGET_NR_setgid32:
8672 ret = get_errno(setgid(arg1));
8673 break;
8674 #endif
8675 #ifdef TARGET_NR_setfsuid32
8676 case TARGET_NR_setfsuid32:
8677 ret = get_errno(setfsuid(arg1));
8678 break;
8679 #endif
8680 #ifdef TARGET_NR_setfsgid32
8681 case TARGET_NR_setfsgid32:
8682 ret = get_errno(setfsgid(arg1));
8683 break;
8684 #endif
8686 case TARGET_NR_pivot_root:
8687 goto unimplemented;
8688 #ifdef TARGET_NR_mincore
8689 case TARGET_NR_mincore:
8691 void *a;
8692 ret = -TARGET_EFAULT;
8693 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8694 goto efault;
8695 if (!(p = lock_user_string(arg3)))
8696 goto mincore_fail;
8697 ret = get_errno(mincore(a, arg2, p));
8698 unlock_user(p, arg3, ret);
8699 mincore_fail:
8700 unlock_user(a, arg1, 0);
8702 break;
8703 #endif
8704 #ifdef TARGET_NR_arm_fadvise64_64
8705 case TARGET_NR_arm_fadvise64_64:
8708 * arm_fadvise64_64 looks like fadvise64_64 but
8709 * with different argument order
8711 abi_long temp;
8712 temp = arg3;
8713 arg3 = arg4;
8714 arg4 = temp;
8716 #endif
8717 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8718 #ifdef TARGET_NR_fadvise64_64
8719 case TARGET_NR_fadvise64_64:
8720 #endif
8721 #ifdef TARGET_NR_fadvise64
8722 case TARGET_NR_fadvise64:
8723 #endif
8724 #ifdef TARGET_S390X
8725 switch (arg4) {
8726 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8727 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8728 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8729 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8730 default: break;
8732 #endif
8733 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8734 break;
8735 #endif
8736 #ifdef TARGET_NR_madvise
8737 case TARGET_NR_madvise:
8738 /* A straight passthrough may not be safe because qemu sometimes
8739 turns private file-backed mappings into anonymous mappings.
8740 This will break MADV_DONTNEED.
8741 This is a hint, so ignoring and returning success is ok. */
8742 ret = get_errno(0);
8743 break;
8744 #endif
8745 #if TARGET_ABI_BITS == 32
8746 case TARGET_NR_fcntl64:
8748 int cmd;
8749 struct flock64 fl;
8750 struct target_flock64 *target_fl;
8751 #ifdef TARGET_ARM
8752 struct target_eabi_flock64 *target_efl;
8753 #endif
8755 cmd = target_to_host_fcntl_cmd(arg2);
8756 if (cmd == -TARGET_EINVAL) {
8757 ret = cmd;
8758 break;
8761 switch(arg2) {
8762 case TARGET_F_GETLK64:
8763 #ifdef TARGET_ARM
8764 if (((CPUARMState *)cpu_env)->eabi) {
8765 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8766 goto efault;
8767 fl.l_type = tswap16(target_efl->l_type);
8768 fl.l_whence = tswap16(target_efl->l_whence);
8769 fl.l_start = tswap64(target_efl->l_start);
8770 fl.l_len = tswap64(target_efl->l_len);
8771 fl.l_pid = tswap32(target_efl->l_pid);
8772 unlock_user_struct(target_efl, arg3, 0);
8773 } else
8774 #endif
8776 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8777 goto efault;
8778 fl.l_type = tswap16(target_fl->l_type);
8779 fl.l_whence = tswap16(target_fl->l_whence);
8780 fl.l_start = tswap64(target_fl->l_start);
8781 fl.l_len = tswap64(target_fl->l_len);
8782 fl.l_pid = tswap32(target_fl->l_pid);
8783 unlock_user_struct(target_fl, arg3, 0);
8785 ret = get_errno(fcntl(arg1, cmd, &fl));
8786 if (ret == 0) {
8787 #ifdef TARGET_ARM
8788 if (((CPUARMState *)cpu_env)->eabi) {
8789 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8790 goto efault;
8791 target_efl->l_type = tswap16(fl.l_type);
8792 target_efl->l_whence = tswap16(fl.l_whence);
8793 target_efl->l_start = tswap64(fl.l_start);
8794 target_efl->l_len = tswap64(fl.l_len);
8795 target_efl->l_pid = tswap32(fl.l_pid);
8796 unlock_user_struct(target_efl, arg3, 1);
8797 } else
8798 #endif
8800 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8801 goto efault;
8802 target_fl->l_type = tswap16(fl.l_type);
8803 target_fl->l_whence = tswap16(fl.l_whence);
8804 target_fl->l_start = tswap64(fl.l_start);
8805 target_fl->l_len = tswap64(fl.l_len);
8806 target_fl->l_pid = tswap32(fl.l_pid);
8807 unlock_user_struct(target_fl, arg3, 1);
8810 break;
8812 case TARGET_F_SETLK64:
8813 case TARGET_F_SETLKW64:
8814 #ifdef TARGET_ARM
8815 if (((CPUARMState *)cpu_env)->eabi) {
8816 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8817 goto efault;
8818 fl.l_type = tswap16(target_efl->l_type);
8819 fl.l_whence = tswap16(target_efl->l_whence);
8820 fl.l_start = tswap64(target_efl->l_start);
8821 fl.l_len = tswap64(target_efl->l_len);
8822 fl.l_pid = tswap32(target_efl->l_pid);
8823 unlock_user_struct(target_efl, arg3, 0);
8824 } else
8825 #endif
8827 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8828 goto efault;
8829 fl.l_type = tswap16(target_fl->l_type);
8830 fl.l_whence = tswap16(target_fl->l_whence);
8831 fl.l_start = tswap64(target_fl->l_start);
8832 fl.l_len = tswap64(target_fl->l_len);
8833 fl.l_pid = tswap32(target_fl->l_pid);
8834 unlock_user_struct(target_fl, arg3, 0);
8836 ret = get_errno(fcntl(arg1, cmd, &fl));
8837 break;
8838 default:
8839 ret = do_fcntl(arg1, arg2, arg3);
8840 break;
8842 break;
8844 #endif
8845 #ifdef TARGET_NR_cacheflush
8846 case TARGET_NR_cacheflush:
8847 /* self-modifying code is handled automatically, so nothing needed */
8848 ret = 0;
8849 break;
8850 #endif
8851 #ifdef TARGET_NR_security
8852 case TARGET_NR_security:
8853 goto unimplemented;
8854 #endif
8855 #ifdef TARGET_NR_getpagesize
8856 case TARGET_NR_getpagesize:
8857 ret = TARGET_PAGE_SIZE;
8858 break;
8859 #endif
8860 case TARGET_NR_gettid:
8861 ret = get_errno(gettid());
8862 break;
8863 #ifdef TARGET_NR_readahead
8864 case TARGET_NR_readahead:
8865 #if TARGET_ABI_BITS == 32
8866 if (regpairs_aligned(cpu_env)) {
8867 arg2 = arg3;
8868 arg3 = arg4;
8869 arg4 = arg5;
8871 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8872 #else
8873 ret = get_errno(readahead(arg1, arg2, arg3));
8874 #endif
8875 break;
8876 #endif
8877 #ifdef CONFIG_ATTR
8878 #ifdef TARGET_NR_setxattr
8879 case TARGET_NR_listxattr:
8880 case TARGET_NR_llistxattr:
8882 void *p, *b = 0;
8883 if (arg2) {
8884 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8885 if (!b) {
8886 ret = -TARGET_EFAULT;
8887 break;
8890 p = lock_user_string(arg1);
8891 if (p) {
8892 if (num == TARGET_NR_listxattr) {
8893 ret = get_errno(listxattr(p, b, arg3));
8894 } else {
8895 ret = get_errno(llistxattr(p, b, arg3));
8897 } else {
8898 ret = -TARGET_EFAULT;
8900 unlock_user(p, arg1, 0);
8901 unlock_user(b, arg2, arg3);
8902 break;
8904 case TARGET_NR_flistxattr:
8906 void *b = 0;
8907 if (arg2) {
8908 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8909 if (!b) {
8910 ret = -TARGET_EFAULT;
8911 break;
8914 ret = get_errno(flistxattr(arg1, b, arg3));
8915 unlock_user(b, arg2, arg3);
8916 break;
8918 case TARGET_NR_setxattr:
8919 case TARGET_NR_lsetxattr:
8921 void *p, *n, *v = 0;
8922 if (arg3) {
8923 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8924 if (!v) {
8925 ret = -TARGET_EFAULT;
8926 break;
8929 p = lock_user_string(arg1);
8930 n = lock_user_string(arg2);
8931 if (p && n) {
8932 if (num == TARGET_NR_setxattr) {
8933 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8934 } else {
8935 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8937 } else {
8938 ret = -TARGET_EFAULT;
8940 unlock_user(p, arg1, 0);
8941 unlock_user(n, arg2, 0);
8942 unlock_user(v, arg3, 0);
8944 break;
8945 case TARGET_NR_fsetxattr:
8947 void *n, *v = 0;
8948 if (arg3) {
8949 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8950 if (!v) {
8951 ret = -TARGET_EFAULT;
8952 break;
8955 n = lock_user_string(arg2);
8956 if (n) {
8957 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8958 } else {
8959 ret = -TARGET_EFAULT;
8961 unlock_user(n, arg2, 0);
8962 unlock_user(v, arg3, 0);
8964 break;
8965 case TARGET_NR_getxattr:
8966 case TARGET_NR_lgetxattr:
8968 void *p, *n, *v = 0;
8969 if (arg3) {
8970 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8971 if (!v) {
8972 ret = -TARGET_EFAULT;
8973 break;
8976 p = lock_user_string(arg1);
8977 n = lock_user_string(arg2);
8978 if (p && n) {
8979 if (num == TARGET_NR_getxattr) {
8980 ret = get_errno(getxattr(p, n, v, arg4));
8981 } else {
8982 ret = get_errno(lgetxattr(p, n, v, arg4));
8984 } else {
8985 ret = -TARGET_EFAULT;
8987 unlock_user(p, arg1, 0);
8988 unlock_user(n, arg2, 0);
8989 unlock_user(v, arg3, arg4);
8991 break;
8992 case TARGET_NR_fgetxattr:
8994 void *n, *v = 0;
8995 if (arg3) {
8996 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8997 if (!v) {
8998 ret = -TARGET_EFAULT;
8999 break;
9002 n = lock_user_string(arg2);
9003 if (n) {
9004 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9005 } else {
9006 ret = -TARGET_EFAULT;
9008 unlock_user(n, arg2, 0);
9009 unlock_user(v, arg3, arg4);
9011 break;
9012 case TARGET_NR_removexattr:
9013 case TARGET_NR_lremovexattr:
9015 void *p, *n;
9016 p = lock_user_string(arg1);
9017 n = lock_user_string(arg2);
9018 if (p && n) {
9019 if (num == TARGET_NR_removexattr) {
9020 ret = get_errno(removexattr(p, n));
9021 } else {
9022 ret = get_errno(lremovexattr(p, n));
9024 } else {
9025 ret = -TARGET_EFAULT;
9027 unlock_user(p, arg1, 0);
9028 unlock_user(n, arg2, 0);
9030 break;
9031 case TARGET_NR_fremovexattr:
9033 void *n;
9034 n = lock_user_string(arg2);
9035 if (n) {
9036 ret = get_errno(fremovexattr(arg1, n));
9037 } else {
9038 ret = -TARGET_EFAULT;
9040 unlock_user(n, arg2, 0);
9042 break;
9043 #endif
9044 #endif /* CONFIG_ATTR */
9045 #ifdef TARGET_NR_set_thread_area
9046 case TARGET_NR_set_thread_area:
9047 #if defined(TARGET_MIPS)
9048 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9049 ret = 0;
9050 break;
9051 #elif defined(TARGET_CRIS)
9052 if (arg1 & 0xff)
9053 ret = -TARGET_EINVAL;
9054 else {
9055 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9056 ret = 0;
9058 break;
9059 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9060 ret = do_set_thread_area(cpu_env, arg1);
9061 break;
9062 #elif defined(TARGET_M68K)
9064 TaskState *ts = cpu->opaque;
9065 ts->tp_value = arg1;
9066 ret = 0;
9067 break;
9069 #else
9070 goto unimplemented_nowarn;
9071 #endif
9072 #endif
9073 #ifdef TARGET_NR_get_thread_area
9074 case TARGET_NR_get_thread_area:
9075 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9076 ret = do_get_thread_area(cpu_env, arg1);
9077 break;
9078 #elif defined(TARGET_M68K)
9080 TaskState *ts = cpu->opaque;
9081 ret = ts->tp_value;
9082 break;
9084 #else
9085 goto unimplemented_nowarn;
9086 #endif
9087 #endif
9088 #ifdef TARGET_NR_getdomainname
9089 case TARGET_NR_getdomainname:
9090 goto unimplemented_nowarn;
9091 #endif
9093 #ifdef TARGET_NR_clock_gettime
9094 case TARGET_NR_clock_gettime:
9096 struct timespec ts;
9097 ret = get_errno(clock_gettime(arg1, &ts));
9098 if (!is_error(ret)) {
9099 host_to_target_timespec(arg2, &ts);
9101 break;
9103 #endif
9104 #ifdef TARGET_NR_clock_getres
9105 case TARGET_NR_clock_getres:
9107 struct timespec ts;
9108 ret = get_errno(clock_getres(arg1, &ts));
9109 if (!is_error(ret)) {
9110 host_to_target_timespec(arg2, &ts);
9112 break;
9114 #endif
9115 #ifdef TARGET_NR_clock_nanosleep
9116 case TARGET_NR_clock_nanosleep:
9118 struct timespec ts;
9119 target_to_host_timespec(&ts, arg3);
9120 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9121 if (arg4)
9122 host_to_target_timespec(arg4, &ts);
9124 #if defined(TARGET_PPC)
9125 /* clock_nanosleep is odd in that it returns positive errno values.
9126 * On PPC, CR0 bit 3 should be set in such a situation. */
9127 if (ret) {
9128 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9130 #endif
9131 break;
9133 #endif
9135 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9136 case TARGET_NR_set_tid_address:
9137 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9138 break;
9139 #endif
9141 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9142 case TARGET_NR_tkill:
9143 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9144 break;
9145 #endif
9147 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9148 case TARGET_NR_tgkill:
9149 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9150 target_to_host_signal(arg3)));
9151 break;
9152 #endif
9154 #ifdef TARGET_NR_set_robust_list
9155 case TARGET_NR_set_robust_list:
9156 case TARGET_NR_get_robust_list:
9157 /* The ABI for supporting robust futexes has userspace pass
9158 * the kernel a pointer to a linked list which is updated by
9159 * userspace after the syscall; the list is walked by the kernel
9160 * when the thread exits. Since the linked list in QEMU guest
9161 * memory isn't a valid linked list for the host and we have
9162 * no way to reliably intercept the thread-death event, we can't
9163 * support these. Silently return ENOSYS so that guest userspace
9164 * falls back to a non-robust futex implementation (which should
9165 * be OK except in the corner case of the guest crashing while
9166 * holding a mutex that is shared with another process via
9167 * shared memory).
9169 goto unimplemented_nowarn;
9170 #endif
9172 #if defined(TARGET_NR_utimensat)
9173 case TARGET_NR_utimensat:
9175 struct timespec *tsp, ts[2];
9176 if (!arg3) {
9177 tsp = NULL;
9178 } else {
9179 target_to_host_timespec(ts, arg3);
9180 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9181 tsp = ts;
9183 if (!arg2)
9184 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9185 else {
9186 if (!(p = lock_user_string(arg2))) {
9187 ret = -TARGET_EFAULT;
9188 goto fail;
9190 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9191 unlock_user(p, arg2, 0);
9194 break;
9195 #endif
9196 case TARGET_NR_futex:
9197 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9198 break;
9199 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9200 case TARGET_NR_inotify_init:
9201 ret = get_errno(sys_inotify_init());
9202 break;
9203 #endif
9204 #ifdef CONFIG_INOTIFY1
9205 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9206 case TARGET_NR_inotify_init1:
9207 ret = get_errno(sys_inotify_init1(arg1));
9208 break;
9209 #endif
9210 #endif
9211 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9212 case TARGET_NR_inotify_add_watch:
9213 p = lock_user_string(arg2);
9214 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9215 unlock_user(p, arg2, 0);
9216 break;
9217 #endif
9218 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9219 case TARGET_NR_inotify_rm_watch:
9220 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9221 break;
9222 #endif
9224 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9225 case TARGET_NR_mq_open:
9227 struct mq_attr posix_mq_attr, *attrp;
9229 p = lock_user_string(arg1 - 1);
9230 if (arg4 != 0) {
9231 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9232 attrp = &posix_mq_attr;
9233 } else {
9234 attrp = 0;
9236 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9237 unlock_user (p, arg1, 0);
9239 break;
9241 case TARGET_NR_mq_unlink:
9242 p = lock_user_string(arg1 - 1);
9243 ret = get_errno(mq_unlink(p));
9244 unlock_user (p, arg1, 0);
9245 break;
9247 case TARGET_NR_mq_timedsend:
9249 struct timespec ts;
9251 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9252 if (arg5 != 0) {
9253 target_to_host_timespec(&ts, arg5);
9254 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9255 host_to_target_timespec(arg5, &ts);
9257 else
9258 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9259 unlock_user (p, arg2, arg3);
9261 break;
9263 case TARGET_NR_mq_timedreceive:
9265 struct timespec ts;
9266 unsigned int prio;
9268 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9269 if (arg5 != 0) {
9270 target_to_host_timespec(&ts, arg5);
9271 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9272 host_to_target_timespec(arg5, &ts);
9274 else
9275 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9276 unlock_user (p, arg2, arg3);
9277 if (arg4 != 0)
9278 put_user_u32(prio, arg4);
9280 break;
9282 /* Not implemented for now... */
9283 /* case TARGET_NR_mq_notify: */
9284 /* break; */
9286 case TARGET_NR_mq_getsetattr:
9288 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9289 ret = 0;
9290 if (arg3 != 0) {
9291 ret = mq_getattr(arg1, &posix_mq_attr_out);
9292 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9294 if (arg2 != 0) {
9295 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9296 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9300 break;
9301 #endif
9303 #ifdef CONFIG_SPLICE
9304 #ifdef TARGET_NR_tee
9305 case TARGET_NR_tee:
9307 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9309 break;
9310 #endif
9311 #ifdef TARGET_NR_splice
9312 case TARGET_NR_splice:
9314 loff_t loff_in, loff_out;
9315 loff_t *ploff_in = NULL, *ploff_out = NULL;
9316 if(arg2) {
9317 get_user_u64(loff_in, arg2);
9318 ploff_in = &loff_in;
9320 if(arg4) {
9321 get_user_u64(loff_out, arg2);
9322 ploff_out = &loff_out;
9324 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9326 break;
9327 #endif
9328 #ifdef TARGET_NR_vmsplice
9329 case TARGET_NR_vmsplice:
9331 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9332 if (vec != NULL) {
9333 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9334 unlock_iovec(vec, arg2, arg3, 0);
9335 } else {
9336 ret = -host_to_target_errno(errno);
9339 break;
9340 #endif
9341 #endif /* CONFIG_SPLICE */
9342 #ifdef CONFIG_EVENTFD
9343 #if defined(TARGET_NR_eventfd)
9344 case TARGET_NR_eventfd:
9345 ret = get_errno(eventfd(arg1, 0));
9346 break;
9347 #endif
9348 #if defined(TARGET_NR_eventfd2)
9349 case TARGET_NR_eventfd2:
9351 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9352 if (arg2 & TARGET_O_NONBLOCK) {
9353 host_flags |= O_NONBLOCK;
9355 if (arg2 & TARGET_O_CLOEXEC) {
9356 host_flags |= O_CLOEXEC;
9358 ret = get_errno(eventfd(arg1, host_flags));
9359 break;
9361 #endif
9362 #endif /* CONFIG_EVENTFD */
9363 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9364 case TARGET_NR_fallocate:
9365 #if TARGET_ABI_BITS == 32
9366 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9367 target_offset64(arg5, arg6)));
9368 #else
9369 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9370 #endif
9371 break;
9372 #endif
9373 #if defined(CONFIG_SYNC_FILE_RANGE)
9374 #if defined(TARGET_NR_sync_file_range)
9375 case TARGET_NR_sync_file_range:
9376 #if TARGET_ABI_BITS == 32
9377 #if defined(TARGET_MIPS)
9378 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9379 target_offset64(arg5, arg6), arg7));
9380 #else
9381 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9382 target_offset64(arg4, arg5), arg6));
9383 #endif /* !TARGET_MIPS */
9384 #else
9385 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9386 #endif
9387 break;
9388 #endif
9389 #if defined(TARGET_NR_sync_file_range2)
9390 case TARGET_NR_sync_file_range2:
9391 /* This is like sync_file_range but the arguments are reordered */
9392 #if TARGET_ABI_BITS == 32
9393 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9394 target_offset64(arg5, arg6), arg2));
9395 #else
9396 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9397 #endif
9398 break;
9399 #endif
9400 #endif
9401 #if defined(CONFIG_EPOLL)
9402 #if defined(TARGET_NR_epoll_create)
9403 case TARGET_NR_epoll_create:
9404 ret = get_errno(epoll_create(arg1));
9405 break;
9406 #endif
9407 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9408 case TARGET_NR_epoll_create1:
9409 ret = get_errno(epoll_create1(arg1));
9410 break;
9411 #endif
9412 #if defined(TARGET_NR_epoll_ctl)
9413 case TARGET_NR_epoll_ctl:
9415 struct epoll_event ep;
9416 struct epoll_event *epp = 0;
9417 if (arg4) {
9418 struct target_epoll_event *target_ep;
9419 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9420 goto efault;
9422 ep.events = tswap32(target_ep->events);
9423 /* The epoll_data_t union is just opaque data to the kernel,
9424 * so we transfer all 64 bits across and need not worry what
9425 * actual data type it is.
9427 ep.data.u64 = tswap64(target_ep->data.u64);
9428 unlock_user_struct(target_ep, arg4, 0);
9429 epp = &ep;
9431 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9432 break;
9434 #endif
9436 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9437 #define IMPLEMENT_EPOLL_PWAIT
9438 #endif
9439 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9440 #if defined(TARGET_NR_epoll_wait)
9441 case TARGET_NR_epoll_wait:
9442 #endif
9443 #if defined(IMPLEMENT_EPOLL_PWAIT)
9444 case TARGET_NR_epoll_pwait:
9445 #endif
9447 struct target_epoll_event *target_ep;
9448 struct epoll_event *ep;
9449 int epfd = arg1;
9450 int maxevents = arg3;
9451 int timeout = arg4;
9453 target_ep = lock_user(VERIFY_WRITE, arg2,
9454 maxevents * sizeof(struct target_epoll_event), 1);
9455 if (!target_ep) {
9456 goto efault;
9459 ep = alloca(maxevents * sizeof(struct epoll_event));
9461 switch (num) {
9462 #if defined(IMPLEMENT_EPOLL_PWAIT)
9463 case TARGET_NR_epoll_pwait:
9465 target_sigset_t *target_set;
9466 sigset_t _set, *set = &_set;
9468 if (arg5) {
9469 target_set = lock_user(VERIFY_READ, arg5,
9470 sizeof(target_sigset_t), 1);
9471 if (!target_set) {
9472 unlock_user(target_ep, arg2, 0);
9473 goto efault;
9475 target_to_host_sigset(set, target_set);
9476 unlock_user(target_set, arg5, 0);
9477 } else {
9478 set = NULL;
9481 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9482 break;
9484 #endif
9485 #if defined(TARGET_NR_epoll_wait)
9486 case TARGET_NR_epoll_wait:
9487 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9488 break;
9489 #endif
9490 default:
9491 ret = -TARGET_ENOSYS;
9493 if (!is_error(ret)) {
9494 int i;
9495 for (i = 0; i < ret; i++) {
9496 target_ep[i].events = tswap32(ep[i].events);
9497 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9500 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9501 break;
9503 #endif
9504 #endif
9505 #ifdef TARGET_NR_prlimit64
9506 case TARGET_NR_prlimit64:
9508 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9509 struct target_rlimit64 *target_rnew, *target_rold;
9510 struct host_rlimit64 rnew, rold, *rnewp = 0;
9511 if (arg3) {
9512 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9513 goto efault;
9515 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9516 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9517 unlock_user_struct(target_rnew, arg3, 0);
9518 rnewp = &rnew;
9521 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9522 if (!is_error(ret) && arg4) {
9523 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9524 goto efault;
9526 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9527 target_rold->rlim_max = tswap64(rold.rlim_max);
9528 unlock_user_struct(target_rold, arg4, 1);
9530 break;
9532 #endif
9533 #ifdef TARGET_NR_gethostname
9534 case TARGET_NR_gethostname:
9536 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9537 if (name) {
9538 ret = get_errno(gethostname(name, arg2));
9539 unlock_user(name, arg1, arg2);
9540 } else {
9541 ret = -TARGET_EFAULT;
9543 break;
9545 #endif
9546 #ifdef TARGET_NR_atomic_cmpxchg_32
9547 case TARGET_NR_atomic_cmpxchg_32:
9549 /* should use start_exclusive from main.c */
9550 abi_ulong mem_value;
9551 if (get_user_u32(mem_value, arg6)) {
9552 target_siginfo_t info;
9553 info.si_signo = SIGSEGV;
9554 info.si_errno = 0;
9555 info.si_code = TARGET_SEGV_MAPERR;
9556 info._sifields._sigfault._addr = arg6;
9557 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9558 ret = 0xdeadbeef;
9561 if (mem_value == arg2)
9562 put_user_u32(arg1, arg6);
9563 ret = mem_value;
9564 break;
9566 #endif
9567 #ifdef TARGET_NR_atomic_barrier
9568 case TARGET_NR_atomic_barrier:
9570 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9571 ret = 0;
9572 break;
9574 #endif
9576 #ifdef TARGET_NR_timer_create
9577 case TARGET_NR_timer_create:
9579 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9581 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9582 struct target_timer_t *ptarget_timer;
9584 int clkid = arg1;
9585 int timer_index = next_free_host_timer();
9587 if (timer_index < 0) {
9588 ret = -TARGET_EAGAIN;
9589 } else {
9590 timer_t *phtimer = g_posix_timers + timer_index;
9592 if (arg2) {
9593 phost_sevp = &host_sevp;
9594 ret = target_to_host_sigevent(phost_sevp, arg2);
9595 if (ret != 0) {
9596 break;
9600 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9601 if (ret) {
9602 phtimer = NULL;
9603 } else {
9604 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
9605 goto efault;
9607 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
9608 unlock_user_struct(ptarget_timer, arg3, 1);
9611 break;
9613 #endif
9615 #ifdef TARGET_NR_timer_settime
9616 case TARGET_NR_timer_settime:
9618 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9619 * struct itimerspec * old_value */
9620 target_ulong timerid = arg1;
9622 if (arg3 == 0 || timerid >= ARRAY_SIZE(g_posix_timers)) {
9623 ret = -TARGET_EINVAL;
9624 } else {
9625 timer_t htimer = g_posix_timers[timerid];
9626 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9628 target_to_host_itimerspec(&hspec_new, arg3);
9629 ret = get_errno(
9630 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9631 host_to_target_itimerspec(arg2, &hspec_old);
9633 break;
9635 #endif
9637 #ifdef TARGET_NR_timer_gettime
9638 case TARGET_NR_timer_gettime:
9640 /* args: timer_t timerid, struct itimerspec *curr_value */
9641 target_ulong timerid = arg1;
9643 if (!arg2) {
9644 return -TARGET_EFAULT;
9645 } else if (timerid >= ARRAY_SIZE(g_posix_timers)) {
9646 ret = -TARGET_EINVAL;
9647 } else {
9648 timer_t htimer = g_posix_timers[timerid];
9649 struct itimerspec hspec;
9650 ret = get_errno(timer_gettime(htimer, &hspec));
9652 if (host_to_target_itimerspec(arg2, &hspec)) {
9653 ret = -TARGET_EFAULT;
9656 break;
9658 #endif
9660 #ifdef TARGET_NR_timer_getoverrun
9661 case TARGET_NR_timer_getoverrun:
9663 /* args: timer_t timerid */
9664 target_ulong timerid = arg1;
9666 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
9667 ret = -TARGET_EINVAL;
9668 } else {
9669 timer_t htimer = g_posix_timers[timerid];
9670 ret = get_errno(timer_getoverrun(htimer));
9672 break;
9674 #endif
9676 #ifdef TARGET_NR_timer_delete
9677 case TARGET_NR_timer_delete:
9679 /* args: timer_t timerid */
9680 target_ulong timerid = arg1;
9682 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
9683 ret = -TARGET_EINVAL;
9684 } else {
9685 timer_t htimer = g_posix_timers[timerid];
9686 ret = get_errno(timer_delete(htimer));
9687 g_posix_timers[timerid] = 0;
9689 break;
9691 #endif
9693 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9694 case TARGET_NR_timerfd_create:
9695 ret = get_errno(timerfd_create(arg1,
9696 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
9697 break;
9698 #endif
9700 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9701 case TARGET_NR_timerfd_gettime:
9703 struct itimerspec its_curr;
9705 ret = get_errno(timerfd_gettime(arg1, &its_curr));
9707 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
9708 goto efault;
9711 break;
9712 #endif
9714 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9715 case TARGET_NR_timerfd_settime:
9717 struct itimerspec its_new, its_old, *p_new;
9719 if (arg3) {
9720 if (target_to_host_itimerspec(&its_new, arg3)) {
9721 goto efault;
9723 p_new = &its_new;
9724 } else {
9725 p_new = NULL;
9728 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
9730 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
9731 goto efault;
9734 break;
9735 #endif
9737 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9738 case TARGET_NR_ioprio_get:
9739 ret = get_errno(ioprio_get(arg1, arg2));
9740 break;
9741 #endif
9743 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9744 case TARGET_NR_ioprio_set:
9745 ret = get_errno(ioprio_set(arg1, arg2, arg3));
9746 break;
9747 #endif
9749 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9750 case TARGET_NR_setns:
9751 ret = get_errno(setns(arg1, arg2));
9752 break;
9753 #endif
9754 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9755 case TARGET_NR_unshare:
9756 ret = get_errno(unshare(arg1));
9757 break;
9758 #endif
9760 default:
9761 unimplemented:
9762 gemu_log("qemu: Unsupported syscall: %d\n", num);
9763 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9764 unimplemented_nowarn:
9765 #endif
9766 ret = -TARGET_ENOSYS;
9767 break;
9769 fail:
9770 #ifdef DEBUG
9771 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9772 #endif
9773 if(do_strace)
9774 print_syscall_ret(num, ret);
9775 return ret;
9776 efault:
9777 ret = -TARGET_EFAULT;
9778 goto fail;