qemu/ar7.git: linux-user/syscall.c
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
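/*
 * For illustration: an invocation like
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count)
 * expands to roughly
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * with __NR_sys_getdents #defined below to the host __NR_getdents, so the
 * wrapper issues the host system call directly, bypassing any libc wrapper.
 */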
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#ifdef __NR_getdents
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
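/*
 * These wrappers go through syscall() rather than the C library: glibc
 * typically does not export direct equivalents for some of these calls
 * (gettid and tgkill, for example), and the emulation code wants raw host
 * semantics and host errno values rather than a libc wrapper's behaviour.
 */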
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
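/*
 * Each table entry pairs a (mask, value) test on the target flag word with
 * the (mask, value) to emit in the host flag word; the generic bitmask
 * translation helpers walk the table, and the all-zero entry terminates it.
 * For example, a target open() flags word containing TARGET_O_NONBLOCK is
 * translated to a host flags word containing the host's O_NONBLOCK even
 * when the two constants have different numeric values.
 */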
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}

static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) takes an extra 'mode' parameter when called with
   * the O_CREAT flag.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
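/*
 * For example, when regpairs_aligned() is true a 64-bit syscall argument
 * such as the offset of pread64 must start in an even-numbered argument
 * register, so the guest ABI inserts a padding argument before it; the
 * syscall emulation has to skip that padding slot when reassembling the
 * 64-bit value from its two 32-bit halves.
 */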
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
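/*
 * get_errno() and is_error() follow the kernel's own convention for raw
 * syscall return values: failures are encoded as small negative numbers,
 * so any result that is >= (abi_ulong)(-4096) when viewed as unsigned is
 * treated as a negated (target) errno code, and everything else as a
 * successful return value.
 */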
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
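/*
 * Worked example of the fd_set layout conversion above: with a 32-bit
 * guest (TARGET_ABI_BITS == 32), a set containing fd 33 occupies bit 1 of
 * target word 1 (i = 1, j = 1, so k = 33).  copy_from_user_fdset() reads
 * that bit and calls FD_SET(33, fds) on the host representation, and
 * copy_to_user_fdset() performs the inverse packing, so guest and host may
 * use different word sizes and byte orders for fd_set.
 */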
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
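/*
 * Example: on an Alpha host (HOST_HZ 1024) emulating a target whose
 * TARGET_HZ is 100, a value of 2048 host ticks becomes
 * (2048 * 100) / 1024 = 200 target ticks; the int64_t cast keeps the
 * intermediate product from overflowing a 32-bit long.
 */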
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
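/*
 * Both directions saturate rather than truncate: a target limit that does
 * not fit in the host rlim_t, or a host limit that does not fit in the
 * target's abi_long, is reported as the corresponding RLIM_INFINITY value,
 * which for resource limits is safer than silently wrapping to a small
 * number.
 */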
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
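/*
 * Note that on success the fd sets and the timeval are copied back to the
 * guest: Linux select() rewrites the sets to contain only the ready
 * descriptors and updates the timeout with the time left, and guest
 * programs may rely on either behaviour, so the conversion has to run in
 * both directions around the host call.
 */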
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
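/*
 * Example of the special-case path above: on MIPS the historical pipe()
 * syscall returns the two descriptors in registers $v0 and $v1 rather than
 * filling a user buffer, so the emulation stores the write end in gpr[3]
 * ($v1) and returns the read end as the syscall result; pipe2() never used
 * this convention, hence the is_pipe2 check.
 */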
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __put_user(target_cred->pid, &cred->pid);
            __put_user(target_cred->uid, &cred->uid);
            __put_user(target_cred->gid, &cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
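/*
 * Ancillary (control) data cannot be passed through as raw bytes because
 * the cmsghdr layout, alignment and payload encoding are ABI specific:
 * SCM_RIGHTS carries an array of file descriptors and SCM_CREDENTIALS a
 * pid/uid/gid triple, both of which need byte-swapping between guest and
 * host representations.  Control messages this code does not recognise are
 * copied verbatim and logged as unsupported.
 */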
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = len / sizeof(int);

                for (i = 0; i < numfds; i++)
                    target_fd[i] = tswap32(fd[i]);
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval))
                    goto unimplemented;

                /* copy struct timeval to target */
                target_tv->tv_sec = tswapal(tv->tv_sec);
                target_tv->tv_usec = tswapal(tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = malloc(fprog.len * sizeof(*filter));
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
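/*
 * The long run of TARGET_SO_* cases above only rewrites optname to the
 * host constant and then falls out of the switch to a shared tail that
 * reads a single 32-bit value from guest memory and issues one
 * setsockopt() call; only the options whose argument is not a plain int
 * (timeouts, socket filters, device names) need the dedicated blocks that
 * return directly.
 */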
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    unlock_user(target_vec, target_addr, 0);
 fail2:
    free(vec);
    errno = err;
    return NULL;
}
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            /* read the per-entry length, mirroring lock_iovec() */
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
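/*
 * Taken together, target_to_host_sock_type() and sock_flags_fixup() handle
 * hosts whose kernels predate SOCK_NONBLOCK: the flag is stripped from the
 * socket() type argument and then approximated after creation by setting
 * O_NONBLOCK with fcntl(); SOCK_CLOEXEC has no such fallback, so a guest
 * requesting it on a host without support gets -TARGET_EINVAL.
 */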
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK)
        return -TARGET_EAFNOSUPPORT;
    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
    }
    return ret;
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
2025 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2026 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2027 int flags, int send)
2029 abi_long ret, len;
2030 struct msghdr msg;
2031 int count;
2032 struct iovec *vec;
2033 abi_ulong target_vec;
2035 if (msgp->msg_name) {
2036 msg.msg_namelen = tswap32(msgp->msg_namelen);
2037 msg.msg_name = alloca(msg.msg_namelen+1);
2038 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
2039 msg.msg_namelen);
2040 if (ret) {
2041 goto out2;
2043 } else {
2044 msg.msg_name = NULL;
2045 msg.msg_namelen = 0;
2047 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2048 msg.msg_control = alloca(msg.msg_controllen);
2049 msg.msg_flags = tswap32(msgp->msg_flags);
2051 count = tswapal(msgp->msg_iovlen);
2052 target_vec = tswapal(msgp->msg_iov);
2053 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2054 target_vec, count, send);
2055 if (vec == NULL) {
2056 ret = -host_to_target_errno(errno);
2057 goto out2;
2059 msg.msg_iovlen = count;
2060 msg.msg_iov = vec;
2062 if (send) {
2063 ret = target_to_host_cmsg(&msg, msgp);
2064 if (ret == 0)
2065 ret = get_errno(sendmsg(fd, &msg, flags));
2066 } else {
2067 ret = get_errno(recvmsg(fd, &msg, flags));
2068 if (!is_error(ret)) {
2069 len = ret;
2070 ret = host_to_target_cmsg(msgp, &msg);
2071 if (!is_error(ret)) {
2072 msgp->msg_namelen = tswap32(msg.msg_namelen);
2073 if (msg.msg_name != NULL) {
2074 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2075 msg.msg_name, msg.msg_namelen);
2076 if (ret) {
2077 goto out;
2081 ret = len;
2086 out:
2087 unlock_iovec(vec, target_vec, count, !send);
2088 out2:
2089 return ret;
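/* do_sendrecvmsg() locks the target msghdr and hands it to
 * do_sendrecvmsg_locked(). Must return target values and target errnos. */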
2092 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2093 int flags, int send)
2095 abi_long ret;
2096 struct target_msghdr *msgp;
2098 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2099 msgp,
2100 target_msg,
2101 send ? 1 : 0)) {
2102 return -TARGET_EFAULT;
2104 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2105 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2106 return ret;
2109 #ifdef TARGET_NR_sendmmsg
2110 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2111 * so it might not have this *mmsg-specific flag either.
2113 #ifndef MSG_WAITFORONE
2114 #define MSG_WAITFORONE 0x10000
2115 #endif
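/* do_sendrecvmmsg() implements sendmmsg()/recvmmsg() by iterating over
 * do_sendrecvmsg_locked(). Must return target values and target errnos. */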
2117 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2118 unsigned int vlen, unsigned int flags,
2119 int send)
2121 struct target_mmsghdr *mmsgp;
2122 abi_long ret = 0;
2123 int i;
2125 if (vlen > UIO_MAXIOV) {
2126 vlen = UIO_MAXIOV;
2129 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2130 if (!mmsgp) {
2131 return -TARGET_EFAULT;
2134 for (i = 0; i < vlen; i++) {
2135 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2136 if (is_error(ret)) {
2137 break;
2139 mmsgp[i].msg_len = tswap32(ret);
2140 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2141 if (flags & MSG_WAITFORONE) {
2142 flags |= MSG_DONTWAIT;
2146 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2148 /* Return number of datagrams sent if we sent any at all;
2149 * otherwise return the error.
2151 if (i) {
2152 return i;
2154 return ret;
2156 #endif
2158 /* If we don't have a system accept4() then just call accept.
2159 * The callsites to do_accept4() will ensure that they don't
2160 * pass a non-zero flags argument in this config.
2162 #ifndef CONFIG_ACCEPT4
2163 static inline int accept4(int sockfd, struct sockaddr *addr,
2164 socklen_t *addrlen, int flags)
2166 assert(flags == 0);
2167 return accept(sockfd, addr, addrlen);
2169 #endif
2171 /* do_accept4() Must return target values and target errnos. */
2172 static abi_long do_accept4(int fd, abi_ulong target_addr,
2173 abi_ulong target_addrlen_addr, int flags)
2175 socklen_t addrlen;
2176 void *addr;
2177 abi_long ret;
2178 int host_flags;
2180 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2182 if (target_addr == 0) {
2183 return get_errno(accept4(fd, NULL, NULL, host_flags));
2186 /* linux returns EINVAL if addrlen pointer is invalid */
2187 if (get_user_u32(addrlen, target_addrlen_addr))
2188 return -TARGET_EINVAL;
2190 if ((int)addrlen < 0) {
2191 return -TARGET_EINVAL;
2194 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2195 return -TARGET_EINVAL;
2197 addr = alloca(addrlen);
2199 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2200 if (!is_error(ret)) {
2201 host_to_target_sockaddr(target_addr, addr, addrlen);
2202 if (put_user_u32(addrlen, target_addrlen_addr))
2203 ret = -TARGET_EFAULT;
2205 return ret;
2208 /* do_getpeername() Must return target values and target errnos. */
2209 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2210 abi_ulong target_addrlen_addr)
2212 socklen_t addrlen;
2213 void *addr;
2214 abi_long ret;
2216 if (get_user_u32(addrlen, target_addrlen_addr))
2217 return -TARGET_EFAULT;
2219 if ((int)addrlen < 0) {
2220 return -TARGET_EINVAL;
2223 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2224 return -TARGET_EFAULT;
2226 addr = alloca(addrlen);
2228 ret = get_errno(getpeername(fd, addr, &addrlen));
2229 if (!is_error(ret)) {
2230 host_to_target_sockaddr(target_addr, addr, addrlen);
2231 if (put_user_u32(addrlen, target_addrlen_addr))
2232 ret = -TARGET_EFAULT;
2234 return ret;
2237 /* do_getsockname() Must return target values and target errnos. */
2238 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2239 abi_ulong target_addrlen_addr)
2241 socklen_t addrlen;
2242 void *addr;
2243 abi_long ret;
2245 if (get_user_u32(addrlen, target_addrlen_addr))
2246 return -TARGET_EFAULT;
2248 if ((int)addrlen < 0) {
2249 return -TARGET_EINVAL;
2252 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2253 return -TARGET_EFAULT;
2255 addr = alloca(addrlen);
2257 ret = get_errno(getsockname(fd, addr, &addrlen));
2258 if (!is_error(ret)) {
2259 host_to_target_sockaddr(target_addr, addr, addrlen);
2260 if (put_user_u32(addrlen, target_addrlen_addr))
2261 ret = -TARGET_EFAULT;
2263 return ret;
2266 /* do_socketpair() Must return target values and target errnos. */
2267 static abi_long do_socketpair(int domain, int type, int protocol,
2268 abi_ulong target_tab_addr)
2270 int tab[2];
2271 abi_long ret;
2273 target_to_host_sock_type(&type);
2275 ret = get_errno(socketpair(domain, type, protocol, tab));
2276 if (!is_error(ret)) {
2277 if (put_user_s32(tab[0], target_tab_addr)
2278 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2279 ret = -TARGET_EFAULT;
2281 return ret;
2284 /* do_sendto() Must return target values and target errnos. */
2285 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2286 abi_ulong target_addr, socklen_t addrlen)
2288 void *addr;
2289 void *host_msg;
2290 abi_long ret;
2292 if ((int)addrlen < 0) {
2293 return -TARGET_EINVAL;
2296 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2297 if (!host_msg)
2298 return -TARGET_EFAULT;
2299 if (target_addr) {
2300 addr = alloca(addrlen+1);
2301 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2302 if (ret) {
2303 unlock_user(host_msg, msg, 0);
2304 return ret;
2306 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2307 } else {
2308 ret = get_errno(send(fd, host_msg, len, flags));
2310 unlock_user(host_msg, msg, 0);
2311 return ret;
2314 /* do_recvfrom() Must return target values and target errnos. */
2315 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2316 abi_ulong target_addr,
2317 abi_ulong target_addrlen)
2319 socklen_t addrlen;
2320 void *addr;
2321 void *host_msg;
2322 abi_long ret;
2324 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2325 if (!host_msg)
2326 return -TARGET_EFAULT;
2327 if (target_addr) {
2328 if (get_user_u32(addrlen, target_addrlen)) {
2329 ret = -TARGET_EFAULT;
2330 goto fail;
2332 if ((int)addrlen < 0) {
2333 ret = -TARGET_EINVAL;
2334 goto fail;
2336 addr = alloca(addrlen);
2337 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2338 } else {
2339 addr = NULL; /* To keep compiler quiet. */
2340 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2342 if (!is_error(ret)) {
2343 if (target_addr) {
2344 host_to_target_sockaddr(target_addr, addr, addrlen);
2345 if (put_user_u32(addrlen, target_addrlen)) {
2346 ret = -TARGET_EFAULT;
2347 goto fail;
2350 unlock_user(host_msg, msg, len);
2351 } else {
2352 fail:
2353 unlock_user(host_msg, msg, 0);
2355 return ret;
2358 #ifdef TARGET_NR_socketcall
2359 /* do_socketcall() Must return target values and target errnos. */
2360 static abi_long do_socketcall(int num, abi_ulong vptr)
2362 static const unsigned ac[] = { /* number of arguments per call */
2363 [SOCKOP_socket] = 3, /* domain, type, protocol */
2364 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2365 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2366 [SOCKOP_listen] = 2, /* sockfd, backlog */
2367 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2368 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2369 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2370 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2371 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2372 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2373 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2374 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2375 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2376 [SOCKOP_shutdown] = 2, /* sockfd, how */
2377 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2378 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2379 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2380 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2382 abi_long a[6]; /* max 6 args */
2384 /* first, collect the arguments in a[] according to ac[] */
2385 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2386 unsigned i;
2387 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2388 for (i = 0; i < ac[num]; ++i) {
2389 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2390 return -TARGET_EFAULT;
2395 /* now that we have the args, actually handle the call */
2396 switch (num) {
2397 case SOCKOP_socket: /* domain, type, protocol */
2398 return do_socket(a[0], a[1], a[2]);
2399 case SOCKOP_bind: /* sockfd, addr, addrlen */
2400 return do_bind(a[0], a[1], a[2]);
2401 case SOCKOP_connect: /* sockfd, addr, addrlen */
2402 return do_connect(a[0], a[1], a[2]);
2403 case SOCKOP_listen: /* sockfd, backlog */
2404 return get_errno(listen(a[0], a[1]));
2405 case SOCKOP_accept: /* sockfd, addr, addrlen */
2406 return do_accept4(a[0], a[1], a[2], 0);
2407 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2408 return do_accept4(a[0], a[1], a[2], a[3]);
2409 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2410 return do_getsockname(a[0], a[1], a[2]);
2411 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2412 return do_getpeername(a[0], a[1], a[2]);
2413 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2414 return do_socketpair(a[0], a[1], a[2], a[3]);
2415 case SOCKOP_send: /* sockfd, msg, len, flags */
2416 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2417 case SOCKOP_recv: /* sockfd, msg, len, flags */
2418 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2419 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2420 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2421 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2422 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2423 case SOCKOP_shutdown: /* sockfd, how */
2424 return get_errno(shutdown(a[0], a[1]));
2425 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2426 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2427 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2428 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2429 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2430 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2431 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2432 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2433 default:
2434 gemu_log("Unsupported socketcall: %d\n", num);
2435 return -TARGET_ENOSYS;
2438 #endif
2440 #define N_SHM_REGIONS 32
2442 static struct shm_region {
2443 abi_ulong start;
2444 abi_ulong size;
2445 } shm_regions[N_SHM_REGIONS];
2447 struct target_semid_ds
2449 struct target_ipc_perm sem_perm;
2450 abi_ulong sem_otime;
2451 #if !defined(TARGET_PPC64)
2452 abi_ulong __unused1;
2453 #endif
2454 abi_ulong sem_ctime;
2455 #if !defined(TARGET_PPC64)
2456 abi_ulong __unused2;
2457 #endif
2458 abi_ulong sem_nsems;
2459 abi_ulong __unused3;
2460 abi_ulong __unused4;
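/* Convert the ipc_perm embedded in the target's semid_ds at target_addr
 * into host format. */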
2463 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2464 abi_ulong target_addr)
2466 struct target_ipc_perm *target_ip;
2467 struct target_semid_ds *target_sd;
2469 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2470 return -TARGET_EFAULT;
2471 target_ip = &(target_sd->sem_perm);
2472 host_ip->__key = tswap32(target_ip->__key);
2473 host_ip->uid = tswap32(target_ip->uid);
2474 host_ip->gid = tswap32(target_ip->gid);
2475 host_ip->cuid = tswap32(target_ip->cuid);
2476 host_ip->cgid = tswap32(target_ip->cgid);
2477 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2478 host_ip->mode = tswap32(target_ip->mode);
2479 #else
2480 host_ip->mode = tswap16(target_ip->mode);
2481 #endif
2482 #if defined(TARGET_PPC)
2483 host_ip->__seq = tswap32(target_ip->__seq);
2484 #else
2485 host_ip->__seq = tswap16(target_ip->__seq);
2486 #endif
2487 unlock_user_struct(target_sd, target_addr, 0);
2488 return 0;
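/* Convert a host ipc_perm back into the sem_perm field of the target's
 * semid_ds at target_addr. */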
2491 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2492 struct ipc_perm *host_ip)
2494 struct target_ipc_perm *target_ip;
2495 struct target_semid_ds *target_sd;
2497 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2498 return -TARGET_EFAULT;
2499 target_ip = &(target_sd->sem_perm);
2500 target_ip->__key = tswap32(host_ip->__key);
2501 target_ip->uid = tswap32(host_ip->uid);
2502 target_ip->gid = tswap32(host_ip->gid);
2503 target_ip->cuid = tswap32(host_ip->cuid);
2504 target_ip->cgid = tswap32(host_ip->cgid);
2505 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2506 target_ip->mode = tswap32(host_ip->mode);
2507 #else
2508 target_ip->mode = tswap16(host_ip->mode);
2509 #endif
2510 #if defined(TARGET_PPC)
2511 target_ip->__seq = tswap32(host_ip->__seq);
2512 #else
2513 target_ip->__seq = tswap16(host_ip->__seq);
2514 #endif
2515 unlock_user_struct(target_sd, target_addr, 1);
2516 return 0;
2519 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2520 abi_ulong target_addr)
2522 struct target_semid_ds *target_sd;
2524 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2525 return -TARGET_EFAULT;
2526 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2527 return -TARGET_EFAULT;
2528 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2529 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2530 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2531 unlock_user_struct(target_sd, target_addr, 0);
2532 return 0;
2535 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2536 struct semid_ds *host_sd)
2538 struct target_semid_ds *target_sd;
2540 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2541 return -TARGET_EFAULT;
2542 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2543 return -TARGET_EFAULT;
2544 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2545 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2546 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2547 unlock_user_struct(target_sd, target_addr, 1);
2548 return 0;
2551 struct target_seminfo {
2552 int semmap;
2553 int semmni;
2554 int semmns;
2555 int semmnu;
2556 int semmsl;
2557 int semopm;
2558 int semume;
2559 int semusz;
2560 int semvmx;
2561 int semaem;
2564 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2565 struct seminfo *host_seminfo)
2567 struct target_seminfo *target_seminfo;
2568 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2569 return -TARGET_EFAULT;
2570 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2571 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2572 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2573 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2574 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2575 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2576 __put_user(host_seminfo->semume, &target_seminfo->semume);
2577 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2578 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2579 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2580 unlock_user_struct(target_seminfo, target_addr, 1);
2581 return 0;
2584 union semun {
2585 int val;
2586 struct semid_ds *buf;
2587 unsigned short *array;
2588 struct seminfo *__buf;
2591 union target_semun {
2592 int val;
2593 abi_ulong buf;
2594 abi_ulong array;
2595 abi_ulong __buf;
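/* Read the guest's semaphore value array into a newly allocated host array;
 * on success the caller owns *host_array. */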
2598 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2599 abi_ulong target_addr)
2601 int nsems;
2602 unsigned short *array;
2603 union semun semun;
2604 struct semid_ds semid_ds;
2605 int i, ret;
2607 semun.buf = &semid_ds;
2609 ret = semctl(semid, 0, IPC_STAT, semun);
2610 if (ret == -1)
2611 return get_errno(ret);
2613 nsems = semid_ds.sem_nsems;
2615 *host_array = malloc(nsems*sizeof(unsigned short));
2616 if (!*host_array) {
2617 return -TARGET_ENOMEM;
2619 array = lock_user(VERIFY_READ, target_addr,
2620 nsems*sizeof(unsigned short), 1);
2621 if (!array) {
2622 free(*host_array);
2623 return -TARGET_EFAULT;
2626 for(i=0; i<nsems; i++) {
2627 __get_user((*host_array)[i], &array[i]);
2629 unlock_user(array, target_addr, 0);
2631 return 0;
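/* Copy the host semaphore value array back to the guest and free it. */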
2634 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2635 unsigned short **host_array)
2637 int nsems;
2638 unsigned short *array;
2639 union semun semun;
2640 struct semid_ds semid_ds;
2641 int i, ret;
2643 semun.buf = &semid_ds;
2645 ret = semctl(semid, 0, IPC_STAT, semun);
2646 if (ret == -1)
2647 return get_errno(ret);
2649 nsems = semid_ds.sem_nsems;
2651 array = lock_user(VERIFY_WRITE, target_addr,
2652 nsems*sizeof(unsigned short), 0);
2653 if (!array)
2654 return -TARGET_EFAULT;
2656 for(i=0; i<nsems; i++) {
2657 __put_user((*host_array)[i], &array[i]);
2659 free(*host_array);
2660 unlock_user(array, target_addr, 1);
2662 return 0;
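/* do_semctl() Must return target values and target errnos. */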
2665 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2666 union target_semun target_su)
2668 union semun arg;
2669 struct semid_ds dsarg;
2670 unsigned short *array = NULL;
2671 struct seminfo seminfo;
2672 abi_long ret = -TARGET_EINVAL;
2673 abi_long err;
2674 cmd &= 0xff;
2676 switch( cmd ) {
2677 case GETVAL:
2678 case SETVAL:
2679 /* In 64 bit cross-endian situations, we will erroneously pick up
2680 * the wrong half of the union for the "val" element. To rectify
2681 * this, the entire 8-byte structure is byteswapped, followed by
2682 * a swap of the 4 byte val field. In other cases, the data is
2683 * already in proper host byte order. */
2684 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2685 target_su.buf = tswapal(target_su.buf);
2686 arg.val = tswap32(target_su.val);
2687 } else {
2688 arg.val = target_su.val;
2690 ret = get_errno(semctl(semid, semnum, cmd, arg));
2691 break;
2692 case GETALL:
2693 case SETALL:
2694 err = target_to_host_semarray(semid, &array, target_su.array);
2695 if (err)
2696 return err;
2697 arg.array = array;
2698 ret = get_errno(semctl(semid, semnum, cmd, arg));
2699 err = host_to_target_semarray(semid, target_su.array, &array);
2700 if (err)
2701 return err;
2702 break;
2703 case IPC_STAT:
2704 case IPC_SET:
2705 case SEM_STAT:
2706 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2707 if (err)
2708 return err;
2709 arg.buf = &dsarg;
2710 ret = get_errno(semctl(semid, semnum, cmd, arg));
2711 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2712 if (err)
2713 return err;
2714 break;
2715 case IPC_INFO:
2716 case SEM_INFO:
2717 arg.__buf = &seminfo;
2718 ret = get_errno(semctl(semid, semnum, cmd, arg));
2719 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2720 if (err)
2721 return err;
2722 break;
2723 case IPC_RMID:
2724 case GETPID:
2725 case GETNCNT:
2726 case GETZCNT:
2727 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2728 break;
2731 return ret;
2734 struct target_sembuf {
2735 unsigned short sem_num;
2736 short sem_op;
2737 short sem_flg;
2740 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2741 abi_ulong target_addr,
2742 unsigned nsops)
2744 struct target_sembuf *target_sembuf;
2745 int i;
2747 target_sembuf = lock_user(VERIFY_READ, target_addr,
2748 nsops*sizeof(struct target_sembuf), 1);
2749 if (!target_sembuf)
2750 return -TARGET_EFAULT;
2752 for(i=0; i<nsops; i++) {
2753 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2754 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2755 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2758 unlock_user(target_sembuf, target_addr, 0);
2760 return 0;
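/* do_semop() Must return target values and target errnos. */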
2763 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2765 struct sembuf sops[nsops];
2767 if (target_to_host_sembuf(sops, ptr, nsops))
2768 return -TARGET_EFAULT;
2770 return get_errno(semop(semid, sops, nsops));
2773 struct target_msqid_ds
2775 struct target_ipc_perm msg_perm;
2776 abi_ulong msg_stime;
2777 #if TARGET_ABI_BITS == 32
2778 abi_ulong __unused1;
2779 #endif
2780 abi_ulong msg_rtime;
2781 #if TARGET_ABI_BITS == 32
2782 abi_ulong __unused2;
2783 #endif
2784 abi_ulong msg_ctime;
2785 #if TARGET_ABI_BITS == 32
2786 abi_ulong __unused3;
2787 #endif
2788 abi_ulong __msg_cbytes;
2789 abi_ulong msg_qnum;
2790 abi_ulong msg_qbytes;
2791 abi_ulong msg_lspid;
2792 abi_ulong msg_lrpid;
2793 abi_ulong __unused4;
2794 abi_ulong __unused5;
2797 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2798 abi_ulong target_addr)
2800 struct target_msqid_ds *target_md;
2802 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2803 return -TARGET_EFAULT;
2804 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2805 return -TARGET_EFAULT;
2806 host_md->msg_stime = tswapal(target_md->msg_stime);
2807 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2808 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2809 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2810 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2811 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2812 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2813 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2814 unlock_user_struct(target_md, target_addr, 0);
2815 return 0;
2818 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2819 struct msqid_ds *host_md)
2821 struct target_msqid_ds *target_md;
2823 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2824 return -TARGET_EFAULT;
2825 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2826 return -TARGET_EFAULT;
2827 target_md->msg_stime = tswapal(host_md->msg_stime);
2828 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2829 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2830 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2831 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2832 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2833 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2834 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2835 unlock_user_struct(target_md, target_addr, 1);
2836 return 0;
2839 struct target_msginfo {
2840 int msgpool;
2841 int msgmap;
2842 int msgmax;
2843 int msgmnb;
2844 int msgmni;
2845 int msgssz;
2846 int msgtql;
2847 unsigned short int msgseg;
2850 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2851 struct msginfo *host_msginfo)
2853 struct target_msginfo *target_msginfo;
2854 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2855 return -TARGET_EFAULT;
2856 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2857 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2858 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2859 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2860 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2861 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2862 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2863 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2864 unlock_user_struct(target_msginfo, target_addr, 1);
2865 return 0;
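/* do_msgctl() Must return target values and target errnos. */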
2868 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2870 struct msqid_ds dsarg;
2871 struct msginfo msginfo;
2872 abi_long ret = -TARGET_EINVAL;
2874 cmd &= 0xff;
2876 switch (cmd) {
2877 case IPC_STAT:
2878 case IPC_SET:
2879 case MSG_STAT:
2880 if (target_to_host_msqid_ds(&dsarg,ptr))
2881 return -TARGET_EFAULT;
2882 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2883 if (host_to_target_msqid_ds(ptr,&dsarg))
2884 return -TARGET_EFAULT;
2885 break;
2886 case IPC_RMID:
2887 ret = get_errno(msgctl(msgid, cmd, NULL));
2888 break;
2889 case IPC_INFO:
2890 case MSG_INFO:
2891 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2892 if (host_to_target_msginfo(ptr, &msginfo))
2893 return -TARGET_EFAULT;
2894 break;
2897 return ret;
2900 struct target_msgbuf {
2901 abi_long mtype;
2902 char mtext[1];
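/* do_msgsnd() Must return target values and target errnos. */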
2905 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2906 ssize_t msgsz, int msgflg)
2908 struct target_msgbuf *target_mb;
2909 struct msgbuf *host_mb;
2910 abi_long ret = 0;
2912 if (msgsz < 0) {
2913 return -TARGET_EINVAL;
2916 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2917 return -TARGET_EFAULT;
2918 host_mb = malloc(msgsz+sizeof(long));
2919 if (!host_mb) {
2920 unlock_user_struct(target_mb, msgp, 0);
2921 return -TARGET_ENOMEM;
2923 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2924 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2925 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2926 free(host_mb);
2927 unlock_user_struct(target_mb, msgp, 0);
2929 return ret;
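/* do_msgrcv() Must return target values and target errnos. */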
2932 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2933 unsigned int msgsz, abi_long msgtyp,
2934 int msgflg)
2936 struct target_msgbuf *target_mb;
2937 char *target_mtext;
2938 struct msgbuf *host_mb;
2939 abi_long ret = 0;
2941 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2942 return -TARGET_EFAULT;
2944 host_mb = g_malloc(msgsz+sizeof(long));
2945 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2947 if (ret > 0) {
2948 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2949 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2950 if (!target_mtext) {
2951 ret = -TARGET_EFAULT;
2952 goto end;
2954 memcpy(target_mb->mtext, host_mb->mtext, ret);
2955 unlock_user(target_mtext, target_mtext_addr, ret);
2958 target_mb->mtype = tswapal(host_mb->mtype);
2960 end:
2961 if (target_mb)
2962 unlock_user_struct(target_mb, msgp, 1);
2963 g_free(host_mb);
2964 return ret;
2967 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2968 abi_ulong target_addr)
2970 struct target_shmid_ds *target_sd;
2972 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2973 return -TARGET_EFAULT;
2974 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2975 return -TARGET_EFAULT;
2976 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2977 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2978 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2979 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2980 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2981 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2982 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2983 unlock_user_struct(target_sd, target_addr, 0);
2984 return 0;
2987 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2988 struct shmid_ds *host_sd)
2990 struct target_shmid_ds *target_sd;
2992 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2993 return -TARGET_EFAULT;
2994 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2995 return -TARGET_EFAULT;
2996 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2997 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2998 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2999 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3000 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3001 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3002 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3003 unlock_user_struct(target_sd, target_addr, 1);
3004 return 0;
3007 struct target_shminfo {
3008 abi_ulong shmmax;
3009 abi_ulong shmmin;
3010 abi_ulong shmmni;
3011 abi_ulong shmseg;
3012 abi_ulong shmall;
3015 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3016 struct shminfo *host_shminfo)
3018 struct target_shminfo *target_shminfo;
3019 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3020 return -TARGET_EFAULT;
3021 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3022 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3023 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3024 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3025 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3026 unlock_user_struct(target_shminfo, target_addr, 1);
3027 return 0;
3030 struct target_shm_info {
3031 int used_ids;
3032 abi_ulong shm_tot;
3033 abi_ulong shm_rss;
3034 abi_ulong shm_swp;
3035 abi_ulong swap_attempts;
3036 abi_ulong swap_successes;
3039 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3040 struct shm_info *host_shm_info)
3042 struct target_shm_info *target_shm_info;
3043 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3044 return -TARGET_EFAULT;
3045 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3046 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3047 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3048 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3049 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3050 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3051 unlock_user_struct(target_shm_info, target_addr, 1);
3052 return 0;
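/* do_shmctl() Must return target values and target errnos. */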
3055 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3057 struct shmid_ds dsarg;
3058 struct shminfo shminfo;
3059 struct shm_info shm_info;
3060 abi_long ret = -TARGET_EINVAL;
3062 cmd &= 0xff;
3064 switch(cmd) {
3065 case IPC_STAT:
3066 case IPC_SET:
3067 case SHM_STAT:
3068 if (target_to_host_shmid_ds(&dsarg, buf))
3069 return -TARGET_EFAULT;
3070 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3071 if (host_to_target_shmid_ds(buf, &dsarg))
3072 return -TARGET_EFAULT;
3073 break;
3074 case IPC_INFO:
3075 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3076 if (host_to_target_shminfo(buf, &shminfo))
3077 return -TARGET_EFAULT;
3078 break;
3079 case SHM_INFO:
3080 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3081 if (host_to_target_shm_info(buf, &shm_info))
3082 return -TARGET_EFAULT;
3083 break;
3084 case IPC_RMID:
3085 case SHM_LOCK:
3086 case SHM_UNLOCK:
3087 ret = get_errno(shmctl(shmid, cmd, NULL));
3088 break;
3091 return ret;
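/* Attach the shared memory segment into the guest address space and record
 * it in shm_regions[]; returns the guest address, or an errno on failure. */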
3094 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3096 abi_long raddr;
3097 void *host_raddr;
3098 struct shmid_ds shm_info;
3099 int i,ret;
3101 /* find out the length of the shared memory segment */
3102 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3103 if (is_error(ret)) {
3104 /* can't get length, bail out */
3105 return ret;
3108 mmap_lock();
3110 if (shmaddr)
3111 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3112 else {
3113 abi_ulong mmap_start;
3115 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3117 if (mmap_start == -1) {
3118 errno = ENOMEM;
3119 host_raddr = (void *)-1;
3120 } else
3121 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3124 if (host_raddr == (void *)-1) {
3125 mmap_unlock();
3126 return get_errno((long)host_raddr);
3128 raddr=h2g((unsigned long)host_raddr);
3130 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3131 PAGE_VALID | PAGE_READ |
3132 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3134 for (i = 0; i < N_SHM_REGIONS; i++) {
3135 if (shm_regions[i].start == 0) {
3136 shm_regions[i].start = raddr;
3137 shm_regions[i].size = shm_info.shm_segsz;
3138 break;
3142 mmap_unlock();
3143 return raddr;
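/* Detach a segment previously mapped by do_shmat() and drop its
 * shm_regions[] entry. */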
3147 static inline abi_long do_shmdt(abi_ulong shmaddr)
3149 int i;
3151 for (i = 0; i < N_SHM_REGIONS; ++i) {
3152 if (shm_regions[i].start == shmaddr) {
3153 shm_regions[i].start = 0;
3154 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3155 break;
3159 return get_errno(shmdt(g2h(shmaddr)));
3162 #ifdef TARGET_NR_ipc
3163 /* ??? This only works with linear mappings. */
3164 /* do_ipc() must return target values and target errnos. */
3165 static abi_long do_ipc(unsigned int call, abi_long first,
3166 abi_long second, abi_long third,
3167 abi_long ptr, abi_long fifth)
3169 int version;
3170 abi_long ret = 0;
3172 version = call >> 16;
3173 call &= 0xffff;
3175 switch (call) {
3176 case IPCOP_semop:
3177 ret = do_semop(first, ptr, second);
3178 break;
3180 case IPCOP_semget:
3181 ret = get_errno(semget(first, second, third));
3182 break;
3184 case IPCOP_semctl: {
3185 /* The semun argument to semctl is passed by value, so dereference the
3186 * ptr argument. */
3187 abi_ulong atptr;
3188 get_user_ual(atptr, ptr);
3189 ret = do_semctl(first, second, third,
3190 (union target_semun) atptr);
3191 break;
3194 case IPCOP_msgget:
3195 ret = get_errno(msgget(first, second));
3196 break;
3198 case IPCOP_msgsnd:
3199 ret = do_msgsnd(first, ptr, second, third);
3200 break;
3202 case IPCOP_msgctl:
3203 ret = do_msgctl(first, second, ptr);
3204 break;
3206 case IPCOP_msgrcv:
3207 switch (version) {
3208 case 0:
3210 struct target_ipc_kludge {
3211 abi_long msgp;
3212 abi_long msgtyp;
3213 } *tmp;
3215 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3216 ret = -TARGET_EFAULT;
3217 break;
3220 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3222 unlock_user_struct(tmp, ptr, 0);
3223 break;
3225 default:
3226 ret = do_msgrcv(first, ptr, second, fifth, third);
3228 break;
3230 case IPCOP_shmat:
3231 switch (version) {
3232 default:
3234 abi_ulong raddr;
3235 raddr = do_shmat(first, ptr, second);
3236 if (is_error(raddr))
3237 return get_errno(raddr);
3238 if (put_user_ual(raddr, third))
3239 return -TARGET_EFAULT;
3240 break;
3242 case 1:
3243 ret = -TARGET_EINVAL;
3244 break;
3246 break;
3247 case IPCOP_shmdt:
3248 ret = do_shmdt(ptr);
3249 break;
3251 case IPCOP_shmget:
3252 /* IPC_* flag values are the same on all linux platforms */
3253 ret = get_errno(shmget(first, second, third));
3254 break;
3256 /* IPC_* and SHM_* command values are the same on all linux platforms */
3257 case IPCOP_shmctl:
3258 ret = do_shmctl(first, second, ptr);
3259 break;
3260 default:
3261 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3262 ret = -TARGET_ENOSYS;
3263 break;
3265 return ret;
3267 #endif
3269 /* kernel structure types definitions */
3271 #define STRUCT(name, ...) STRUCT_ ## name,
3272 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3273 enum {
3274 #include "syscall_types.h"
3276 #undef STRUCT
3277 #undef STRUCT_SPECIAL
3279 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3280 #define STRUCT_SPECIAL(name)
3281 #include "syscall_types.h"
3282 #undef STRUCT
3283 #undef STRUCT_SPECIAL
3285 typedef struct IOCTLEntry IOCTLEntry;
3287 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3288 int fd, abi_long cmd, abi_long arg);
3290 struct IOCTLEntry {
3291 int target_cmd;
3292 unsigned int host_cmd;
3293 const char *name;
3294 int access;
3295 do_ioctl_fn *do_ioctl;
3296 const argtype arg_type[5];
3299 #define IOC_R 0x0001
3300 #define IOC_W 0x0002
3301 #define IOC_RW (IOC_R | IOC_W)
3303 #define MAX_STRUCT_SIZE 4096
3305 #ifdef CONFIG_FIEMAP
3306 /* So fiemap access checks don't overflow on 32 bit systems.
3307 * This is very slightly smaller than the limit imposed by
3308 * the underlying kernel.
3310 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3311 / sizeof(struct fiemap_extent))
3313 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3314 int fd, abi_long cmd, abi_long arg)
3316 /* The parameter for this ioctl is a struct fiemap followed
3317 * by an array of struct fiemap_extent whose size is set
3318 * in fiemap->fm_extent_count. The array is filled in by the
3319 * ioctl.
3321 int target_size_in, target_size_out;
3322 struct fiemap *fm;
3323 const argtype *arg_type = ie->arg_type;
3324 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3325 void *argptr, *p;
3326 abi_long ret;
3327 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3328 uint32_t outbufsz;
3329 int free_fm = 0;
3331 assert(arg_type[0] == TYPE_PTR);
3332 assert(ie->access == IOC_RW);
3333 arg_type++;
3334 target_size_in = thunk_type_size(arg_type, 0);
3335 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3336 if (!argptr) {
3337 return -TARGET_EFAULT;
3339 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3340 unlock_user(argptr, arg, 0);
3341 fm = (struct fiemap *)buf_temp;
3342 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3343 return -TARGET_EINVAL;
3346 outbufsz = sizeof (*fm) +
3347 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3349 if (outbufsz > MAX_STRUCT_SIZE) {
3350 /* We can't fit all the extents into the fixed size buffer.
3351 * Allocate one that is large enough and use it instead.
3353 fm = malloc(outbufsz);
3354 if (!fm) {
3355 return -TARGET_ENOMEM;
3357 memcpy(fm, buf_temp, sizeof(struct fiemap));
3358 free_fm = 1;
3360 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3361 if (!is_error(ret)) {
3362 target_size_out = target_size_in;
3363 /* An extent_count of 0 means we were only counting the extents
3364 * so there are no structs to copy
3366 if (fm->fm_extent_count != 0) {
3367 target_size_out += fm->fm_mapped_extents * extent_size;
3369 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3370 if (!argptr) {
3371 ret = -TARGET_EFAULT;
3372 } else {
3373 /* Convert the struct fiemap */
3374 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3375 if (fm->fm_extent_count != 0) {
3376 p = argptr + target_size_in;
3377 /* ...and then all the struct fiemap_extents */
3378 for (i = 0; i < fm->fm_mapped_extents; i++) {
3379 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3380 THUNK_TARGET);
3381 p += extent_size;
3384 unlock_user(argptr, arg, target_size_out);
3387 if (free_fm) {
3388 free(fm);
3390 return ret;
3392 #endif
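/* SIOCGIFCONF needs special handling because the ifc_buf pointer and the
 * size of struct ifreq differ between target and host. */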
3394 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3395 int fd, abi_long cmd, abi_long arg)
3397 const argtype *arg_type = ie->arg_type;
3398 int target_size;
3399 void *argptr;
3400 int ret;
3401 struct ifconf *host_ifconf;
3402 uint32_t outbufsz;
3403 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3404 int target_ifreq_size;
3405 int nb_ifreq;
3406 int free_buf = 0;
3407 int i;
3408 int target_ifc_len;
3409 abi_long target_ifc_buf;
3410 int host_ifc_len;
3411 char *host_ifc_buf;
3413 assert(arg_type[0] == TYPE_PTR);
3414 assert(ie->access == IOC_RW);
3416 arg_type++;
3417 target_size = thunk_type_size(arg_type, 0);
3419 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3420 if (!argptr)
3421 return -TARGET_EFAULT;
3422 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3423 unlock_user(argptr, arg, 0);
3425 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3426 target_ifc_len = host_ifconf->ifc_len;
3427 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3429 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3430 nb_ifreq = target_ifc_len / target_ifreq_size;
3431 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3433 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3434 if (outbufsz > MAX_STRUCT_SIZE) {
3435 /* We can't fit all the ifreq entries into the fixed size buffer.
3436 * Allocate one that is large enough and use it instead.
3438 host_ifconf = malloc(outbufsz);
3439 if (!host_ifconf) {
3440 return -TARGET_ENOMEM;
3442 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3443 free_buf = 1;
3445 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3447 host_ifconf->ifc_len = host_ifc_len;
3448 host_ifconf->ifc_buf = host_ifc_buf;
3450 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3451 if (!is_error(ret)) {
3452 /* convert host ifc_len to target ifc_len */
3454 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3455 target_ifc_len = nb_ifreq * target_ifreq_size;
3456 host_ifconf->ifc_len = target_ifc_len;
3458 /* restore target ifc_buf */
3460 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3462 /* copy struct ifconf to target user */
3464 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3465 if (!argptr)
3466 return -TARGET_EFAULT;
3467 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3468 unlock_user(argptr, arg, target_size);
3470 /* copy ifreq[] to target user */
3472 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3473 for (i = 0; i < nb_ifreq ; i++) {
3474 thunk_convert(argptr + i * target_ifreq_size,
3475 host_ifc_buf + i * sizeof(struct ifreq),
3476 ifreq_arg_type, THUNK_TARGET);
3478 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3481 if (free_buf) {
3482 free(host_ifconf);
3485 return ret;
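/* Device-mapper ioctls carry a variable-sized payload after struct dm_ioctl;
 * convert the header and payload in both directions around the host ioctl. */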
3488 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3489 abi_long cmd, abi_long arg)
3491 void *argptr;
3492 struct dm_ioctl *host_dm;
3493 abi_long guest_data;
3494 uint32_t guest_data_size;
3495 int target_size;
3496 const argtype *arg_type = ie->arg_type;
3497 abi_long ret;
3498 void *big_buf = NULL;
3499 char *host_data;
3501 arg_type++;
3502 target_size = thunk_type_size(arg_type, 0);
3503 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3504 if (!argptr) {
3505 ret = -TARGET_EFAULT;
3506 goto out;
3508 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3509 unlock_user(argptr, arg, 0);
3511 /* buf_temp is too small, so fetch things into a bigger buffer */
3512 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3513 memcpy(big_buf, buf_temp, target_size);
3514 buf_temp = big_buf;
3515 host_dm = big_buf;
3517 guest_data = arg + host_dm->data_start;
3518 if ((guest_data - arg) < 0) {
3519 ret = -EINVAL;
3520 goto out;
3522 guest_data_size = host_dm->data_size - host_dm->data_start;
3523 host_data = (char*)host_dm + host_dm->data_start;
3525 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3526 switch (ie->host_cmd) {
3527 case DM_REMOVE_ALL:
3528 case DM_LIST_DEVICES:
3529 case DM_DEV_CREATE:
3530 case DM_DEV_REMOVE:
3531 case DM_DEV_SUSPEND:
3532 case DM_DEV_STATUS:
3533 case DM_DEV_WAIT:
3534 case DM_TABLE_STATUS:
3535 case DM_TABLE_CLEAR:
3536 case DM_TABLE_DEPS:
3537 case DM_LIST_VERSIONS:
3538 /* no input data */
3539 break;
3540 case DM_DEV_RENAME:
3541 case DM_DEV_SET_GEOMETRY:
3542 /* data contains only strings */
3543 memcpy(host_data, argptr, guest_data_size);
3544 break;
3545 case DM_TARGET_MSG:
3546 memcpy(host_data, argptr, guest_data_size);
3547 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3548 break;
3549 case DM_TABLE_LOAD:
3551 void *gspec = argptr;
3552 void *cur_data = host_data;
3553 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3554 int spec_size = thunk_type_size(arg_type, 0);
3555 int i;
3557 for (i = 0; i < host_dm->target_count; i++) {
3558 struct dm_target_spec *spec = cur_data;
3559 uint32_t next;
3560 int slen;
3562 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3563 slen = strlen((char*)gspec + spec_size) + 1;
3564 next = spec->next;
3565 spec->next = sizeof(*spec) + slen;
3566 strcpy((char*)&spec[1], gspec + spec_size);
3567 gspec += next;
3568 cur_data += spec->next;
3570 break;
3572 default:
3573 ret = -TARGET_EINVAL;
3574 goto out;
3576 unlock_user(argptr, guest_data, 0);
3578 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3579 if (!is_error(ret)) {
3580 guest_data = arg + host_dm->data_start;
3581 guest_data_size = host_dm->data_size - host_dm->data_start;
3582 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3583 switch (ie->host_cmd) {
3584 case DM_REMOVE_ALL:
3585 case DM_DEV_CREATE:
3586 case DM_DEV_REMOVE:
3587 case DM_DEV_RENAME:
3588 case DM_DEV_SUSPEND:
3589 case DM_DEV_STATUS:
3590 case DM_TABLE_LOAD:
3591 case DM_TABLE_CLEAR:
3592 case DM_TARGET_MSG:
3593 case DM_DEV_SET_GEOMETRY:
3594 /* no return data */
3595 break;
3596 case DM_LIST_DEVICES:
3598 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3599 uint32_t remaining_data = guest_data_size;
3600 void *cur_data = argptr;
3601 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3602 int nl_size = 12; /* can't use thunk_size due to alignment */
3604 while (1) {
3605 uint32_t next = nl->next;
3606 if (next) {
3607 nl->next = nl_size + (strlen(nl->name) + 1);
3609 if (remaining_data < nl->next) {
3610 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3611 break;
3613 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3614 strcpy(cur_data + nl_size, nl->name);
3615 cur_data += nl->next;
3616 remaining_data -= nl->next;
3617 if (!next) {
3618 break;
3620 nl = (void*)nl + next;
3622 break;
3624 case DM_DEV_WAIT:
3625 case DM_TABLE_STATUS:
3627 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3628 void *cur_data = argptr;
3629 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3630 int spec_size = thunk_type_size(arg_type, 0);
3631 int i;
3633 for (i = 0; i < host_dm->target_count; i++) {
3634 uint32_t next = spec->next;
3635 int slen = strlen((char*)&spec[1]) + 1;
3636 spec->next = (cur_data - argptr) + spec_size + slen;
3637 if (guest_data_size < spec->next) {
3638 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3639 break;
3641 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3642 strcpy(cur_data + spec_size, (char*)&spec[1]);
3643 cur_data = argptr + spec->next;
3644 spec = (void*)host_dm + host_dm->data_start + next;
3646 break;
3648 case DM_TABLE_DEPS:
3650 void *hdata = (void*)host_dm + host_dm->data_start;
3651 int count = *(uint32_t*)hdata;
3652 uint64_t *hdev = hdata + 8;
3653 uint64_t *gdev = argptr + 8;
3654 int i;
3656 *(uint32_t*)argptr = tswap32(count);
3657 for (i = 0; i < count; i++) {
3658 *gdev = tswap64(*hdev);
3659 gdev++;
3660 hdev++;
3662 break;
3664 case DM_LIST_VERSIONS:
3666 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3667 uint32_t remaining_data = guest_data_size;
3668 void *cur_data = argptr;
3669 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3670 int vers_size = thunk_type_size(arg_type, 0);
3672 while (1) {
3673 uint32_t next = vers->next;
3674 if (next) {
3675 vers->next = vers_size + (strlen(vers->name) + 1);
3677 if (remaining_data < vers->next) {
3678 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3679 break;
3681 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3682 strcpy(cur_data + vers_size, vers->name);
3683 cur_data += vers->next;
3684 remaining_data -= vers->next;
3685 if (!next) {
3686 break;
3688 vers = (void*)vers + next;
3690 break;
3692 default:
3693 ret = -TARGET_EINVAL;
3694 goto out;
3696 unlock_user(argptr, guest_data, guest_data_size);
3698 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3699 if (!argptr) {
3700 ret = -TARGET_EFAULT;
3701 goto out;
3703 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3704 unlock_user(argptr, arg, target_size);
3706 out:
3707 g_free(big_buf);
3708 return ret;
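/* BLKPG: convert struct blkpg_ioctl_arg and the struct blkpg_partition it
 * points to, then redirect the data pointer at the local host copy. */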
3711 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3712 abi_long cmd, abi_long arg)
3714 void *argptr;
3715 int target_size;
3716 const argtype *arg_type = ie->arg_type;
3717 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3718 abi_long ret;
3720 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3721 struct blkpg_partition host_part;
3723 /* Read and convert blkpg */
3724 arg_type++;
3725 target_size = thunk_type_size(arg_type, 0);
3726 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3727 if (!argptr) {
3728 ret = -TARGET_EFAULT;
3729 goto out;
3731 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3732 unlock_user(argptr, arg, 0);
3734 switch (host_blkpg->op) {
3735 case BLKPG_ADD_PARTITION:
3736 case BLKPG_DEL_PARTITION:
3737 /* payload is struct blkpg_partition */
3738 break;
3739 default:
3740 /* Unknown opcode */
3741 ret = -TARGET_EINVAL;
3742 goto out;
3745 /* Read and convert blkpg->data */
3746 arg = (abi_long)(uintptr_t)host_blkpg->data;
3747 target_size = thunk_type_size(part_arg_type, 0);
3748 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3749 if (!argptr) {
3750 ret = -TARGET_EFAULT;
3751 goto out;
3753 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3754 unlock_user(argptr, arg, 0);
3756 /* Swizzle the data pointer to our local copy and call! */
3757 host_blkpg->data = &host_part;
3758 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3760 out:
3761 return ret;
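/* Routing table ioctls take a struct rtentry; convert it field by field so
 * that the rt_dev string pointer can be locked and passed to the host. */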
3764 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3765 int fd, abi_long cmd, abi_long arg)
3767 const argtype *arg_type = ie->arg_type;
3768 const StructEntry *se;
3769 const argtype *field_types;
3770 const int *dst_offsets, *src_offsets;
3771 int target_size;
3772 void *argptr;
3773 abi_ulong *target_rt_dev_ptr;
3774 unsigned long *host_rt_dev_ptr;
3775 abi_long ret;
3776 int i;
3778 assert(ie->access == IOC_W);
3779 assert(*arg_type == TYPE_PTR);
3780 arg_type++;
3781 assert(*arg_type == TYPE_STRUCT);
3782 target_size = thunk_type_size(arg_type, 0);
3783 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3784 if (!argptr) {
3785 return -TARGET_EFAULT;
3787 arg_type++;
3788 assert(*arg_type == (int)STRUCT_rtentry);
3789 se = struct_entries + *arg_type++;
3790 assert(se->convert[0] == NULL);
3791 /* Convert the struct field by field here so we can intercept the rt_dev string. */
3792 field_types = se->field_types;
3793 dst_offsets = se->field_offsets[THUNK_HOST];
3794 src_offsets = se->field_offsets[THUNK_TARGET];
3795 for (i = 0; i < se->nb_fields; i++) {
3796 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3797 assert(*field_types == TYPE_PTRVOID);
3798 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3799 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3800 if (*target_rt_dev_ptr != 0) {
3801 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3802 tswapal(*target_rt_dev_ptr));
3803 if (!*host_rt_dev_ptr) {
3804 unlock_user(argptr, arg, 0);
3805 return -TARGET_EFAULT;
3807 } else {
3808 *host_rt_dev_ptr = 0;
3810 field_types++;
3811 continue;
3813 field_types = thunk_convert(buf_temp + dst_offsets[i],
3814 argptr + src_offsets[i],
3815 field_types, THUNK_HOST);
3817 unlock_user(argptr, arg, 0);
3819 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3820 if (*host_rt_dev_ptr != 0) {
3821 unlock_user((void *)*host_rt_dev_ptr,
3822 *target_rt_dev_ptr, 0);
3824 return ret;
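/* KDSIGACCEPT passes a signal number, which must be converted to the host
 * signal before the ioctl. */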
3827 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3828 int fd, abi_long cmd, abi_long arg)
3830 int sig = target_to_host_signal(arg);
3831 return get_errno(ioctl(fd, ie->host_cmd, sig));
3834 static IOCTLEntry ioctl_entries[] = {
3835 #define IOCTL(cmd, access, ...) \
3836 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3837 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3838 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3839 #include "ioctls.h"
3840 { 0, 0, },
3843 /* ??? Implement proper locking for ioctls. */
3844 /* do_ioctl() Must return target values and target errnos. */
3845 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3847 const IOCTLEntry *ie;
3848 const argtype *arg_type;
3849 abi_long ret;
3850 uint8_t buf_temp[MAX_STRUCT_SIZE];
3851 int target_size;
3852 void *argptr;
3854 ie = ioctl_entries;
3855 for(;;) {
3856 if (ie->target_cmd == 0) {
3857 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3858 return -TARGET_ENOSYS;
3860 if (ie->target_cmd == cmd)
3861 break;
3862 ie++;
3864 arg_type = ie->arg_type;
3865 #if defined(DEBUG)
3866 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3867 #endif
3868 if (ie->do_ioctl) {
3869 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3872 switch(arg_type[0]) {
3873 case TYPE_NULL:
3874 /* no argument */
3875 ret = get_errno(ioctl(fd, ie->host_cmd));
3876 break;
3877 case TYPE_PTRVOID:
3878 case TYPE_INT:
3879 /* int argument */
3880 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3881 break;
3882 case TYPE_PTR:
3883 arg_type++;
3884 target_size = thunk_type_size(arg_type, 0);
3885 switch(ie->access) {
3886 case IOC_R:
3887 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3888 if (!is_error(ret)) {
3889 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3890 if (!argptr)
3891 return -TARGET_EFAULT;
3892 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3893 unlock_user(argptr, arg, target_size);
3895 break;
3896 case IOC_W:
3897 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3898 if (!argptr)
3899 return -TARGET_EFAULT;
3900 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3901 unlock_user(argptr, arg, 0);
3902 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3903 break;
3904 default:
3905 case IOC_RW:
3906 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3907 if (!argptr)
3908 return -TARGET_EFAULT;
3909 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3910 unlock_user(argptr, arg, 0);
3911 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3912 if (!is_error(ret)) {
3913 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3914 if (!argptr)
3915 return -TARGET_EFAULT;
3916 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3917 unlock_user(argptr, arg, target_size);
3919 break;
3921 break;
3922 default:
3923 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3924 (long)cmd, arg_type[0]);
3925 ret = -TARGET_ENOSYS;
3926 break;
3928 return ret;
3931 static const bitmask_transtbl iflag_tbl[] = {
3932 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3933 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3934 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3935 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3936 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3937 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3938 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3939 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3940 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3941 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3942 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3943 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3944 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3945 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3946 { 0, 0, 0, 0 }
3949 static const bitmask_transtbl oflag_tbl[] = {
3950 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3951 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3952 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3953 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3954 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3955 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3956 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3957 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3958 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3959 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3960 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3961 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3962 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3963 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3964 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3965 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3966 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3967 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3968 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3969 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3970 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3971 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3972 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3973 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3974 { 0, 0, 0, 0 }
3977 static const bitmask_transtbl cflag_tbl[] = {
3978 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3979 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3980 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3981 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3982 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3983 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3984 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3985 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3986 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3987 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3988 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3989 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3990 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3991 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3992 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3993 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3994 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3995 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3996 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3997 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3998 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3999 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4000 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4001 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4002 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4003 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4004 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4005 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4006 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4007 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4008 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4009 { 0, 0, 0, 0 }
4012 static const bitmask_transtbl lflag_tbl[] = {
4013 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4014 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4015 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4016 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4017 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4018 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4019 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4020 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4021 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4022 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4023 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4024 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4025 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4026 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4027 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4028 { 0, 0, 0, 0 }
4031 static void target_to_host_termios (void *dst, const void *src)
4033 struct host_termios *host = dst;
4034 const struct target_termios *target = src;
4036 host->c_iflag =
4037 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4038 host->c_oflag =
4039 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4040 host->c_cflag =
4041 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4042 host->c_lflag =
4043 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4044 host->c_line = target->c_line;
4046 memset(host->c_cc, 0, sizeof(host->c_cc));
4047 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4048 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4049 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4050 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4051 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4052 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4053 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4054 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4055 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4056 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4057 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4058 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4059 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4060 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4061 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4062 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4063 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4066 static void host_to_target_termios (void *dst, const void *src)
4068 struct target_termios *target = dst;
4069 const struct host_termios *host = src;
4071 target->c_iflag =
4072 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4073 target->c_oflag =
4074 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4075 target->c_cflag =
4076 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4077 target->c_lflag =
4078 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4079 target->c_line = host->c_line;
4081 memset(target->c_cc, 0, sizeof(target->c_cc));
4082 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4083 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4084 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4085 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4086 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4087 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4088 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4089 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4090 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4091 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4092 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4093 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4094 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4095 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4096 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4097 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4098 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
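/* The StructEntry below is registered by syscall_init() (see the STRUCT /
 * STRUCT_SPECIAL macros there), so the generic ioctl thunking code can
 * convert termios structures in either direction using the two converter
 * functions above. */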
4101 static const StructEntry struct_termios_def = {
4102 .convert = { host_to_target_termios, target_to_host_termios },
4103 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4104 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4107 static bitmask_transtbl mmap_flags_tbl[] = {
4108 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4109 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4110 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4111 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4112 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4113 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4114 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4115 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4116 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4117 MAP_NORESERVE },
4118 { 0, 0, 0, 0 }
4121 #if defined(TARGET_I386)
4123 /* NOTE: there is really only one LDT, shared by all threads */
4124 static uint8_t *ldt_table;
4126 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4128 int size;
4129 void *p;
4131 if (!ldt_table)
4132 return 0;
4133 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4134 if (size > bytecount)
4135 size = bytecount;
4136 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4137 if (!p)
4138 return -TARGET_EFAULT;
4139 /* ??? Should this be byteswapped? */
4140 memcpy(p, ldt_table, size);
4141 unlock_user(p, ptr, size);
4142 return size;
4145 /* XXX: add locking support */
4146 static abi_long write_ldt(CPUX86State *env,
4147 abi_ulong ptr, unsigned long bytecount, int oldmode)
4149 struct target_modify_ldt_ldt_s ldt_info;
4150 struct target_modify_ldt_ldt_s *target_ldt_info;
4151 int seg_32bit, contents, read_exec_only, limit_in_pages;
4152 int seg_not_present, useable, lm;
4153 uint32_t *lp, entry_1, entry_2;
4155 if (bytecount != sizeof(ldt_info))
4156 return -TARGET_EINVAL;
4157 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4158 return -TARGET_EFAULT;
4159 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4160 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4161 ldt_info.limit = tswap32(target_ldt_info->limit);
4162 ldt_info.flags = tswap32(target_ldt_info->flags);
4163 unlock_user_struct(target_ldt_info, ptr, 0);
4165 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4166 return -TARGET_EINVAL;
4167 seg_32bit = ldt_info.flags & 1;
4168 contents = (ldt_info.flags >> 1) & 3;
4169 read_exec_only = (ldt_info.flags >> 3) & 1;
4170 limit_in_pages = (ldt_info.flags >> 4) & 1;
4171 seg_not_present = (ldt_info.flags >> 5) & 1;
4172 useable = (ldt_info.flags >> 6) & 1;
4173 #ifdef TARGET_ABI32
4174 lm = 0;
4175 #else
4176 lm = (ldt_info.flags >> 7) & 1;
4177 #endif
4178 if (contents == 3) {
4179 if (oldmode)
4180 return -TARGET_EINVAL;
4181 if (seg_not_present == 0)
4182 return -TARGET_EINVAL;
4184 /* allocate the LDT */
4185 if (!ldt_table) {
4186 env->ldt.base = target_mmap(0,
4187 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4188 PROT_READ|PROT_WRITE,
4189 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4190 if (env->ldt.base == -1)
4191 return -TARGET_ENOMEM;
4192 memset(g2h(env->ldt.base), 0,
4193 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4194 env->ldt.limit = 0xffff;
4195 ldt_table = g2h(env->ldt.base);
4198 /* NOTE: same code as Linux kernel */
4199 /* Allow LDTs to be cleared by the user. */
4200 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4201 if (oldmode ||
4202 (contents == 0 &&
4203 read_exec_only == 1 &&
4204 seg_32bit == 0 &&
4205 limit_in_pages == 0 &&
4206 seg_not_present == 1 &&
4207 useable == 0 )) {
4208 entry_1 = 0;
4209 entry_2 = 0;
4210 goto install;
4214 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4215 (ldt_info.limit & 0x0ffff);
4216 entry_2 = (ldt_info.base_addr & 0xff000000) |
4217 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4218 (ldt_info.limit & 0xf0000) |
4219 ((read_exec_only ^ 1) << 9) |
4220 (contents << 10) |
4221 ((seg_not_present ^ 1) << 15) |
4222 (seg_32bit << 22) |
4223 (limit_in_pages << 23) |
4224 (lm << 21) |
4225 0x7000;
4226 if (!oldmode)
4227 entry_2 |= (useable << 20);
4229 /* Install the new entry ... */
4230 install:
4231 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4232 lp[0] = tswap32(entry_1);
4233 lp[1] = tswap32(entry_2);
4234 return 0;
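/* Note on the encoding above: entry_1/entry_2 are the low and high 32-bit
 * words of an x86 segment descriptor.  entry_1 carries base[15:0] and
 * limit[15:0]; entry_2 carries base[31:24], base[23:16], limit[19:16],
 * the type bits (contents and the inverted read/exec-only flag), the
 * inverted "not present" flag, the D/B (32-bit) bit, the granularity
 * ("limit in pages") bit, the AVL ("useable") bit, the long-mode bit
 * outside TARGET_ABI32, and the constant 0x7000 for S=1, DPL=3. */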
4237 /* specific and weird i386 syscalls */
4238 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4239 unsigned long bytecount)
4241 abi_long ret;
4243 switch (func) {
4244 case 0:
4245 ret = read_ldt(ptr, bytecount);
4246 break;
4247 case 1:
4248 ret = write_ldt(env, ptr, bytecount, 1);
4249 break;
4250 case 0x11:
4251 ret = write_ldt(env, ptr, bytecount, 0);
4252 break;
4253 default:
4254 ret = -TARGET_ENOSYS;
4255 break;
4257 return ret;
4260 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4261 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4263 uint64_t *gdt_table = g2h(env->gdt.base);
4264 struct target_modify_ldt_ldt_s ldt_info;
4265 struct target_modify_ldt_ldt_s *target_ldt_info;
4266 int seg_32bit, contents, read_exec_only, limit_in_pages;
4267 int seg_not_present, useable, lm;
4268 uint32_t *lp, entry_1, entry_2;
4269 int i;
4271 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4272 if (!target_ldt_info)
4273 return -TARGET_EFAULT;
4274 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4275 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4276 ldt_info.limit = tswap32(target_ldt_info->limit);
4277 ldt_info.flags = tswap32(target_ldt_info->flags);
4278 if (ldt_info.entry_number == -1) {
4279 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4280 if (gdt_table[i] == 0) {
4281 ldt_info.entry_number = i;
4282 target_ldt_info->entry_number = tswap32(i);
4283 break;
4287 unlock_user_struct(target_ldt_info, ptr, 1);
4289 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4290 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4291 return -TARGET_EINVAL;
4292 seg_32bit = ldt_info.flags & 1;
4293 contents = (ldt_info.flags >> 1) & 3;
4294 read_exec_only = (ldt_info.flags >> 3) & 1;
4295 limit_in_pages = (ldt_info.flags >> 4) & 1;
4296 seg_not_present = (ldt_info.flags >> 5) & 1;
4297 useable = (ldt_info.flags >> 6) & 1;
4298 #ifdef TARGET_ABI32
4299 lm = 0;
4300 #else
4301 lm = (ldt_info.flags >> 7) & 1;
4302 #endif
4304 if (contents == 3) {
4305 if (seg_not_present == 0)
4306 return -TARGET_EINVAL;
4309 /* NOTE: same code as Linux kernel */
4310 /* Allow LDTs to be cleared by the user. */
4311 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4312 if ((contents == 0 &&
4313 read_exec_only == 1 &&
4314 seg_32bit == 0 &&
4315 limit_in_pages == 0 &&
4316 seg_not_present == 1 &&
4317 useable == 0 )) {
4318 entry_1 = 0;
4319 entry_2 = 0;
4320 goto install;
4324 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4325 (ldt_info.limit & 0x0ffff);
4326 entry_2 = (ldt_info.base_addr & 0xff000000) |
4327 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4328 (ldt_info.limit & 0xf0000) |
4329 ((read_exec_only ^ 1) << 9) |
4330 (contents << 10) |
4331 ((seg_not_present ^ 1) << 15) |
4332 (seg_32bit << 22) |
4333 (limit_in_pages << 23) |
4334 (useable << 20) |
4335 (lm << 21) |
4336 0x7000;
4338 /* Install the new entry ... */
4339 install:
4340 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4341 lp[0] = tswap32(entry_1);
4342 lp[1] = tswap32(entry_2);
4343 return 0;
4346 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4348 struct target_modify_ldt_ldt_s *target_ldt_info;
4349 uint64_t *gdt_table = g2h(env->gdt.base);
4350 uint32_t base_addr, limit, flags;
4351 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4352 int seg_not_present, useable, lm;
4353 uint32_t *lp, entry_1, entry_2;
4355 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4356 if (!target_ldt_info)
4357 return -TARGET_EFAULT;
4358 idx = tswap32(target_ldt_info->entry_number);
4359 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4360 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4361 unlock_user_struct(target_ldt_info, ptr, 1);
4362 return -TARGET_EINVAL;
4364 lp = (uint32_t *)(gdt_table + idx);
4365 entry_1 = tswap32(lp[0]);
4366 entry_2 = tswap32(lp[1]);
4368 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4369 contents = (entry_2 >> 10) & 3;
4370 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4371 seg_32bit = (entry_2 >> 22) & 1;
4372 limit_in_pages = (entry_2 >> 23) & 1;
4373 useable = (entry_2 >> 20) & 1;
4374 #ifdef TARGET_ABI32
4375 lm = 0;
4376 #else
4377 lm = (entry_2 >> 21) & 1;
4378 #endif
4379 flags = (seg_32bit << 0) | (contents << 1) |
4380 (read_exec_only << 3) | (limit_in_pages << 4) |
4381 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4382 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4383 base_addr = (entry_1 >> 16) |
4384 (entry_2 & 0xff000000) |
4385 ((entry_2 & 0xff) << 16);
4386 target_ldt_info->base_addr = tswapal(base_addr);
4387 target_ldt_info->limit = tswap32(limit);
4388 target_ldt_info->flags = tswap32(flags);
4389 unlock_user_struct(target_ldt_info, ptr, 1);
4390 return 0;
4392 #endif /* TARGET_I386 && TARGET_ABI32 */
4394 #ifndef TARGET_ABI32
4395 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4397 abi_long ret = 0;
4398 abi_ulong val;
4399 int idx;
4401 switch(code) {
4402 case TARGET_ARCH_SET_GS:
4403 case TARGET_ARCH_SET_FS:
4404 if (code == TARGET_ARCH_SET_GS)
4405 idx = R_GS;
4406 else
4407 idx = R_FS;
4408 cpu_x86_load_seg(env, idx, 0);
4409 env->segs[idx].base = addr;
4410 break;
4411 case TARGET_ARCH_GET_GS:
4412 case TARGET_ARCH_GET_FS:
4413 if (code == TARGET_ARCH_GET_GS)
4414 idx = R_GS;
4415 else
4416 idx = R_FS;
4417 val = env->segs[idx].base;
4418 if (put_user(val, addr, abi_ulong))
4419 ret = -TARGET_EFAULT;
4420 break;
4421 default:
4422 ret = -TARGET_EINVAL;
4423 break;
4425 return ret;
4427 #endif
4429 #endif /* defined(TARGET_I386) */
4431 #define NEW_STACK_SIZE 0x40000
4434 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4435 typedef struct {
4436 CPUArchState *env;
4437 pthread_mutex_t mutex;
4438 pthread_cond_t cond;
4439 pthread_t thread;
4440 uint32_t tid;
4441 abi_ulong child_tidptr;
4442 abi_ulong parent_tidptr;
4443 sigset_t sigmask;
4444 } new_thread_info;
4446 static void *clone_func(void *arg)
4448 new_thread_info *info = arg;
4449 CPUArchState *env;
4450 CPUState *cpu;
4451 TaskState *ts;
4453 env = info->env;
4454 cpu = ENV_GET_CPU(env);
4455 thread_cpu = cpu;
4456 ts = (TaskState *)cpu->opaque;
4457 info->tid = gettid();
4458 cpu->host_tid = info->tid;
4459 task_settid(ts);
4460 if (info->child_tidptr)
4461 put_user_u32(info->tid, info->child_tidptr);
4462 if (info->parent_tidptr)
4463 put_user_u32(info->tid, info->parent_tidptr);
4464 /* Enable signals. */
4465 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4466 /* Signal to the parent that we're ready. */
4467 pthread_mutex_lock(&info->mutex);
4468 pthread_cond_broadcast(&info->cond);
4469 pthread_mutex_unlock(&info->mutex);
4470 /* Wait until the parent has finished initializing the TLS state. */
4471 pthread_mutex_lock(&clone_lock);
4472 pthread_mutex_unlock(&clone_lock);
4473 cpu_loop(env);
4474 /* never exits */
4475 return NULL;
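/* clone_func() and do_fork() below hand-shake as follows: the parent takes
 * clone_lock before pthread_create() and drops it only once its side of the
 * setup is done; the child publishes its tid, signals info->cond so the
 * parent can return the tid to the guest, and then briefly takes clone_lock
 * itself so it cannot enter cpu_loop() before the parent has finished. */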
4478 /* do_fork() must return host values and target errnos (unlike most
4479 do_*() functions). */
4480 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4481 abi_ulong parent_tidptr, target_ulong newtls,
4482 abi_ulong child_tidptr)
4484 CPUState *cpu = ENV_GET_CPU(env);
4485 int ret;
4486 TaskState *ts;
4487 CPUState *new_cpu;
4488 CPUArchState *new_env;
4489 unsigned int nptl_flags;
4490 sigset_t sigmask;
4492 /* Emulate vfork() with fork() */
4493 if (flags & CLONE_VFORK)
4494 flags &= ~(CLONE_VFORK | CLONE_VM);
4496 if (flags & CLONE_VM) {
4497 TaskState *parent_ts = (TaskState *)cpu->opaque;
4498 new_thread_info info;
4499 pthread_attr_t attr;
4501 ts = g_malloc0(sizeof(TaskState));
4502 init_task_state(ts);
4503 /* we create a new CPU instance. */
4504 new_env = cpu_copy(env);
4505 /* Init regs that differ from the parent. */
4506 cpu_clone_regs(new_env, newsp);
4507 new_cpu = ENV_GET_CPU(new_env);
4508 new_cpu->opaque = ts;
4509 ts->bprm = parent_ts->bprm;
4510 ts->info = parent_ts->info;
4511 nptl_flags = flags;
4512 flags &= ~CLONE_NPTL_FLAGS2;
4514 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4515 ts->child_tidptr = child_tidptr;
4518 if (nptl_flags & CLONE_SETTLS)
4519 cpu_set_tls (new_env, newtls);
4521 /* Grab a mutex so that thread setup appears atomic. */
4522 pthread_mutex_lock(&clone_lock);
4524 memset(&info, 0, sizeof(info));
4525 pthread_mutex_init(&info.mutex, NULL);
4526 pthread_mutex_lock(&info.mutex);
4527 pthread_cond_init(&info.cond, NULL);
4528 info.env = new_env;
4529 if (nptl_flags & CLONE_CHILD_SETTID)
4530 info.child_tidptr = child_tidptr;
4531 if (nptl_flags & CLONE_PARENT_SETTID)
4532 info.parent_tidptr = parent_tidptr;
4534 ret = pthread_attr_init(&attr);
4535 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4536 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4537 /* It is not safe to deliver signals until the child has finished
4538 initializing, so temporarily block all signals. */
4539 sigfillset(&sigmask);
4540 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4542 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4543 /* TODO: Free new CPU state if thread creation failed. */
4545 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4546 pthread_attr_destroy(&attr);
4547 if (ret == 0) {
4548 /* Wait for the child to initialize. */
4549 pthread_cond_wait(&info.cond, &info.mutex);
4550 ret = info.tid;
4551 if (flags & CLONE_PARENT_SETTID)
4552 put_user_u32(ret, parent_tidptr);
4553 } else {
4554 ret = -1;
4556 pthread_mutex_unlock(&info.mutex);
4557 pthread_cond_destroy(&info.cond);
4558 pthread_mutex_destroy(&info.mutex);
4559 pthread_mutex_unlock(&clone_lock);
4560 } else {
4561 /* if CLONE_VM is not set, we consider it a fork */
4562 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4563 return -EINVAL;
4564 fork_start();
4565 ret = fork();
4566 if (ret == 0) {
4567 /* Child Process. */
4568 cpu_clone_regs(env, newsp);
4569 fork_end(1);
4570 /* There is a race condition here. The parent process could
4571 theoretically read the TID in the child process before the child
4572 tid is set. This would require using either ptrace
4573 (not implemented) or having *_tidptr point at a shared memory
4574 mapping. We can't repeat the spinlock hack used above because
4575 the child process gets its own copy of the lock. */
4576 if (flags & CLONE_CHILD_SETTID)
4577 put_user_u32(gettid(), child_tidptr);
4578 if (flags & CLONE_PARENT_SETTID)
4579 put_user_u32(gettid(), parent_tidptr);
4580 ts = (TaskState *)cpu->opaque;
4581 if (flags & CLONE_SETTLS)
4582 cpu_set_tls (env, newtls);
4583 if (flags & CLONE_CHILD_CLEARTID)
4584 ts->child_tidptr = child_tidptr;
4585 } else {
4586 fork_end(0);
4589 return ret;
4592 /* warning: doesn't handle Linux-specific flags... */
4593 static int target_to_host_fcntl_cmd(int cmd)
4595 switch(cmd) {
4596 case TARGET_F_DUPFD:
4597 case TARGET_F_GETFD:
4598 case TARGET_F_SETFD:
4599 case TARGET_F_GETFL:
4600 case TARGET_F_SETFL:
4601 return cmd;
4602 case TARGET_F_GETLK:
4603 return F_GETLK;
4604 case TARGET_F_SETLK:
4605 return F_SETLK;
4606 case TARGET_F_SETLKW:
4607 return F_SETLKW;
4608 case TARGET_F_GETOWN:
4609 return F_GETOWN;
4610 case TARGET_F_SETOWN:
4611 return F_SETOWN;
4612 case TARGET_F_GETSIG:
4613 return F_GETSIG;
4614 case TARGET_F_SETSIG:
4615 return F_SETSIG;
4616 #if TARGET_ABI_BITS == 32
4617 case TARGET_F_GETLK64:
4618 return F_GETLK64;
4619 case TARGET_F_SETLK64:
4620 return F_SETLK64;
4621 case TARGET_F_SETLKW64:
4622 return F_SETLKW64;
4623 #endif
4624 case TARGET_F_SETLEASE:
4625 return F_SETLEASE;
4626 case TARGET_F_GETLEASE:
4627 return F_GETLEASE;
4628 #ifdef F_DUPFD_CLOEXEC
4629 case TARGET_F_DUPFD_CLOEXEC:
4630 return F_DUPFD_CLOEXEC;
4631 #endif
4632 case TARGET_F_NOTIFY:
4633 return F_NOTIFY;
4634 #ifdef F_GETOWN_EX
4635 case TARGET_F_GETOWN_EX:
4636 return F_GETOWN_EX;
4637 #endif
4638 #ifdef F_SETOWN_EX
4639 case TARGET_F_SETOWN_EX:
4640 return F_SETOWN_EX;
4641 #endif
4642 default:
4643 return -TARGET_EINVAL;
4645 return -TARGET_EINVAL;
4648 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4649 static const bitmask_transtbl flock_tbl[] = {
4650 TRANSTBL_CONVERT(F_RDLCK),
4651 TRANSTBL_CONVERT(F_WRLCK),
4652 TRANSTBL_CONVERT(F_UNLCK),
4653 TRANSTBL_CONVERT(F_EXLCK),
4654 TRANSTBL_CONVERT(F_SHLCK),
4655 { 0, 0, 0, 0 }
4658 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4660 struct flock fl;
4661 struct target_flock *target_fl;
4662 struct flock64 fl64;
4663 struct target_flock64 *target_fl64;
4664 #ifdef F_GETOWN_EX
4665 struct f_owner_ex fox;
4666 struct target_f_owner_ex *target_fox;
4667 #endif
4668 abi_long ret;
4669 int host_cmd = target_to_host_fcntl_cmd(cmd);
4671 if (host_cmd == -TARGET_EINVAL)
4672 return host_cmd;
4674 switch(cmd) {
4675 case TARGET_F_GETLK:
4676 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4677 return -TARGET_EFAULT;
4678 fl.l_type =
4679 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4680 fl.l_whence = tswap16(target_fl->l_whence);
4681 fl.l_start = tswapal(target_fl->l_start);
4682 fl.l_len = tswapal(target_fl->l_len);
4683 fl.l_pid = tswap32(target_fl->l_pid);
4684 unlock_user_struct(target_fl, arg, 0);
4685 ret = get_errno(fcntl(fd, host_cmd, &fl));
4686 if (ret == 0) {
4687 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4688 return -TARGET_EFAULT;
4689 target_fl->l_type =
4690 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4691 target_fl->l_whence = tswap16(fl.l_whence);
4692 target_fl->l_start = tswapal(fl.l_start);
4693 target_fl->l_len = tswapal(fl.l_len);
4694 target_fl->l_pid = tswap32(fl.l_pid);
4695 unlock_user_struct(target_fl, arg, 1);
4697 break;
4699 case TARGET_F_SETLK:
4700 case TARGET_F_SETLKW:
4701 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4702 return -TARGET_EFAULT;
4703 fl.l_type =
4704 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4705 fl.l_whence = tswap16(target_fl->l_whence);
4706 fl.l_start = tswapal(target_fl->l_start);
4707 fl.l_len = tswapal(target_fl->l_len);
4708 fl.l_pid = tswap32(target_fl->l_pid);
4709 unlock_user_struct(target_fl, arg, 0);
4710 ret = get_errno(fcntl(fd, host_cmd, &fl));
4711 break;
4713 case TARGET_F_GETLK64:
4714 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4715 return -TARGET_EFAULT;
4716 fl64.l_type =
4717 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4718 fl64.l_whence = tswap16(target_fl64->l_whence);
4719 fl64.l_start = tswap64(target_fl64->l_start);
4720 fl64.l_len = tswap64(target_fl64->l_len);
4721 fl64.l_pid = tswap32(target_fl64->l_pid);
4722 unlock_user_struct(target_fl64, arg, 0);
4723 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4724 if (ret == 0) {
4725 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4726 return -TARGET_EFAULT;
4727 target_fl64->l_type =
4728 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4729 target_fl64->l_whence = tswap16(fl64.l_whence);
4730 target_fl64->l_start = tswap64(fl64.l_start);
4731 target_fl64->l_len = tswap64(fl64.l_len);
4732 target_fl64->l_pid = tswap32(fl64.l_pid);
4733 unlock_user_struct(target_fl64, arg, 1);
4735 break;
4736 case TARGET_F_SETLK64:
4737 case TARGET_F_SETLKW64:
4738 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4739 return -TARGET_EFAULT;
4740 fl64.l_type =
4741 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4742 fl64.l_whence = tswap16(target_fl64->l_whence);
4743 fl64.l_start = tswap64(target_fl64->l_start);
4744 fl64.l_len = tswap64(target_fl64->l_len);
4745 fl64.l_pid = tswap32(target_fl64->l_pid);
4746 unlock_user_struct(target_fl64, arg, 0);
4747 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4748 break;
4750 case TARGET_F_GETFL:
4751 ret = get_errno(fcntl(fd, host_cmd, arg));
4752 if (ret >= 0) {
4753 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4755 break;
4757 case TARGET_F_SETFL:
4758 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4759 break;
4761 #ifdef F_GETOWN_EX
4762 case TARGET_F_GETOWN_EX:
4763 ret = get_errno(fcntl(fd, host_cmd, &fox));
4764 if (ret >= 0) {
4765 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4766 return -TARGET_EFAULT;
4767 target_fox->type = tswap32(fox.type);
4768 target_fox->pid = tswap32(fox.pid);
4769 unlock_user_struct(target_fox, arg, 1);
4771 break;
4772 #endif
4774 #ifdef F_SETOWN_EX
4775 case TARGET_F_SETOWN_EX:
4776 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4777 return -TARGET_EFAULT;
4778 fox.type = tswap32(target_fox->type);
4779 fox.pid = tswap32(target_fox->pid);
4780 unlock_user_struct(target_fox, arg, 0);
4781 ret = get_errno(fcntl(fd, host_cmd, &fox));
4782 break;
4783 #endif
4785 case TARGET_F_SETOWN:
4786 case TARGET_F_GETOWN:
4787 case TARGET_F_SETSIG:
4788 case TARGET_F_GETSIG:
4789 case TARGET_F_SETLEASE:
4790 case TARGET_F_GETLEASE:
4791 ret = get_errno(fcntl(fd, host_cmd, arg));
4792 break;
4794 default:
4795 ret = get_errno(fcntl(fd, cmd, arg));
4796 break;
4798 return ret;
4801 #ifdef USE_UID16
4803 static inline int high2lowuid(int uid)
4805 if (uid > 65535)
4806 return 65534;
4807 else
4808 return uid;
4811 static inline int high2lowgid(int gid)
4813 if (gid > 65535)
4814 return 65534;
4815 else
4816 return gid;
4819 static inline int low2highuid(int uid)
4821 if ((int16_t)uid == -1)
4822 return -1;
4823 else
4824 return uid;
4827 static inline int low2highgid(int gid)
4829 if ((int16_t)gid == -1)
4830 return -1;
4831 else
4832 return gid;
4834 static inline int tswapid(int id)
4836 return tswap16(id);
4839 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4841 #else /* !USE_UID16 */
4842 static inline int high2lowuid(int uid)
4844 return uid;
4846 static inline int high2lowgid(int gid)
4848 return gid;
4850 static inline int low2highuid(int uid)
4852 return uid;
4854 static inline int low2highgid(int gid)
4856 return gid;
4858 static inline int tswapid(int id)
4860 return tswap32(id);
4863 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4865 #endif /* USE_UID16 */
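/* The helpers above paper over the legacy 16-bit uid/gid ABI: with
 * USE_UID16, host IDs above 65535 are clamped to the overflow ID 65534
 * before being returned to the guest, a 16-bit -1 from the guest is
 * sign-extended so "do not change" requests keep working, and IDs are
 * swapped and stored as 16-bit values.  Without USE_UID16 the conversions
 * are the identity and IDs travel as 32-bit values. */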
4867 void syscall_init(void)
4869 IOCTLEntry *ie;
4870 const argtype *arg_type;
4871 int size;
4872 int i;
4874 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4875 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4876 #include "syscall_types.h"
4877 #undef STRUCT
4878 #undef STRUCT_SPECIAL
4880 /* Build the target_to_host_errno_table[] from
4881 * host_to_target_errno_table[]. */
4882 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4883 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4886 /* We patch the ioctl size if necessary. We rely on the fact that
4887 no ioctl has all the bits set to '1' in the size field. */
4888 ie = ioctl_entries;
4889 while (ie->target_cmd != 0) {
4890 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4891 TARGET_IOC_SIZEMASK) {
4892 arg_type = ie->arg_type;
4893 if (arg_type[0] != TYPE_PTR) {
4894 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4895 ie->target_cmd);
4896 exit(1);
4898 arg_type++;
4899 size = thunk_type_size(arg_type, 0);
4900 ie->target_cmd = (ie->target_cmd &
4901 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4902 (size << TARGET_IOC_SIZESHIFT);
4905 /* automatic consistency check if same arch */
4906 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4907 (defined(__x86_64__) && defined(TARGET_X86_64))
4908 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4909 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4910 ie->name, ie->target_cmd, ie->host_cmd);
4912 #endif
4913 ie++;
4917 #if TARGET_ABI_BITS == 32
4918 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4920 #ifdef TARGET_WORDS_BIGENDIAN
4921 return ((uint64_t)word0 << 32) | word1;
4922 #else
4923 return ((uint64_t)word1 << 32) | word0;
4924 #endif
4926 #else /* TARGET_ABI_BITS == 32 */
4927 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4929 return word0;
4931 #endif /* TARGET_ABI_BITS != 32 */
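/* On 32-bit ABIs a 64-bit syscall argument arrives as a register pair;
 * target_offset64() reassembles it in the order dictated by the guest's
 * endianness.  The truncate64/ftruncate64 wrappers below additionally shift
 * the argument pair by one slot when regpairs_aligned() reports that the
 * ABI pads the pair to an even register boundary. */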
4933 #ifdef TARGET_NR_truncate64
4934 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4935 abi_long arg2,
4936 abi_long arg3,
4937 abi_long arg4)
4939 if (regpairs_aligned(cpu_env)) {
4940 arg2 = arg3;
4941 arg3 = arg4;
4943 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4945 #endif
4947 #ifdef TARGET_NR_ftruncate64
4948 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4949 abi_long arg2,
4950 abi_long arg3,
4951 abi_long arg4)
4953 if (regpairs_aligned(cpu_env)) {
4954 arg2 = arg3;
4955 arg3 = arg4;
4957 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4959 #endif
4961 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4962 abi_ulong target_addr)
4964 struct target_timespec *target_ts;
4966 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4967 return -TARGET_EFAULT;
4968 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4969 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4970 unlock_user_struct(target_ts, target_addr, 0);
4971 return 0;
4974 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4975 struct timespec *host_ts)
4977 struct target_timespec *target_ts;
4979 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4980 return -TARGET_EFAULT;
4981 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4982 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4983 unlock_user_struct(target_ts, target_addr, 1);
4984 return 0;
4987 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4988 abi_ulong target_addr)
4990 struct target_itimerspec *target_itspec;
4992 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4993 return -TARGET_EFAULT;
4996 host_itspec->it_interval.tv_sec =
4997 tswapal(target_itspec->it_interval.tv_sec);
4998 host_itspec->it_interval.tv_nsec =
4999 tswapal(target_itspec->it_interval.tv_nsec);
5000 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5001 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5003 unlock_user_struct(target_itspec, target_addr, 1);
5004 return 0;
5007 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5008 struct itimerspec *host_its)
5010 struct target_itimerspec *target_itspec;
5012 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5013 return -TARGET_EFAULT;
5016 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5017 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5019 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5020 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5022 unlock_user_struct(target_itspec, target_addr, 0);
5023 return 0;
5026 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5027 abi_ulong target_addr)
5029 struct target_sigevent *target_sevp;
5031 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5032 return -TARGET_EFAULT;
5035 /* This union is awkward on 64 bit systems because it has a 32 bit
5036 * integer and a pointer in it; we follow the conversion approach
5037 * used for handling sigval types in signal.c so the guest should get
5038 * the correct value back even if we did a 64 bit byteswap and it's
5039 * using the 32 bit integer.
5041 host_sevp->sigev_value.sival_ptr =
5042 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5043 host_sevp->sigev_signo =
5044 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5045 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5046 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5048 unlock_user_struct(target_sevp, target_addr, 1);
5049 return 0;
5052 #if defined(TARGET_NR_mlockall)
5053 static inline int target_to_host_mlockall_arg(int arg)
5055 int result = 0;
5057 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5058 result |= MCL_CURRENT;
5060 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5061 result |= MCL_FUTURE;
5063 return result;
5065 #endif
5067 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
5068 static inline abi_long host_to_target_stat64(void *cpu_env,
5069 abi_ulong target_addr,
5070 struct stat *host_st)
5072 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5073 if (((CPUARMState *)cpu_env)->eabi) {
5074 struct target_eabi_stat64 *target_st;
5076 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5077 return -TARGET_EFAULT;
5078 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5079 __put_user(host_st->st_dev, &target_st->st_dev);
5080 __put_user(host_st->st_ino, &target_st->st_ino);
5081 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5082 __put_user(host_st->st_ino, &target_st->__st_ino);
5083 #endif
5084 __put_user(host_st->st_mode, &target_st->st_mode);
5085 __put_user(host_st->st_nlink, &target_st->st_nlink);
5086 __put_user(host_st->st_uid, &target_st->st_uid);
5087 __put_user(host_st->st_gid, &target_st->st_gid);
5088 __put_user(host_st->st_rdev, &target_st->st_rdev);
5089 __put_user(host_st->st_size, &target_st->st_size);
5090 __put_user(host_st->st_blksize, &target_st->st_blksize);
5091 __put_user(host_st->st_blocks, &target_st->st_blocks);
5092 __put_user(host_st->st_atime, &target_st->target_st_atime);
5093 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5094 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5095 unlock_user_struct(target_st, target_addr, 1);
5096 } else
5097 #endif
5099 #if defined(TARGET_HAS_STRUCT_STAT64)
5100 struct target_stat64 *target_st;
5101 #else
5102 struct target_stat *target_st;
5103 #endif
5105 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5106 return -TARGET_EFAULT;
5107 memset(target_st, 0, sizeof(*target_st));
5108 __put_user(host_st->st_dev, &target_st->st_dev);
5109 __put_user(host_st->st_ino, &target_st->st_ino);
5110 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5111 __put_user(host_st->st_ino, &target_st->__st_ino);
5112 #endif
5113 __put_user(host_st->st_mode, &target_st->st_mode);
5114 __put_user(host_st->st_nlink, &target_st->st_nlink);
5115 __put_user(host_st->st_uid, &target_st->st_uid);
5116 __put_user(host_st->st_gid, &target_st->st_gid);
5117 __put_user(host_st->st_rdev, &target_st->st_rdev);
5118 /* XXX: better use of kernel struct */
5119 __put_user(host_st->st_size, &target_st->st_size);
5120 __put_user(host_st->st_blksize, &target_st->st_blksize);
5121 __put_user(host_st->st_blocks, &target_st->st_blocks);
5122 __put_user(host_st->st_atime, &target_st->target_st_atime);
5123 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5124 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5125 unlock_user_struct(target_st, target_addr, 1);
5128 return 0;
5130 #endif
5132 /* ??? Using host futex calls even when target atomic operations
5133 are not really atomic probably breaks things. However, implementing
5134 futexes locally would make futexes shared between multiple processes
5135 tricky, and they're probably useless anyway because guest atomic
5136 operations won't work either. */
5137 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5138 target_ulong uaddr2, int val3)
5140 struct timespec ts, *pts;
5141 int base_op;
5143 /* ??? We assume FUTEX_* constants are the same on both host
5144 and target. */
5145 #ifdef FUTEX_CMD_MASK
5146 base_op = op & FUTEX_CMD_MASK;
5147 #else
5148 base_op = op;
5149 #endif
5150 switch (base_op) {
5151 case FUTEX_WAIT:
5152 case FUTEX_WAIT_BITSET:
5153 if (timeout) {
5154 pts = &ts;
5155 target_to_host_timespec(pts, timeout);
5156 } else {
5157 pts = NULL;
5159 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5160 pts, NULL, val3));
5161 case FUTEX_WAKE:
5162 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5163 case FUTEX_FD:
5164 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5165 case FUTEX_REQUEUE:
5166 case FUTEX_CMP_REQUEUE:
5167 case FUTEX_WAKE_OP:
5168 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5169 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5170 But the prototype takes a `struct timespec *'; insert casts
5171 to satisfy the compiler. We do not need to tswap TIMEOUT
5172 since it's not compared to guest memory. */
5173 pts = (struct timespec *)(uintptr_t) timeout;
5174 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5175 g2h(uaddr2),
5176 (base_op == FUTEX_CMP_REQUEUE
5177 ? tswap32(val3)
5178 : val3)));
5179 default:
5180 return -TARGET_ENOSYS;
5184 /* Map host to target signal numbers for the wait family of syscalls.
5185 Assume all other status bits are the same. */
5186 int host_to_target_waitstatus(int status)
5188 if (WIFSIGNALED(status)) {
5189 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5191 if (WIFSTOPPED(status)) {
5192 return (host_to_target_signal(WSTOPSIG(status)) << 8)
5193 | (status & 0xff);
5195 return status;
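/* The masks above follow the kernel's wait status layout: bits 0-6 hold
 * the terminating signal (with the core-dump flag in bit 7, which is left
 * untouched), while bits 8-15 hold either the exit code or, for a stopped
 * child, the stopping signal. */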
5198 static int open_self_cmdline(void *cpu_env, int fd)
5200 int fd_orig = -1;
5201 bool word_skipped = false;
5203 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5204 if (fd_orig < 0) {
5205 return fd_orig;
5208 while (true) {
5209 ssize_t nb_read;
5210 char buf[128];
5211 char *cp_buf = buf;
5213 nb_read = read(fd_orig, buf, sizeof(buf));
5214 if (nb_read < 0) {
5215 fd_orig = close(fd_orig);
5216 return -1;
5217 } else if (nb_read == 0) {
5218 break;
5221 if (!word_skipped) {
5222 /* Skip the first string, which is the path to qemu-*-static
5223 instead of the actual command. */
5224 cp_buf = memchr(buf, 0, sizeof(buf));
5225 if (cp_buf) {
5226 /* Null byte found, skip one string */
5227 cp_buf++;
5228 nb_read -= cp_buf - buf;
5229 word_skipped = true;
5233 if (word_skipped) {
5234 if (write(fd, cp_buf, nb_read) != nb_read) {
5235 close(fd_orig);
5236 return -1;
5241 return close(fd_orig);
5244 static int open_self_maps(void *cpu_env, int fd)
5246 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5247 TaskState *ts = cpu->opaque;
5248 FILE *fp;
5249 char *line = NULL;
5250 size_t len = 0;
5251 ssize_t read;
5253 fp = fopen("/proc/self/maps", "r");
5254 if (fp == NULL) {
5255 return -EACCES;
5258 while ((read = getline(&line, &len, fp)) != -1) {
5259 int fields, dev_maj, dev_min, inode;
5260 uint64_t min, max, offset;
5261 char flag_r, flag_w, flag_x, flag_p;
5262 char path[512] = "";
5263 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5264 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5265 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5267 if ((fields < 10) || (fields > 11)) {
5268 continue;
5270 if (h2g_valid(min)) {
5271 int flags = page_get_flags(h2g(min));
5272 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5273 if (page_check_range(h2g(min), max - min, flags) == -1) {
5274 continue;
5276 if (h2g(min) == ts->info->stack_limit) {
5277 pstrcpy(path, sizeof(path), " [stack]");
5279 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5280 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5281 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5282 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5283 path[0] ? " " : "", path);
5287 free(line);
5288 fclose(fp);
5290 return 0;
5293 static int open_self_stat(void *cpu_env, int fd)
5295 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5296 TaskState *ts = cpu->opaque;
5297 abi_ulong start_stack = ts->info->start_stack;
5298 int i;
5300 for (i = 0; i < 44; i++) {
5301 char buf[128];
5302 int len;
5303 uint64_t val = 0;
5305 if (i == 0) {
5306 /* pid */
5307 val = getpid();
5308 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5309 } else if (i == 1) {
5310 /* app name */
5311 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5312 } else if (i == 27) {
5313 /* stack bottom */
5314 val = start_stack;
5315 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5316 } else {
5317 /* the remaining fields are not emulated; just report 0 */
5318 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5321 len = strlen(buf);
5322 if (write(fd, buf, len) != len) {
5323 return -1;
5327 return 0;
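/* Only the fields a guest is likely to inspect are synthesized here:
 * field 1 (pid), field 2 (comm, taken from the guest's argv[0]) and
 * field 28 (startstack, the bottom of the guest stack).  The remaining
 * fields of /proc/self/stat are emitted as 0. */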
5330 static int open_self_auxv(void *cpu_env, int fd)
5332 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5333 TaskState *ts = cpu->opaque;
5334 abi_ulong auxv = ts->info->saved_auxv;
5335 abi_ulong len = ts->info->auxv_len;
5336 char *ptr;
5339 * The auxiliary vector is stored on the target process stack;
5340 * read in the whole auxv vector and copy it to the file.
5342 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5343 if (ptr != NULL) {
5344 while (len > 0) {
5345 ssize_t r;
5346 r = write(fd, ptr, len);
5347 if (r <= 0) {
5348 break;
5350 len -= r;
5351 ptr += r;
5353 lseek(fd, 0, SEEK_SET);
5354 unlock_user(ptr, auxv, len);
5357 return 0;
5360 static int is_proc_myself(const char *filename, const char *entry)
5362 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5363 filename += strlen("/proc/");
5364 if (!strncmp(filename, "self/", strlen("self/"))) {
5365 filename += strlen("self/");
5366 } else if (*filename >= '1' && *filename <= '9') {
5367 char myself[80];
5368 snprintf(myself, sizeof(myself), "%d/", getpid());
5369 if (!strncmp(filename, myself, strlen(myself))) {
5370 filename += strlen(myself);
5371 } else {
5372 return 0;
5374 } else {
5375 return 0;
5377 if (!strcmp(filename, entry)) {
5378 return 1;
5381 return 0;
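/* is_proc_myself() returns non-zero when "filename" refers to the given
 * entry of the emulator's own /proc directory, i.e. it matches both
 * "/proc/self/<entry>" and "/proc/<pid>/<entry>" where <pid> is the
 * current process id. */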
5384 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5385 static int is_proc(const char *filename, const char *entry)
5387 return strcmp(filename, entry) == 0;
5390 static int open_net_route(void *cpu_env, int fd)
5392 FILE *fp;
5393 char *line = NULL;
5394 size_t len = 0;
5395 ssize_t read;
5397 fp = fopen("/proc/net/route", "r");
5398 if (fp == NULL) {
5399 return -EACCES;
5402 /* read header */
5404 read = getline(&line, &len, fp);
5405 dprintf(fd, "%s", line);
5407 /* read routes */
5409 while ((read = getline(&line, &len, fp)) != -1) {
5410 char iface[16];
5411 uint32_t dest, gw, mask;
5412 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5413 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5414 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5415 &mask, &mtu, &window, &irtt);
5416 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5417 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5418 metric, tswap32(mask), mtu, window, irtt);
5421 free(line);
5422 fclose(fp);
5424 return 0;
5426 #endif
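/* open_net_route() above is only needed when host and guest endianness
 * differ: /proc/net/route reports destination, gateway and mask as raw hex
 * words in host byte order, so they are byte-swapped before being handed
 * to the guest, while the header line and the other numeric columns pass
 * through unchanged. */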
5428 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5430 struct fake_open {
5431 const char *filename;
5432 int (*fill)(void *cpu_env, int fd);
5433 int (*cmp)(const char *s1, const char *s2);
5435 const struct fake_open *fake_open;
5436 static const struct fake_open fakes[] = {
5437 { "maps", open_self_maps, is_proc_myself },
5438 { "stat", open_self_stat, is_proc_myself },
5439 { "auxv", open_self_auxv, is_proc_myself },
5440 { "cmdline", open_self_cmdline, is_proc_myself },
5441 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5442 { "/proc/net/route", open_net_route, is_proc },
5443 #endif
5444 { NULL, NULL, NULL }
5447 if (is_proc_myself(pathname, "exe")) {
5448 int execfd = qemu_getauxval(AT_EXECFD);
5449 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5452 for (fake_open = fakes; fake_open->filename; fake_open++) {
5453 if (fake_open->cmp(pathname, fake_open->filename)) {
5454 break;
5458 if (fake_open->filename) {
5459 const char *tmpdir;
5460 char filename[PATH_MAX];
5461 int fd, r;
5463 /* create a temporary file to hold the synthesized /proc contents */
5464 tmpdir = getenv("TMPDIR");
5465 if (!tmpdir)
5466 tmpdir = "/tmp";
5467 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5468 fd = mkstemp(filename);
5469 if (fd < 0) {
5470 return fd;
5472 unlink(filename);
5474 if ((r = fake_open->fill(cpu_env, fd))) {
5475 close(fd);
5476 return r;
5478 lseek(fd, 0, SEEK_SET);
5480 return fd;
5483 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5486 #define TIMER_MAGIC 0x0caf0000
5487 #define TIMER_MAGIC_MASK 0xffff0000
5489 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
5490 static target_timer_t get_timer_id(abi_long arg)
5492 target_timer_t timerid = arg;
5494 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5495 return -TARGET_EINVAL;
5498 timerid &= 0xffff;
5500 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5501 return -TARGET_EINVAL;
5504 return timerid;
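/* Timer IDs handed out to the guest are encoded as TIMER_MAGIC ORed with
 * an index into g_posix_timers; get_timer_id() checks the magic and
 * recovers that index, returning -TARGET_EINVAL for any ID we did not
 * issue ourselves. */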
5507 /* do_syscall() should always have a single exit point at the end so
5508 that actions, such as logging of syscall results, can be performed.
5509 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5510 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5511 abi_long arg2, abi_long arg3, abi_long arg4,
5512 abi_long arg5, abi_long arg6, abi_long arg7,
5513 abi_long arg8)
5515 CPUState *cpu = ENV_GET_CPU(cpu_env);
5516 abi_long ret;
5517 struct stat st;
5518 struct statfs stfs;
5519 void *p;
5521 #ifdef DEBUG
5522 gemu_log("syscall %d", num);
5523 #endif
5524 if(do_strace)
5525 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5527 switch(num) {
5528 case TARGET_NR_exit:
5529 /* In old applications this may be used to implement _exit(2).
5530 However, in threaded applications it is used for thread termination,
5531 and _exit_group is used for application termination.
5532 Do thread termination if we have more than one thread. */
5533 /* FIXME: This probably breaks if a signal arrives. We should probably
5534 be disabling signals. */
5535 if (CPU_NEXT(first_cpu)) {
5536 TaskState *ts;
5538 cpu_list_lock();
5539 /* Remove the CPU from the list. */
5540 QTAILQ_REMOVE(&cpus, cpu, node);
5541 cpu_list_unlock();
5542 ts = cpu->opaque;
5543 if (ts->child_tidptr) {
5544 put_user_u32(0, ts->child_tidptr);
5545 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5546 NULL, NULL, 0);
5548 thread_cpu = NULL;
5549 object_unref(OBJECT(cpu));
5550 g_free(ts);
5551 pthread_exit(NULL);
5553 #ifdef TARGET_GPROF
5554 _mcleanup();
5555 #endif
5556 gdb_exit(cpu_env, arg1);
5557 _exit(arg1);
5558 ret = 0; /* avoid warning */
5559 break;
5560 case TARGET_NR_read:
5561 if (arg3 == 0)
5562 ret = 0;
5563 else {
5564 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5565 goto efault;
5566 ret = get_errno(read(arg1, p, arg3));
5567 unlock_user(p, arg2, ret);
5569 break;
5570 case TARGET_NR_write:
5571 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5572 goto efault;
5573 ret = get_errno(write(arg1, p, arg3));
5574 unlock_user(p, arg2, 0);
5575 break;
5576 case TARGET_NR_open:
5577 if (!(p = lock_user_string(arg1)))
5578 goto efault;
5579 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5580 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5581 arg3));
5582 unlock_user(p, arg1, 0);
5583 break;
5584 case TARGET_NR_openat:
5585 if (!(p = lock_user_string(arg2)))
5586 goto efault;
5587 ret = get_errno(do_openat(cpu_env, arg1, p,
5588 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5589 arg4));
5590 unlock_user(p, arg2, 0);
5591 break;
5592 case TARGET_NR_close:
5593 ret = get_errno(close(arg1));
5594 break;
5595 case TARGET_NR_brk:
5596 ret = do_brk(arg1);
5597 break;
5598 case TARGET_NR_fork:
5599 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5600 break;
5601 #ifdef TARGET_NR_waitpid
5602 case TARGET_NR_waitpid:
5604 int status;
5605 ret = get_errno(waitpid(arg1, &status, arg3));
5606 if (!is_error(ret) && arg2 && ret
5607 && put_user_s32(host_to_target_waitstatus(status), arg2))
5608 goto efault;
5610 break;
5611 #endif
5612 #ifdef TARGET_NR_waitid
5613 case TARGET_NR_waitid:
5615 siginfo_t info;
5616 info.si_pid = 0;
5617 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5618 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5619 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5620 goto efault;
5621 host_to_target_siginfo(p, &info);
5622 unlock_user(p, arg3, sizeof(target_siginfo_t));
5625 break;
5626 #endif
5627 #ifdef TARGET_NR_creat /* not on alpha */
5628 case TARGET_NR_creat:
5629 if (!(p = lock_user_string(arg1)))
5630 goto efault;
5631 ret = get_errno(creat(p, arg2));
5632 unlock_user(p, arg1, 0);
5633 break;
5634 #endif
5635 case TARGET_NR_link:
5637 void * p2;
5638 p = lock_user_string(arg1);
5639 p2 = lock_user_string(arg2);
5640 if (!p || !p2)
5641 ret = -TARGET_EFAULT;
5642 else
5643 ret = get_errno(link(p, p2));
5644 unlock_user(p2, arg2, 0);
5645 unlock_user(p, arg1, 0);
5647 break;
5648 #if defined(TARGET_NR_linkat)
5649 case TARGET_NR_linkat:
5651 void * p2 = NULL;
5652 if (!arg2 || !arg4)
5653 goto efault;
5654 p = lock_user_string(arg2);
5655 p2 = lock_user_string(arg4);
5656 if (!p || !p2)
5657 ret = -TARGET_EFAULT;
5658 else
5659 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5660 unlock_user(p, arg2, 0);
5661 unlock_user(p2, arg4, 0);
5663 break;
5664 #endif
5665 case TARGET_NR_unlink:
5666 if (!(p = lock_user_string(arg1)))
5667 goto efault;
5668 ret = get_errno(unlink(p));
5669 unlock_user(p, arg1, 0);
5670 break;
5671 #if defined(TARGET_NR_unlinkat)
5672 case TARGET_NR_unlinkat:
5673 if (!(p = lock_user_string(arg2)))
5674 goto efault;
5675 ret = get_errno(unlinkat(arg1, p, arg3));
5676 unlock_user(p, arg2, 0);
5677 break;
5678 #endif
5679 case TARGET_NR_execve:
5681 char **argp, **envp;
5682 int argc, envc;
5683 abi_ulong gp;
5684 abi_ulong guest_argp;
5685 abi_ulong guest_envp;
5686 abi_ulong addr;
5687 char **q;
5688 int total_size = 0;
5690 argc = 0;
5691 guest_argp = arg2;
5692 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5693 if (get_user_ual(addr, gp))
5694 goto efault;
5695 if (!addr)
5696 break;
5697 argc++;
5699 envc = 0;
5700 guest_envp = arg3;
5701 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5702 if (get_user_ual(addr, gp))
5703 goto efault;
5704 if (!addr)
5705 break;
5706 envc++;
5709 argp = alloca((argc + 1) * sizeof(void *));
5710 envp = alloca((envc + 1) * sizeof(void *));
5712 for (gp = guest_argp, q = argp; gp;
5713 gp += sizeof(abi_ulong), q++) {
5714 if (get_user_ual(addr, gp))
5715 goto execve_efault;
5716 if (!addr)
5717 break;
5718 if (!(*q = lock_user_string(addr)))
5719 goto execve_efault;
5720 total_size += strlen(*q) + 1;
5722 *q = NULL;
5724 for (gp = guest_envp, q = envp; gp;
5725 gp += sizeof(abi_ulong), q++) {
5726 if (get_user_ual(addr, gp))
5727 goto execve_efault;
5728 if (!addr)
5729 break;
5730 if (!(*q = lock_user_string(addr)))
5731 goto execve_efault;
5732 total_size += strlen(*q) + 1;
5734 *q = NULL;
5736 /* This case will not be caught by the host's execve() if its
5737 page size is bigger than the target's. */
5738 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5739 ret = -TARGET_E2BIG;
5740 goto execve_end;
5742 if (!(p = lock_user_string(arg1)))
5743 goto execve_efault;
5744 ret = get_errno(execve(p, argp, envp));
5745 unlock_user(p, arg1, 0);
5747 goto execve_end;
5749 execve_efault:
5750 ret = -TARGET_EFAULT;
5752 execve_end:
5753 for (gp = guest_argp, q = argp; *q;
5754 gp += sizeof(abi_ulong), q++) {
5755 if (get_user_ual(addr, gp)
5756 || !addr)
5757 break;
5758 unlock_user(*q, addr, 0);
5760 for (gp = guest_envp, q = envp; *q;
5761 gp += sizeof(abi_ulong), q++) {
5762 if (get_user_ual(addr, gp)
5763 || !addr)
5764 break;
5765 unlock_user(*q, addr, 0);
5768 break;
5769 case TARGET_NR_chdir:
5770 if (!(p = lock_user_string(arg1)))
5771 goto efault;
5772 ret = get_errno(chdir(p));
5773 unlock_user(p, arg1, 0);
5774 break;
5775 #ifdef TARGET_NR_time
5776 case TARGET_NR_time:
5778 time_t host_time;
5779 ret = get_errno(time(&host_time));
5780 if (!is_error(ret)
5781 && arg1
5782 && put_user_sal(host_time, arg1))
5783 goto efault;
5785 break;
5786 #endif
5787 case TARGET_NR_mknod:
5788 if (!(p = lock_user_string(arg1)))
5789 goto efault;
5790 ret = get_errno(mknod(p, arg2, arg3));
5791 unlock_user(p, arg1, 0);
5792 break;
5793 #if defined(TARGET_NR_mknodat)
5794 case TARGET_NR_mknodat:
5795 if (!(p = lock_user_string(arg2)))
5796 goto efault;
5797 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5798 unlock_user(p, arg2, 0);
5799 break;
5800 #endif
5801 case TARGET_NR_chmod:
5802 if (!(p = lock_user_string(arg1)))
5803 goto efault;
5804 ret = get_errno(chmod(p, arg2));
5805 unlock_user(p, arg1, 0);
5806 break;
5807 #ifdef TARGET_NR_break
5808 case TARGET_NR_break:
5809 goto unimplemented;
5810 #endif
5811 #ifdef TARGET_NR_oldstat
5812 case TARGET_NR_oldstat:
5813 goto unimplemented;
5814 #endif
5815 case TARGET_NR_lseek:
5816 ret = get_errno(lseek(arg1, arg2, arg3));
5817 break;
5818 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5819 /* Alpha specific */
5820 case TARGET_NR_getxpid:
5821 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5822 ret = get_errno(getpid());
5823 break;
5824 #endif
5825 #ifdef TARGET_NR_getpid
5826 case TARGET_NR_getpid:
5827 ret = get_errno(getpid());
5828 break;
5829 #endif
5830 case TARGET_NR_mount:
5832 /* need to look at the data field */
5833 void *p2, *p3;
5835 if (arg1) {
5836 p = lock_user_string(arg1);
5837 if (!p) {
5838 goto efault;
5840 } else {
5841 p = NULL;
5844 p2 = lock_user_string(arg2);
5845 if (!p2) {
5846 if (arg1) {
5847 unlock_user(p, arg1, 0);
5849 goto efault;
5852 if (arg3) {
5853 p3 = lock_user_string(arg3);
5854 if (!p3) {
5855 if (arg1) {
5856 unlock_user(p, arg1, 0);
5858 unlock_user(p2, arg2, 0);
5859 goto efault;
5861 } else {
5862 p3 = NULL;
5865 /* FIXME - arg5 should be locked, but it isn't clear how to
5866 * do that since it's not guaranteed to be a NULL-terminated
5867 * string.
5869 if (!arg5) {
5870 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
5871 } else {
5872 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
5874 ret = get_errno(ret);
5876 if (arg1) {
5877 unlock_user(p, arg1, 0);
5879 unlock_user(p2, arg2, 0);
5880 if (arg3) {
5881 unlock_user(p3, arg3, 0);
5884 break;
5885 #ifdef TARGET_NR_umount
5886 case TARGET_NR_umount:
5887 if (!(p = lock_user_string(arg1)))
5888 goto efault;
5889 ret = get_errno(umount(p));
5890 unlock_user(p, arg1, 0);
5891 break;
5892 #endif
5893 #ifdef TARGET_NR_stime /* not on alpha */
5894 case TARGET_NR_stime:
5896 time_t host_time;
5897 if (get_user_sal(host_time, arg1))
5898 goto efault;
5899 ret = get_errno(stime(&host_time));
5901 break;
5902 #endif
5903 case TARGET_NR_ptrace:
5904 goto unimplemented;
5905 #ifdef TARGET_NR_alarm /* not on alpha */
5906 case TARGET_NR_alarm:
5907 ret = alarm(arg1);
5908 break;
5909 #endif
5910 #ifdef TARGET_NR_oldfstat
5911 case TARGET_NR_oldfstat:
5912 goto unimplemented;
5913 #endif
5914 #ifdef TARGET_NR_pause /* not on alpha */
5915 case TARGET_NR_pause:
5916 ret = get_errno(pause());
5917 break;
5918 #endif
5919 #ifdef TARGET_NR_utime
5920 case TARGET_NR_utime:
5922 struct utimbuf tbuf, *host_tbuf;
5923 struct target_utimbuf *target_tbuf;
5924 if (arg2) {
5925 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5926 goto efault;
5927 tbuf.actime = tswapal(target_tbuf->actime);
5928 tbuf.modtime = tswapal(target_tbuf->modtime);
5929 unlock_user_struct(target_tbuf, arg2, 0);
5930 host_tbuf = &tbuf;
5931 } else {
5932 host_tbuf = NULL;
5934 if (!(p = lock_user_string(arg1)))
5935 goto efault;
5936 ret = get_errno(utime(p, host_tbuf));
5937 unlock_user(p, arg1, 0);
5939 break;
5940 #endif
5941 case TARGET_NR_utimes:
5943 struct timeval *tvp, tv[2];
5944 if (arg2) {
5945 if (copy_from_user_timeval(&tv[0], arg2)
5946 || copy_from_user_timeval(&tv[1],
5947 arg2 + sizeof(struct target_timeval)))
5948 goto efault;
5949 tvp = tv;
5950 } else {
5951 tvp = NULL;
5953 if (!(p = lock_user_string(arg1)))
5954 goto efault;
5955 ret = get_errno(utimes(p, tvp));
5956 unlock_user(p, arg1, 0);
5958 break;
5959 #if defined(TARGET_NR_futimesat)
5960 case TARGET_NR_futimesat:
5962 struct timeval *tvp, tv[2];
5963 if (arg3) {
5964 if (copy_from_user_timeval(&tv[0], arg3)
5965 || copy_from_user_timeval(&tv[1],
5966 arg3 + sizeof(struct target_timeval)))
5967 goto efault;
5968 tvp = tv;
5969 } else {
5970 tvp = NULL;
5972 if (!(p = lock_user_string(arg2)))
5973 goto efault;
5974 ret = get_errno(futimesat(arg1, path(p), tvp));
5975 unlock_user(p, arg2, 0);
5977 break;
5978 #endif
5979 #ifdef TARGET_NR_stty
5980 case TARGET_NR_stty:
5981 goto unimplemented;
5982 #endif
5983 #ifdef TARGET_NR_gtty
5984 case TARGET_NR_gtty:
5985 goto unimplemented;
5986 #endif
5987 case TARGET_NR_access:
5988 if (!(p = lock_user_string(arg1)))
5989 goto efault;
5990 ret = get_errno(access(path(p), arg2));
5991 unlock_user(p, arg1, 0);
5992 break;
5993 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5994 case TARGET_NR_faccessat:
5995 if (!(p = lock_user_string(arg2)))
5996 goto efault;
5997 ret = get_errno(faccessat(arg1, p, arg3, 0));
5998 unlock_user(p, arg2, 0);
5999 break;
6000 #endif
6001 #ifdef TARGET_NR_nice /* not on alpha */
6002 case TARGET_NR_nice:
6003 ret = get_errno(nice(arg1));
6004 break;
6005 #endif
6006 #ifdef TARGET_NR_ftime
6007 case TARGET_NR_ftime:
6008 goto unimplemented;
6009 #endif
6010 case TARGET_NR_sync:
6011 sync();
6012 ret = 0;
6013 break;
6014 case TARGET_NR_kill:
6015 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6016 break;
6017 case TARGET_NR_rename:
6019 void *p2;
6020 p = lock_user_string(arg1);
6021 p2 = lock_user_string(arg2);
6022 if (!p || !p2)
6023 ret = -TARGET_EFAULT;
6024 else
6025 ret = get_errno(rename(p, p2));
6026 unlock_user(p2, arg2, 0);
6027 unlock_user(p, arg1, 0);
6029 break;
6030 #if defined(TARGET_NR_renameat)
6031 case TARGET_NR_renameat:
6033 void *p2;
6034 p = lock_user_string(arg2);
6035 p2 = lock_user_string(arg4);
6036 if (!p || !p2)
6037 ret = -TARGET_EFAULT;
6038 else
6039 ret = get_errno(renameat(arg1, p, arg3, p2));
6040 unlock_user(p2, arg4, 0);
6041 unlock_user(p, arg2, 0);
6043 break;
6044 #endif
6045 case TARGET_NR_mkdir:
6046 if (!(p = lock_user_string(arg1)))
6047 goto efault;
6048 ret = get_errno(mkdir(p, arg2));
6049 unlock_user(p, arg1, 0);
6050 break;
6051 #if defined(TARGET_NR_mkdirat)
6052 case TARGET_NR_mkdirat:
6053 if (!(p = lock_user_string(arg2)))
6054 goto efault;
6055 ret = get_errno(mkdirat(arg1, p, arg3));
6056 unlock_user(p, arg2, 0);
6057 break;
6058 #endif
6059 case TARGET_NR_rmdir:
6060 if (!(p = lock_user_string(arg1)))
6061 goto efault;
6062 ret = get_errno(rmdir(p));
6063 unlock_user(p, arg1, 0);
6064 break;
6065 case TARGET_NR_dup:
6066 ret = get_errno(dup(arg1));
6067 break;
6068 case TARGET_NR_pipe:
6069 ret = do_pipe(cpu_env, arg1, 0, 0);
6070 break;
6071 #ifdef TARGET_NR_pipe2
6072 case TARGET_NR_pipe2:
6073 ret = do_pipe(cpu_env, arg1,
6074 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6075 break;
6076 #endif
6077 case TARGET_NR_times:
6079 struct target_tms *tmsp;
6080 struct tms tms;
6081 ret = get_errno(times(&tms));
6082 if (arg1) {
6083 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6084 if (!tmsp)
6085 goto efault;
6086 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6087 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6088 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6089 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6091 if (!is_error(ret))
6092 ret = host_to_target_clock_t(ret);
6094 break;
6095 #ifdef TARGET_NR_prof
6096 case TARGET_NR_prof:
6097 goto unimplemented;
6098 #endif
6099 #ifdef TARGET_NR_signal
6100 case TARGET_NR_signal:
6101 goto unimplemented;
6102 #endif
6103 case TARGET_NR_acct:
6104 if (arg1 == 0) {
6105 ret = get_errno(acct(NULL));
6106 } else {
6107 if (!(p = lock_user_string(arg1)))
6108 goto efault;
6109 ret = get_errno(acct(path(p)));
6110 unlock_user(p, arg1, 0);
6112 break;
6113 #ifdef TARGET_NR_umount2
6114 case TARGET_NR_umount2:
6115 if (!(p = lock_user_string(arg1)))
6116 goto efault;
6117 ret = get_errno(umount2(p, arg2));
6118 unlock_user(p, arg1, 0);
6119 break;
6120 #endif
6121 #ifdef TARGET_NR_lock
6122 case TARGET_NR_lock:
6123 goto unimplemented;
6124 #endif
6125 case TARGET_NR_ioctl:
6126 ret = do_ioctl(arg1, arg2, arg3);
6127 break;
6128 case TARGET_NR_fcntl:
6129 ret = do_fcntl(arg1, arg2, arg3);
6130 break;
6131 #ifdef TARGET_NR_mpx
6132 case TARGET_NR_mpx:
6133 goto unimplemented;
6134 #endif
6135 case TARGET_NR_setpgid:
6136 ret = get_errno(setpgid(arg1, arg2));
6137 break;
6138 #ifdef TARGET_NR_ulimit
6139 case TARGET_NR_ulimit:
6140 goto unimplemented;
6141 #endif
6142 #ifdef TARGET_NR_oldolduname
6143 case TARGET_NR_oldolduname:
6144 goto unimplemented;
6145 #endif
6146 case TARGET_NR_umask:
6147 ret = get_errno(umask(arg1));
6148 break;
6149 case TARGET_NR_chroot:
6150 if (!(p = lock_user_string(arg1)))
6151 goto efault;
6152 ret = get_errno(chroot(p));
6153 unlock_user(p, arg1, 0);
6154 break;
6155 case TARGET_NR_ustat:
6156 goto unimplemented;
6157 case TARGET_NR_dup2:
6158 ret = get_errno(dup2(arg1, arg2));
6159 break;
6160 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6161 case TARGET_NR_dup3:
6162 ret = get_errno(dup3(arg1, arg2, arg3));
6163 break;
6164 #endif
6165 #ifdef TARGET_NR_getppid /* not on alpha */
6166 case TARGET_NR_getppid:
6167 ret = get_errno(getppid());
6168 break;
6169 #endif
6170 case TARGET_NR_getpgrp:
6171 ret = get_errno(getpgrp());
6172 break;
6173 case TARGET_NR_setsid:
6174 ret = get_errno(setsid());
6175 break;
6176 #ifdef TARGET_NR_sigaction
6177 case TARGET_NR_sigaction:
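/* Each target has its own old sigaction struct layout, so the conversion
   to and from the host differs per architecture below. */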
6179 #if defined(TARGET_ALPHA)
6180 struct target_sigaction act, oact, *pact = 0;
6181 struct target_old_sigaction *old_act;
6182 if (arg2) {
6183 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6184 goto efault;
6185 act._sa_handler = old_act->_sa_handler;
6186 target_siginitset(&act.sa_mask, old_act->sa_mask);
6187 act.sa_flags = old_act->sa_flags;
6188 act.sa_restorer = 0;
6189 unlock_user_struct(old_act, arg2, 0);
6190 pact = &act;
6192 ret = get_errno(do_sigaction(arg1, pact, &oact));
6193 if (!is_error(ret) && arg3) {
6194 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6195 goto efault;
6196 old_act->_sa_handler = oact._sa_handler;
6197 old_act->sa_mask = oact.sa_mask.sig[0];
6198 old_act->sa_flags = oact.sa_flags;
6199 unlock_user_struct(old_act, arg3, 1);
6201 #elif defined(TARGET_MIPS)
6202 struct target_sigaction act, oact, *pact, *old_act;
6204 if (arg2) {
6205 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6206 goto efault;
6207 act._sa_handler = old_act->_sa_handler;
6208 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6209 act.sa_flags = old_act->sa_flags;
6210 unlock_user_struct(old_act, arg2, 0);
6211 pact = &act;
6212 } else {
6213 pact = NULL;
6216 ret = get_errno(do_sigaction(arg1, pact, &oact));
6218 if (!is_error(ret) && arg3) {
6219 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6220 goto efault;
6221 old_act->_sa_handler = oact._sa_handler;
6222 old_act->sa_flags = oact.sa_flags;
6223 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6224 old_act->sa_mask.sig[1] = 0;
6225 old_act->sa_mask.sig[2] = 0;
6226 old_act->sa_mask.sig[3] = 0;
6227 unlock_user_struct(old_act, arg3, 1);
6229 #else
6230 struct target_old_sigaction *old_act;
6231 struct target_sigaction act, oact, *pact;
6232 if (arg2) {
6233 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6234 goto efault;
6235 act._sa_handler = old_act->_sa_handler;
6236 target_siginitset(&act.sa_mask, old_act->sa_mask);
6237 act.sa_flags = old_act->sa_flags;
6238 act.sa_restorer = old_act->sa_restorer;
6239 unlock_user_struct(old_act, arg2, 0);
6240 pact = &act;
6241 } else {
6242 pact = NULL;
6244 ret = get_errno(do_sigaction(arg1, pact, &oact));
6245 if (!is_error(ret) && arg3) {
6246 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6247 goto efault;
6248 old_act->_sa_handler = oact._sa_handler;
6249 old_act->sa_mask = oact.sa_mask.sig[0];
6250 old_act->sa_flags = oact.sa_flags;
6251 old_act->sa_restorer = oact.sa_restorer;
6252 unlock_user_struct(old_act, arg3, 1);
6254 #endif
6256 break;
6257 #endif
6258 case TARGET_NR_rt_sigaction:
6260 #if defined(TARGET_ALPHA)
6261 struct target_sigaction act, oact, *pact = 0;
6262 struct target_rt_sigaction *rt_act;
6263 /* ??? arg4 == sizeof(sigset_t). */
6264 if (arg2) {
6265 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6266 goto efault;
6267 act._sa_handler = rt_act->_sa_handler;
6268 act.sa_mask = rt_act->sa_mask;
6269 act.sa_flags = rt_act->sa_flags;
6270 act.sa_restorer = arg5;
6271 unlock_user_struct(rt_act, arg2, 0);
6272 pact = &act;
6274 ret = get_errno(do_sigaction(arg1, pact, &oact));
6275 if (!is_error(ret) && arg3) {
6276 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6277 goto efault;
6278 rt_act->_sa_handler = oact._sa_handler;
6279 rt_act->sa_mask = oact.sa_mask;
6280 rt_act->sa_flags = oact.sa_flags;
6281 unlock_user_struct(rt_act, arg3, 1);
6283 #else
6284 struct target_sigaction *act;
6285 struct target_sigaction *oact;
6287 if (arg2) {
6288 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6289 goto efault;
6290 } else
6291 act = NULL;
6292 if (arg3) {
6293 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6294 ret = -TARGET_EFAULT;
6295 goto rt_sigaction_fail;
6297 } else
6298 oact = NULL;
6299 ret = get_errno(do_sigaction(arg1, act, oact));
6300 rt_sigaction_fail:
6301 if (act)
6302 unlock_user_struct(act, arg2, 0);
6303 if (oact)
6304 unlock_user_struct(oact, arg3, 1);
6305 #endif
6307 break;
6308 #ifdef TARGET_NR_sgetmask /* not on alpha */
6309 case TARGET_NR_sgetmask:
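/* Legacy syscall: return the current blocked-signal mask as a single
   old-style sigset word. */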
6311 sigset_t cur_set;
6312 abi_ulong target_set;
6313 do_sigprocmask(0, NULL, &cur_set);
6314 host_to_target_old_sigset(&target_set, &cur_set);
6315 ret = target_set;
6317 break;
6318 #endif
6319 #ifdef TARGET_NR_ssetmask /* not on alpha */
6320 case TARGET_NR_ssetmask:
6322 sigset_t set, oset, cur_set;
6323 abi_ulong target_set = arg1;
6324 do_sigprocmask(0, NULL, &cur_set);
6325 target_to_host_old_sigset(&set, &target_set);
6326 sigorset(&set, &set, &cur_set);
6327 do_sigprocmask(SIG_SETMASK, &set, &oset);
6328 host_to_target_old_sigset(&target_set, &oset);
6329 ret = target_set;
6331 break;
6332 #endif
6333 #ifdef TARGET_NR_sigprocmask
6334 case TARGET_NR_sigprocmask:
6336 #if defined(TARGET_ALPHA)
6337 sigset_t set, oldset;
6338 abi_ulong mask;
6339 int how;
6341 switch (arg1) {
6342 case TARGET_SIG_BLOCK:
6343 how = SIG_BLOCK;
6344 break;
6345 case TARGET_SIG_UNBLOCK:
6346 how = SIG_UNBLOCK;
6347 break;
6348 case TARGET_SIG_SETMASK:
6349 how = SIG_SETMASK;
6350 break;
6351 default:
6352 ret = -TARGET_EINVAL;
6353 goto fail;
6355 mask = arg2;
6356 target_to_host_old_sigset(&set, &mask);
6358 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6359 if (!is_error(ret)) {
6360 host_to_target_old_sigset(&mask, &oldset);
6361 ret = mask;
6362 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6364 #else
6365 sigset_t set, oldset, *set_ptr;
6366 int how;
6368 if (arg2) {
6369 switch (arg1) {
6370 case TARGET_SIG_BLOCK:
6371 how = SIG_BLOCK;
6372 break;
6373 case TARGET_SIG_UNBLOCK:
6374 how = SIG_UNBLOCK;
6375 break;
6376 case TARGET_SIG_SETMASK:
6377 how = SIG_SETMASK;
6378 break;
6379 default:
6380 ret = -TARGET_EINVAL;
6381 goto fail;
6383 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6384 goto efault;
6385 target_to_host_old_sigset(&set, p);
6386 unlock_user(p, arg2, 0);
6387 set_ptr = &set;
6388 } else {
6389 how = 0;
6390 set_ptr = NULL;
6392 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6393 if (!is_error(ret) && arg3) {
6394 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6395 goto efault;
6396 host_to_target_old_sigset(p, &oldset);
6397 unlock_user(p, arg3, sizeof(target_sigset_t));
6399 #endif
6401 break;
6402 #endif
6403 case TARGET_NR_rt_sigprocmask:
6405 int how = arg1;
6406 sigset_t set, oldset, *set_ptr;
6408 if (arg2) {
6409 switch(how) {
6410 case TARGET_SIG_BLOCK:
6411 how = SIG_BLOCK;
6412 break;
6413 case TARGET_SIG_UNBLOCK:
6414 how = SIG_UNBLOCK;
6415 break;
6416 case TARGET_SIG_SETMASK:
6417 how = SIG_SETMASK;
6418 break;
6419 default:
6420 ret = -TARGET_EINVAL;
6421 goto fail;
6423 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6424 goto efault;
6425 target_to_host_sigset(&set, p);
6426 unlock_user(p, arg2, 0);
6427 set_ptr = &set;
6428 } else {
6429 how = 0;
6430 set_ptr = NULL;
6432 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6433 if (!is_error(ret) && arg3) {
6434 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6435 goto efault;
6436 host_to_target_sigset(p, &oldset);
6437 unlock_user(p, arg3, sizeof(target_sigset_t));
6440 break;
6441 #ifdef TARGET_NR_sigpending
6442 case TARGET_NR_sigpending:
6444 sigset_t set;
6445 ret = get_errno(sigpending(&set));
6446 if (!is_error(ret)) {
6447 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6448 goto efault;
6449 host_to_target_old_sigset(p, &set);
6450 unlock_user(p, arg1, sizeof(target_sigset_t));
6453 break;
6454 #endif
6455 case TARGET_NR_rt_sigpending:
6457 sigset_t set;
6458 ret = get_errno(sigpending(&set));
6459 if (!is_error(ret)) {
6460 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6461 goto efault;
6462 host_to_target_sigset(p, &set);
6463 unlock_user(p, arg1, sizeof(target_sigset_t));
6466 break;
6467 #ifdef TARGET_NR_sigsuspend
6468 case TARGET_NR_sigsuspend:
6470 sigset_t set;
6471 #if defined(TARGET_ALPHA)
6472 abi_ulong mask = arg1;
6473 target_to_host_old_sigset(&set, &mask);
6474 #else
6475 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6476 goto efault;
6477 target_to_host_old_sigset(&set, p);
6478 unlock_user(p, arg1, 0);
6479 #endif
6480 ret = get_errno(sigsuspend(&set));
6482 break;
6483 #endif
6484 case TARGET_NR_rt_sigsuspend:
6486 sigset_t set;
6487 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6488 goto efault;
6489 target_to_host_sigset(&set, p);
6490 unlock_user(p, arg1, 0);
6491 ret = get_errno(sigsuspend(&set));
6493 break;
6494 case TARGET_NR_rt_sigtimedwait:
6496 sigset_t set;
6497 struct timespec uts, *puts;
6498 siginfo_t uinfo;
6500 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6501 goto efault;
6502 target_to_host_sigset(&set, p);
6503 unlock_user(p, arg1, 0);
6504 if (arg3) {
6505 puts = &uts;
6506 target_to_host_timespec(puts, arg3);
6507 } else {
6508 puts = NULL;
6510 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6511 if (!is_error(ret)) {
6512 if (arg2) {
6513 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6515 if (!p) {
6516 goto efault;
6518 host_to_target_siginfo(p, &uinfo);
6519 unlock_user(p, arg2, sizeof(target_siginfo_t));
6521 ret = host_to_target_signal(ret);
6524 break;
6525 case TARGET_NR_rt_sigqueueinfo:
6527 siginfo_t uinfo;
6528 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6529 goto efault;
6530 target_to_host_siginfo(&uinfo, p);
6531 unlock_user(p, arg3, 0);
6532 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6534 break;
6535 #ifdef TARGET_NR_sigreturn
6536 case TARGET_NR_sigreturn:
6537 /* NOTE: ret is eax, so no transcoding needs to be done */
6538 ret = do_sigreturn(cpu_env);
6539 break;
6540 #endif
6541 case TARGET_NR_rt_sigreturn:
6542 /* NOTE: ret is eax, so no transcoding needs to be done */
6543 ret = do_rt_sigreturn(cpu_env);
6544 break;
6545 case TARGET_NR_sethostname:
6546 if (!(p = lock_user_string(arg1)))
6547 goto efault;
6548 ret = get_errno(sethostname(p, arg2));
6549 unlock_user(p, arg1, 0);
6550 break;
6551 case TARGET_NR_setrlimit:
6553 int resource = target_to_host_resource(arg1);
6554 struct target_rlimit *target_rlim;
6555 struct rlimit rlim;
6556 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6557 goto efault;
6558 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6559 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6560 unlock_user_struct(target_rlim, arg2, 0);
6561 ret = get_errno(setrlimit(resource, &rlim));
6563 break;
6564 case TARGET_NR_getrlimit:
6566 int resource = target_to_host_resource(arg1);
6567 struct target_rlimit *target_rlim;
6568 struct rlimit rlim;
6570 ret = get_errno(getrlimit(resource, &rlim));
6571 if (!is_error(ret)) {
6572 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6573 goto efault;
6574 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6575 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6576 unlock_user_struct(target_rlim, arg2, 1);
6579 break;
6580 case TARGET_NR_getrusage:
6582 struct rusage rusage;
6583 ret = get_errno(getrusage(arg1, &rusage));
6584 if (!is_error(ret)) {
6585 ret = host_to_target_rusage(arg2, &rusage);
6588 break;
6589 case TARGET_NR_gettimeofday:
6591 struct timeval tv;
6592 ret = get_errno(gettimeofday(&tv, NULL));
6593 if (!is_error(ret)) {
6594 if (copy_to_user_timeval(arg1, &tv))
6595 goto efault;
6598 break;
6599 case TARGET_NR_settimeofday:
6601 struct timeval tv, *ptv = NULL;
6602 struct timezone tz, *ptz = NULL;
6604 if (arg1) {
6605 if (copy_from_user_timeval(&tv, arg1)) {
6606 goto efault;
6608 ptv = &tv;
6611 if (arg2) {
6612 if (copy_from_user_timezone(&tz, arg2)) {
6613 goto efault;
6615 ptz = &tz;
6618 ret = get_errno(settimeofday(ptv, ptz));
6620 break;
6621 #if defined(TARGET_NR_select)
6622 case TARGET_NR_select:
6623 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6624 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6625 #else
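/* On the remaining targets the old select syscall takes a single pointer to
   a block holding nfds, the three fd_set pointers and the timeout, so unpack
   it before calling do_select(). */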
6627 struct target_sel_arg_struct *sel;
6628 abi_ulong inp, outp, exp, tvp;
6629 long nsel;
6631 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6632 goto efault;
6633 nsel = tswapal(sel->n);
6634 inp = tswapal(sel->inp);
6635 outp = tswapal(sel->outp);
6636 exp = tswapal(sel->exp);
6637 tvp = tswapal(sel->tvp);
6638 unlock_user_struct(sel, arg1, 0);
6639 ret = do_select(nsel, inp, outp, exp, tvp);
6641 #endif
6642 break;
6643 #endif
6644 #ifdef TARGET_NR_pselect6
6645 case TARGET_NR_pselect6:
6647 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6648 fd_set rfds, wfds, efds;
6649 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6650 struct timespec ts, *ts_ptr;
6653 * The 6th arg is actually two args smashed together,
6654 * so we cannot use the C library.
6656 sigset_t set;
6657 struct {
6658 sigset_t *set;
6659 size_t size;
6660 } sig, *sig_ptr;
6662 abi_ulong arg_sigset, arg_sigsize, *arg7;
6663 target_sigset_t *target_sigset;
6665 n = arg1;
6666 rfd_addr = arg2;
6667 wfd_addr = arg3;
6668 efd_addr = arg4;
6669 ts_addr = arg5;
6671 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6672 if (ret) {
6673 goto fail;
6675 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6676 if (ret) {
6677 goto fail;
6679 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6680 if (ret) {
6681 goto fail;
6685 * This takes a timespec, and not a timeval, so we cannot
6686 * use the do_select() helper ...
6688 if (ts_addr) {
6689 if (target_to_host_timespec(&ts, ts_addr)) {
6690 goto efault;
6692 ts_ptr = &ts;
6693 } else {
6694 ts_ptr = NULL;
6697 /* Extract the two packed args for the sigset */
6698 if (arg6) {
6699 sig_ptr = &sig;
6700 sig.size = _NSIG / 8;
6702 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6703 if (!arg7) {
6704 goto efault;
6706 arg_sigset = tswapal(arg7[0]);
6707 arg_sigsize = tswapal(arg7[1]);
6708 unlock_user(arg7, arg6, 0);
6710 if (arg_sigset) {
6711 sig.set = &set;
6712 if (arg_sigsize != sizeof(*target_sigset)) {
6713 /* Like the kernel, we enforce correct size sigsets */
6714 ret = -TARGET_EINVAL;
6715 goto fail;
6717 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6718 sizeof(*target_sigset), 1);
6719 if (!target_sigset) {
6720 goto efault;
6722 target_to_host_sigset(&set, target_sigset);
6723 unlock_user(target_sigset, arg_sigset, 0);
6724 } else {
6725 sig.set = NULL;
6727 } else {
6728 sig_ptr = NULL;
6731 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6732 ts_ptr, sig_ptr));
6734 if (!is_error(ret)) {
6735 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6736 goto efault;
6737 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6738 goto efault;
6739 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6740 goto efault;
6742 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6743 goto efault;
6746 break;
6747 #endif
6748 case TARGET_NR_symlink:
6750 void *p2;
6751 p = lock_user_string(arg1);
6752 p2 = lock_user_string(arg2);
6753 if (!p || !p2)
6754 ret = -TARGET_EFAULT;
6755 else
6756 ret = get_errno(symlink(p, p2));
6757 unlock_user(p2, arg2, 0);
6758 unlock_user(p, arg1, 0);
6760 break;
6761 #if defined(TARGET_NR_symlinkat)
6762 case TARGET_NR_symlinkat:
6764 void *p2;
6765 p = lock_user_string(arg1);
6766 p2 = lock_user_string(arg3);
6767 if (!p || !p2)
6768 ret = -TARGET_EFAULT;
6769 else
6770 ret = get_errno(symlinkat(p, arg2, p2));
6771 unlock_user(p2, arg3, 0);
6772 unlock_user(p, arg1, 0);
6774 break;
6775 #endif
6776 #ifdef TARGET_NR_oldlstat
6777 case TARGET_NR_oldlstat:
6778 goto unimplemented;
6779 #endif
6780 case TARGET_NR_readlink:
6782 void *p2;
6783 p = lock_user_string(arg1);
6784 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6785 if (!p || !p2) {
6786 ret = -TARGET_EFAULT;
6787 } else if (!arg3) {
6788 /* Short circuit this for the magic exe check. */
6789 ret = -TARGET_EINVAL;
6790 } else if (is_proc_myself((const char *)p, "exe")) {
6791 char real[PATH_MAX], *temp;
6792 temp = realpath(exec_path, real);
6793 /* Return value is # of bytes that we wrote to the buffer. */
6794 if (temp == NULL) {
6795 ret = get_errno(-1);
6796 } else {
6797 /* Don't worry about sign mismatch as earlier mapping
6798 * logic would have thrown a bad address error. */
6799 ret = MIN(strlen(real), arg3);
6800 /* We cannot NUL terminate the string. */
6801 memcpy(p2, real, ret);
6803 } else {
6804 ret = get_errno(readlink(path(p), p2, arg3));
6806 unlock_user(p2, arg2, ret);
6807 unlock_user(p, arg1, 0);
6809 break;
6810 #if defined(TARGET_NR_readlinkat)
6811 case TARGET_NR_readlinkat:
6813 void *p2;
6814 p = lock_user_string(arg2);
6815 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6816 if (!p || !p2) {
6817 ret = -TARGET_EFAULT;
6818 } else if (is_proc_myself((const char *)p, "exe")) {
6819 char real[PATH_MAX], *temp;
6820 temp = realpath(exec_path, real);
6821 ret = temp == NULL ? get_errno(-1) : strlen(real);
6822 snprintf((char *)p2, arg4, "%s", real);
6823 } else {
6824 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6826 unlock_user(p2, arg3, ret);
6827 unlock_user(p, arg2, 0);
6829 break;
6830 #endif
6831 #ifdef TARGET_NR_uselib
6832 case TARGET_NR_uselib:
6833 goto unimplemented;
6834 #endif
6835 #ifdef TARGET_NR_swapon
6836 case TARGET_NR_swapon:
6837 if (!(p = lock_user_string(arg1)))
6838 goto efault;
6839 ret = get_errno(swapon(p, arg2));
6840 unlock_user(p, arg1, 0);
6841 break;
6842 #endif
6843 case TARGET_NR_reboot:
6844 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6845 /* arg4 must be ignored in all other cases */
6846 p = lock_user_string(arg4);
6847 if (!p) {
6848 goto efault;
6850 ret = get_errno(reboot(arg1, arg2, arg3, p));
6851 unlock_user(p, arg4, 0);
6852 } else {
6853 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6855 break;
6856 #ifdef TARGET_NR_readdir
6857 case TARGET_NR_readdir:
6858 goto unimplemented;
6859 #endif
6860 #ifdef TARGET_NR_mmap
6861 case TARGET_NR_mmap:
6862 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6863 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6864 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6865 || defined(TARGET_S390X)
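/* On these targets the old mmap syscall passes a pointer to a block of six
   arguments in guest memory instead of passing them in registers. */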
6867 abi_ulong *v;
6868 abi_ulong v1, v2, v3, v4, v5, v6;
6869 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6870 goto efault;
6871 v1 = tswapal(v[0]);
6872 v2 = tswapal(v[1]);
6873 v3 = tswapal(v[2]);
6874 v4 = tswapal(v[3]);
6875 v5 = tswapal(v[4]);
6876 v6 = tswapal(v[5]);
6877 unlock_user(v, arg1, 0);
6878 ret = get_errno(target_mmap(v1, v2, v3,
6879 target_to_host_bitmask(v4, mmap_flags_tbl),
6880 v5, v6));
6882 #else
6883 ret = get_errno(target_mmap(arg1, arg2, arg3,
6884 target_to_host_bitmask(arg4, mmap_flags_tbl),
6885 arg5,
6886 arg6));
6887 #endif
6888 break;
6889 #endif
6890 #ifdef TARGET_NR_mmap2
6891 case TARGET_NR_mmap2:
6892 #ifndef MMAP_SHIFT
6893 #define MMAP_SHIFT 12
6894 #endif
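/* The mmap2 offset argument is in units of 1 << MMAP_SHIFT bytes (4096 by
   default), so shift it into a byte offset for target_mmap(). */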
6895 ret = get_errno(target_mmap(arg1, arg2, arg3,
6896 target_to_host_bitmask(arg4, mmap_flags_tbl),
6897 arg5,
6898 arg6 << MMAP_SHIFT));
6899 break;
6900 #endif
6901 case TARGET_NR_munmap:
6902 ret = get_errno(target_munmap(arg1, arg2));
6903 break;
6904 case TARGET_NR_mprotect:
6906 TaskState *ts = cpu->opaque;
6907 /* Special hack to detect libc making the stack executable. */
6908 if ((arg3 & PROT_GROWSDOWN)
6909 && arg1 >= ts->info->stack_limit
6910 && arg1 <= ts->info->start_stack) {
6911 arg3 &= ~PROT_GROWSDOWN;
6912 arg2 = arg2 + arg1 - ts->info->stack_limit;
6913 arg1 = ts->info->stack_limit;
6916 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6917 break;
6918 #ifdef TARGET_NR_mremap
6919 case TARGET_NR_mremap:
6920 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6921 break;
6922 #endif
6923 /* ??? msync/mlock/munlock are broken for softmmu. */
6924 #ifdef TARGET_NR_msync
6925 case TARGET_NR_msync:
6926 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6927 break;
6928 #endif
6929 #ifdef TARGET_NR_mlock
6930 case TARGET_NR_mlock:
6931 ret = get_errno(mlock(g2h(arg1), arg2));
6932 break;
6933 #endif
6934 #ifdef TARGET_NR_munlock
6935 case TARGET_NR_munlock:
6936 ret = get_errno(munlock(g2h(arg1), arg2));
6937 break;
6938 #endif
6939 #ifdef TARGET_NR_mlockall
6940 case TARGET_NR_mlockall:
6941 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
6942 break;
6943 #endif
6944 #ifdef TARGET_NR_munlockall
6945 case TARGET_NR_munlockall:
6946 ret = get_errno(munlockall());
6947 break;
6948 #endif
6949 case TARGET_NR_truncate:
6950 if (!(p = lock_user_string(arg1)))
6951 goto efault;
6952 ret = get_errno(truncate(p, arg2));
6953 unlock_user(p, arg1, 0);
6954 break;
6955 case TARGET_NR_ftruncate:
6956 ret = get_errno(ftruncate(arg1, arg2));
6957 break;
6958 case TARGET_NR_fchmod:
6959 ret = get_errno(fchmod(arg1, arg2));
6960 break;
6961 #if defined(TARGET_NR_fchmodat)
6962 case TARGET_NR_fchmodat:
6963 if (!(p = lock_user_string(arg2)))
6964 goto efault;
6965 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6966 unlock_user(p, arg2, 0);
6967 break;
6968 #endif
6969 case TARGET_NR_getpriority:
6970 /* Note that negative values are valid for getpriority, so we must
6971 differentiate based on errno settings. */
6972 errno = 0;
6973 ret = getpriority(arg1, arg2);
6974 if (ret == -1 && errno != 0) {
6975 ret = -host_to_target_errno(errno);
6976 break;
6978 #ifdef TARGET_ALPHA
6979 /* Return value is the unbiased priority. Signal no error. */
6980 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6981 #else
6982 /* Return value is a biased priority to avoid negative numbers. */
6983 ret = 20 - ret;
6984 #endif
6985 break;
6986 case TARGET_NR_setpriority:
6987 ret = get_errno(setpriority(arg1, arg2, arg3));
6988 break;
6989 #ifdef TARGET_NR_profil
6990 case TARGET_NR_profil:
6991 goto unimplemented;
6992 #endif
6993 case TARGET_NR_statfs:
6994 if (!(p = lock_user_string(arg1)))
6995 goto efault;
6996 ret = get_errno(statfs(path(p), &stfs));
6997 unlock_user(p, arg1, 0);
6998 convert_statfs:
6999 if (!is_error(ret)) {
7000 struct target_statfs *target_stfs;
7002 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7003 goto efault;
7004 __put_user(stfs.f_type, &target_stfs->f_type);
7005 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7006 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7007 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7008 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7009 __put_user(stfs.f_files, &target_stfs->f_files);
7010 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7011 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7012 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7013 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7014 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7015 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7016 unlock_user_struct(target_stfs, arg2, 1);
7018 break;
7019 case TARGET_NR_fstatfs:
7020 ret = get_errno(fstatfs(arg1, &stfs));
7021 goto convert_statfs;
7022 #ifdef TARGET_NR_statfs64
7023 case TARGET_NR_statfs64:
7024 if (!(p = lock_user_string(arg1)))
7025 goto efault;
7026 ret = get_errno(statfs(path(p), &stfs));
7027 unlock_user(p, arg1, 0);
7028 convert_statfs64:
7029 if (!is_error(ret)) {
7030 struct target_statfs64 *target_stfs;
7032 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7033 goto efault;
7034 __put_user(stfs.f_type, &target_stfs->f_type);
7035 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7036 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7037 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7038 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7039 __put_user(stfs.f_files, &target_stfs->f_files);
7040 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7041 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7042 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7043 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7044 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7045 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7046 unlock_user_struct(target_stfs, arg3, 1);
7048 break;
7049 case TARGET_NR_fstatfs64:
7050 ret = get_errno(fstatfs(arg1, &stfs));
7051 goto convert_statfs64;
7052 #endif
7053 #ifdef TARGET_NR_ioperm
7054 case TARGET_NR_ioperm:
7055 goto unimplemented;
7056 #endif
7057 #ifdef TARGET_NR_socketcall
7058 case TARGET_NR_socketcall:
7059 ret = do_socketcall(arg1, arg2);
7060 break;
7061 #endif
7062 #ifdef TARGET_NR_accept
7063 case TARGET_NR_accept:
7064 ret = do_accept4(arg1, arg2, arg3, 0);
7065 break;
7066 #endif
7067 #ifdef TARGET_NR_accept4
7068 case TARGET_NR_accept4:
7069 #ifdef CONFIG_ACCEPT4
7070 ret = do_accept4(arg1, arg2, arg3, arg4);
7071 #else
7072 goto unimplemented;
7073 #endif
7074 break;
7075 #endif
7076 #ifdef TARGET_NR_bind
7077 case TARGET_NR_bind:
7078 ret = do_bind(arg1, arg2, arg3);
7079 break;
7080 #endif
7081 #ifdef TARGET_NR_connect
7082 case TARGET_NR_connect:
7083 ret = do_connect(arg1, arg2, arg3);
7084 break;
7085 #endif
7086 #ifdef TARGET_NR_getpeername
7087 case TARGET_NR_getpeername:
7088 ret = do_getpeername(arg1, arg2, arg3);
7089 break;
7090 #endif
7091 #ifdef TARGET_NR_getsockname
7092 case TARGET_NR_getsockname:
7093 ret = do_getsockname(arg1, arg2, arg3);
7094 break;
7095 #endif
7096 #ifdef TARGET_NR_getsockopt
7097 case TARGET_NR_getsockopt:
7098 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7099 break;
7100 #endif
7101 #ifdef TARGET_NR_listen
7102 case TARGET_NR_listen:
7103 ret = get_errno(listen(arg1, arg2));
7104 break;
7105 #endif
7106 #ifdef TARGET_NR_recv
7107 case TARGET_NR_recv:
7108 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7109 break;
7110 #endif
7111 #ifdef TARGET_NR_recvfrom
7112 case TARGET_NR_recvfrom:
7113 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7114 break;
7115 #endif
7116 #ifdef TARGET_NR_recvmsg
7117 case TARGET_NR_recvmsg:
7118 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7119 break;
7120 #endif
7121 #ifdef TARGET_NR_send
7122 case TARGET_NR_send:
7123 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7124 break;
7125 #endif
7126 #ifdef TARGET_NR_sendmsg
7127 case TARGET_NR_sendmsg:
7128 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7129 break;
7130 #endif
7131 #ifdef TARGET_NR_sendmmsg
7132 case TARGET_NR_sendmmsg:
7133 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7134 break;
7135 case TARGET_NR_recvmmsg:
7136 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7137 break;
7138 #endif
7139 #ifdef TARGET_NR_sendto
7140 case TARGET_NR_sendto:
7141 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7142 break;
7143 #endif
7144 #ifdef TARGET_NR_shutdown
7145 case TARGET_NR_shutdown:
7146 ret = get_errno(shutdown(arg1, arg2));
7147 break;
7148 #endif
7149 #ifdef TARGET_NR_socket
7150 case TARGET_NR_socket:
7151 ret = do_socket(arg1, arg2, arg3);
7152 break;
7153 #endif
7154 #ifdef TARGET_NR_socketpair
7155 case TARGET_NR_socketpair:
7156 ret = do_socketpair(arg1, arg2, arg3, arg4);
7157 break;
7158 #endif
7159 #ifdef TARGET_NR_setsockopt
7160 case TARGET_NR_setsockopt:
7161 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7162 break;
7163 #endif
7165 case TARGET_NR_syslog:
7166 if (!(p = lock_user_string(arg2)))
7167 goto efault;
7168 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7169 unlock_user(p, arg2, 0);
7170 break;
7172 case TARGET_NR_setitimer:
7174 struct itimerval value, ovalue, *pvalue;
7176 if (arg2) {
7177 pvalue = &value;
7178 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7179 || copy_from_user_timeval(&pvalue->it_value,
7180 arg2 + sizeof(struct target_timeval)))
7181 goto efault;
7182 } else {
7183 pvalue = NULL;
7185 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7186 if (!is_error(ret) && arg3) {
7187 if (copy_to_user_timeval(arg3,
7188 &ovalue.it_interval)
7189 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7190 &ovalue.it_value))
7191 goto efault;
7194 break;
7195 case TARGET_NR_getitimer:
7197 struct itimerval value;
7199 ret = get_errno(getitimer(arg1, &value));
7200 if (!is_error(ret) && arg2) {
7201 if (copy_to_user_timeval(arg2,
7202 &value.it_interval)
7203 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7204 &value.it_value))
7205 goto efault;
7208 break;
7209 case TARGET_NR_stat:
7210 if (!(p = lock_user_string(arg1)))
7211 goto efault;
7212 ret = get_errno(stat(path(p), &st));
7213 unlock_user(p, arg1, 0);
7214 goto do_stat;
7215 case TARGET_NR_lstat:
7216 if (!(p = lock_user_string(arg1)))
7217 goto efault;
7218 ret = get_errno(lstat(path(p), &st));
7219 unlock_user(p, arg1, 0);
7220 goto do_stat;
7221 case TARGET_NR_fstat:
7223 ret = get_errno(fstat(arg1, &st));
7224 do_stat:
7225 if (!is_error(ret)) {
7226 struct target_stat *target_st;
7228 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7229 goto efault;
7230 memset(target_st, 0, sizeof(*target_st));
7231 __put_user(st.st_dev, &target_st->st_dev);
7232 __put_user(st.st_ino, &target_st->st_ino);
7233 __put_user(st.st_mode, &target_st->st_mode);
7234 __put_user(st.st_uid, &target_st->st_uid);
7235 __put_user(st.st_gid, &target_st->st_gid);
7236 __put_user(st.st_nlink, &target_st->st_nlink);
7237 __put_user(st.st_rdev, &target_st->st_rdev);
7238 __put_user(st.st_size, &target_st->st_size);
7239 __put_user(st.st_blksize, &target_st->st_blksize);
7240 __put_user(st.st_blocks, &target_st->st_blocks);
7241 __put_user(st.st_atime, &target_st->target_st_atime);
7242 __put_user(st.st_mtime, &target_st->target_st_mtime);
7243 __put_user(st.st_ctime, &target_st->target_st_ctime);
7244 unlock_user_struct(target_st, arg2, 1);
7247 break;
7248 #ifdef TARGET_NR_olduname
7249 case TARGET_NR_olduname:
7250 goto unimplemented;
7251 #endif
7252 #ifdef TARGET_NR_iopl
7253 case TARGET_NR_iopl:
7254 goto unimplemented;
7255 #endif
7256 case TARGET_NR_vhangup:
7257 ret = get_errno(vhangup());
7258 break;
7259 #ifdef TARGET_NR_idle
7260 case TARGET_NR_idle:
7261 goto unimplemented;
7262 #endif
7263 #ifdef TARGET_NR_syscall
7264 case TARGET_NR_syscall:
7265 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7266 arg6, arg7, arg8, 0);
7267 break;
7268 #endif
7269 case TARGET_NR_wait4:
7271 int status;
7272 abi_long status_ptr = arg2;
7273 struct rusage rusage, *rusage_ptr;
7274 abi_ulong target_rusage = arg4;
7275 abi_long rusage_err;
7276 if (target_rusage)
7277 rusage_ptr = &rusage;
7278 else
7279 rusage_ptr = NULL;
7280 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7281 if (!is_error(ret)) {
7282 if (status_ptr && ret) {
7283 status = host_to_target_waitstatus(status);
7284 if (put_user_s32(status, status_ptr))
7285 goto efault;
7287 if (target_rusage) {
7288 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7289 if (rusage_err) {
7290 ret = rusage_err;
7295 break;
7296 #ifdef TARGET_NR_swapoff
7297 case TARGET_NR_swapoff:
7298 if (!(p = lock_user_string(arg1)))
7299 goto efault;
7300 ret = get_errno(swapoff(p));
7301 unlock_user(p, arg1, 0);
7302 break;
7303 #endif
7304 case TARGET_NR_sysinfo:
7306 struct target_sysinfo *target_value;
7307 struct sysinfo value;
7308 ret = get_errno(sysinfo(&value));
7309 if (!is_error(ret) && arg1)
7311 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7312 goto efault;
7313 __put_user(value.uptime, &target_value->uptime);
7314 __put_user(value.loads[0], &target_value->loads[0]);
7315 __put_user(value.loads[1], &target_value->loads[1]);
7316 __put_user(value.loads[2], &target_value->loads[2]);
7317 __put_user(value.totalram, &target_value->totalram);
7318 __put_user(value.freeram, &target_value->freeram);
7319 __put_user(value.sharedram, &target_value->sharedram);
7320 __put_user(value.bufferram, &target_value->bufferram);
7321 __put_user(value.totalswap, &target_value->totalswap);
7322 __put_user(value.freeswap, &target_value->freeswap);
7323 __put_user(value.procs, &target_value->procs);
7324 __put_user(value.totalhigh, &target_value->totalhigh);
7325 __put_user(value.freehigh, &target_value->freehigh);
7326 __put_user(value.mem_unit, &target_value->mem_unit);
7327 unlock_user_struct(target_value, arg1, 1);
7330 break;
7331 #ifdef TARGET_NR_ipc
7332 case TARGET_NR_ipc:
7333 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7334 break;
7335 #endif
7336 #ifdef TARGET_NR_semget
7337 case TARGET_NR_semget:
7338 ret = get_errno(semget(arg1, arg2, arg3));
7339 break;
7340 #endif
7341 #ifdef TARGET_NR_semop
7342 case TARGET_NR_semop:
7343 ret = do_semop(arg1, arg2, arg3);
7344 break;
7345 #endif
7346 #ifdef TARGET_NR_semctl
7347 case TARGET_NR_semctl:
7348 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7349 break;
7350 #endif
7351 #ifdef TARGET_NR_msgctl
7352 case TARGET_NR_msgctl:
7353 ret = do_msgctl(arg1, arg2, arg3);
7354 break;
7355 #endif
7356 #ifdef TARGET_NR_msgget
7357 case TARGET_NR_msgget:
7358 ret = get_errno(msgget(arg1, arg2));
7359 break;
7360 #endif
7361 #ifdef TARGET_NR_msgrcv
7362 case TARGET_NR_msgrcv:
7363 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7364 break;
7365 #endif
7366 #ifdef TARGET_NR_msgsnd
7367 case TARGET_NR_msgsnd:
7368 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7369 break;
7370 #endif
7371 #ifdef TARGET_NR_shmget
7372 case TARGET_NR_shmget:
7373 ret = get_errno(shmget(arg1, arg2, arg3));
7374 break;
7375 #endif
7376 #ifdef TARGET_NR_shmctl
7377 case TARGET_NR_shmctl:
7378 ret = do_shmctl(arg1, arg2, arg3);
7379 break;
7380 #endif
7381 #ifdef TARGET_NR_shmat
7382 case TARGET_NR_shmat:
7383 ret = do_shmat(arg1, arg2, arg3);
7384 break;
7385 #endif
7386 #ifdef TARGET_NR_shmdt
7387 case TARGET_NR_shmdt:
7388 ret = do_shmdt(arg1);
7389 break;
7390 #endif
7391 case TARGET_NR_fsync:
7392 ret = get_errno(fsync(arg1));
7393 break;
7394 case TARGET_NR_clone:
7395 /* Linux manages to have three different orderings for its
7396 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7397 * match the kernel's CONFIG_CLONE_* settings.
7398 * Microblaze is further special in that it uses a sixth
7399 * implicit argument to clone for the TLS pointer.
7401 #if defined(TARGET_MICROBLAZE)
7402 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7403 #elif defined(TARGET_CLONE_BACKWARDS)
7404 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7405 #elif defined(TARGET_CLONE_BACKWARDS2)
7406 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7407 #else
7408 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7409 #endif
7410 break;
7411 #ifdef __NR_exit_group
7412 /* new thread calls */
7413 case TARGET_NR_exit_group:
7414 #ifdef TARGET_GPROF
7415 _mcleanup();
7416 #endif
7417 gdb_exit(cpu_env, arg1);
7418 ret = get_errno(exit_group(arg1));
7419 break;
7420 #endif
7421 case TARGET_NR_setdomainname:
7422 if (!(p = lock_user_string(arg1)))
7423 goto efault;
7424 ret = get_errno(setdomainname(p, arg2));
7425 unlock_user(p, arg1, 0);
7426 break;
7427 case TARGET_NR_uname:
7428 /* no need to transcode because we use the linux syscall */
7430 struct new_utsname * buf;
7432 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7433 goto efault;
7434 ret = get_errno(sys_uname(buf));
7435 if (!is_error(ret)) {
7436 /* Overwrite the native machine name with whatever is being
7437 emulated. */
7438 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7439 /* Allow the user to override the reported release. */
7440 if (qemu_uname_release && *qemu_uname_release)
7441 strcpy (buf->release, qemu_uname_release);
7443 unlock_user_struct(buf, arg1, 1);
7445 break;
7446 #ifdef TARGET_I386
7447 case TARGET_NR_modify_ldt:
7448 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7449 break;
7450 #if !defined(TARGET_X86_64)
7451 case TARGET_NR_vm86old:
7452 goto unimplemented;
7453 case TARGET_NR_vm86:
7454 ret = do_vm86(cpu_env, arg1, arg2);
7455 break;
7456 #endif
7457 #endif
7458 case TARGET_NR_adjtimex:
7459 goto unimplemented;
7460 #ifdef TARGET_NR_create_module
7461 case TARGET_NR_create_module:
7462 #endif
7463 case TARGET_NR_init_module:
7464 case TARGET_NR_delete_module:
7465 #ifdef TARGET_NR_get_kernel_syms
7466 case TARGET_NR_get_kernel_syms:
7467 #endif
7468 goto unimplemented;
7469 case TARGET_NR_quotactl:
7470 goto unimplemented;
7471 case TARGET_NR_getpgid:
7472 ret = get_errno(getpgid(arg1));
7473 break;
7474 case TARGET_NR_fchdir:
7475 ret = get_errno(fchdir(arg1));
7476 break;
7477 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7478 case TARGET_NR_bdflush:
7479 goto unimplemented;
7480 #endif
7481 #ifdef TARGET_NR_sysfs
7482 case TARGET_NR_sysfs:
7483 goto unimplemented;
7484 #endif
7485 case TARGET_NR_personality:
7486 ret = get_errno(personality(arg1));
7487 break;
7488 #ifdef TARGET_NR_afs_syscall
7489 case TARGET_NR_afs_syscall:
7490 goto unimplemented;
7491 #endif
7492 #ifdef TARGET_NR__llseek /* Not on alpha */
7493 case TARGET_NR__llseek:
7495 int64_t res;
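/* arg2 and arg3 hold the high and low 32 bits of the offset; the resulting
   64-bit position is written back through the pointer in arg4. */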
7496 #if !defined(__NR_llseek)
7497 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7498 if (res == -1) {
7499 ret = get_errno(res);
7500 } else {
7501 ret = 0;
7503 #else
7504 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7505 #endif
7506 if ((ret == 0) && put_user_s64(res, arg4)) {
7507 goto efault;
7510 break;
7511 #endif
7512 case TARGET_NR_getdents:
7513 #ifdef __NR_getdents
7514 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
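/* With a 32-bit target ABI on a 64-bit host the linux_dirent layouts differ,
   so read into a bounce buffer and repack each record into the target format. */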
7516 struct target_dirent *target_dirp;
7517 struct linux_dirent *dirp;
7518 abi_long count = arg3;
7520 dirp = malloc(count);
7521 if (!dirp) {
7522 ret = -TARGET_ENOMEM;
7523 goto fail;
7526 ret = get_errno(sys_getdents(arg1, dirp, count));
7527 if (!is_error(ret)) {
7528 struct linux_dirent *de;
7529 struct target_dirent *tde;
7530 int len = ret;
7531 int reclen, treclen;
7532 int count1, tnamelen;
7534 count1 = 0;
7535 de = dirp;
7536 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7537 goto efault;
7538 tde = target_dirp;
7539 while (len > 0) {
7540 reclen = de->d_reclen;
7541 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7542 assert(tnamelen >= 0);
7543 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7544 assert(count1 + treclen <= count);
7545 tde->d_reclen = tswap16(treclen);
7546 tde->d_ino = tswapal(de->d_ino);
7547 tde->d_off = tswapal(de->d_off);
7548 memcpy(tde->d_name, de->d_name, tnamelen);
7549 de = (struct linux_dirent *)((char *)de + reclen);
7550 len -= reclen;
7551 tde = (struct target_dirent *)((char *)tde + treclen);
7552 count1 += treclen;
7554 ret = count1;
7555 unlock_user(target_dirp, arg2, ret);
7557 free(dirp);
7559 #else
7561 struct linux_dirent *dirp;
7562 abi_long count = arg3;
7564 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7565 goto efault;
7566 ret = get_errno(sys_getdents(arg1, dirp, count));
7567 if (!is_error(ret)) {
7568 struct linux_dirent *de;
7569 int len = ret;
7570 int reclen;
7571 de = dirp;
7572 while (len > 0) {
7573 reclen = de->d_reclen;
7574 if (reclen > len)
7575 break;
7576 de->d_reclen = tswap16(reclen);
7577 tswapls(&de->d_ino);
7578 tswapls(&de->d_off);
7579 de = (struct linux_dirent *)((char *)de + reclen);
7580 len -= reclen;
7583 unlock_user(dirp, arg2, ret);
7585 #endif
7586 #else
7587 /* Implement getdents in terms of getdents64 */
7589 struct linux_dirent64 *dirp;
7590 abi_long count = arg3;
7592 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7593 if (!dirp) {
7594 goto efault;
7596 ret = get_errno(sys_getdents64(arg1, dirp, count));
7597 if (!is_error(ret)) {
7598 /* Convert the dirent64 structs to target dirent. We do this
7599 * in-place, since we can guarantee that a target_dirent is no
7600 * larger than a dirent64; however this means we have to be
7601 * careful to read everything before writing in the new format.
7603 struct linux_dirent64 *de;
7604 struct target_dirent *tde;
7605 int len = ret;
7606 int tlen = 0;
7608 de = dirp;
7609 tde = (struct target_dirent *)dirp;
7610 while (len > 0) {
7611 int namelen, treclen;
7612 int reclen = de->d_reclen;
7613 uint64_t ino = de->d_ino;
7614 int64_t off = de->d_off;
7615 uint8_t type = de->d_type;
7617 namelen = strlen(de->d_name);
7618 treclen = offsetof(struct target_dirent, d_name)
7619 + namelen + 2;
7620 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7622 memmove(tde->d_name, de->d_name, namelen + 1);
7623 tde->d_ino = tswapal(ino);
7624 tde->d_off = tswapal(off);
7625 tde->d_reclen = tswap16(treclen);
7626 /* The target_dirent type is in what was formerly a padding
7627 * byte at the end of the structure:
7629 *(((char *)tde) + treclen - 1) = type;
7631 de = (struct linux_dirent64 *)((char *)de + reclen);
7632 tde = (struct target_dirent *)((char *)tde + treclen);
7633 len -= reclen;
7634 tlen += treclen;
7636 ret = tlen;
7638 unlock_user(dirp, arg2, ret);
7640 #endif
7641 break;
7642 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7643 case TARGET_NR_getdents64:
7645 struct linux_dirent64 *dirp;
7646 abi_long count = arg3;
7647 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7648 goto efault;
7649 ret = get_errno(sys_getdents64(arg1, dirp, count));
7650 if (!is_error(ret)) {
7651 struct linux_dirent64 *de;
7652 int len = ret;
7653 int reclen;
7654 de = dirp;
7655 while (len > 0) {
7656 reclen = de->d_reclen;
7657 if (reclen > len)
7658 break;
7659 de->d_reclen = tswap16(reclen);
7660 tswap64s((uint64_t *)&de->d_ino);
7661 tswap64s((uint64_t *)&de->d_off);
7662 de = (struct linux_dirent64 *)((char *)de + reclen);
7663 len -= reclen;
7666 unlock_user(dirp, arg2, ret);
7668 break;
7669 #endif /* TARGET_NR_getdents64 */
7670 #if defined(TARGET_NR__newselect)
7671 case TARGET_NR__newselect:
7672 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7673 break;
7674 #endif
7675 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7676 # ifdef TARGET_NR_poll
7677 case TARGET_NR_poll:
7678 # endif
7679 # ifdef TARGET_NR_ppoll
7680 case TARGET_NR_ppoll:
7681 # endif
7683 struct target_pollfd *target_pfd;
7684 unsigned int nfds = arg2;
7685 int timeout = arg3;
7686 struct pollfd *pfd;
7687 unsigned int i;
7689 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7690 if (!target_pfd)
7691 goto efault;
7693 pfd = alloca(sizeof(struct pollfd) * nfds);
7694 for(i = 0; i < nfds; i++) {
7695 pfd[i].fd = tswap32(target_pfd[i].fd);
7696 pfd[i].events = tswap16(target_pfd[i].events);
7699 # ifdef TARGET_NR_ppoll
7700 if (num == TARGET_NR_ppoll) {
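/* ppoll additionally takes a timeout timespec and a signal mask; convert both
   for the host call and write the updated timeout back afterwards. */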
7701 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7702 target_sigset_t *target_set;
7703 sigset_t _set, *set = &_set;
7705 if (arg3) {
7706 if (target_to_host_timespec(timeout_ts, arg3)) {
7707 unlock_user(target_pfd, arg1, 0);
7708 goto efault;
7710 } else {
7711 timeout_ts = NULL;
7714 if (arg4) {
7715 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7716 if (!target_set) {
7717 unlock_user(target_pfd, arg1, 0);
7718 goto efault;
7720 target_to_host_sigset(set, target_set);
7721 } else {
7722 set = NULL;
7725 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7727 if (!is_error(ret) && arg3) {
7728 host_to_target_timespec(arg3, timeout_ts);
7730 if (arg4) {
7731 unlock_user(target_set, arg4, 0);
7733 } else
7734 # endif
7735 ret = get_errno(poll(pfd, nfds, timeout));
7737 if (!is_error(ret)) {
7738 for(i = 0; i < nfds; i++) {
7739 target_pfd[i].revents = tswap16(pfd[i].revents);
7742 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7744 break;
7745 #endif
7746 case TARGET_NR_flock:
7747 /* NOTE: the flock constant seems to be the same for every
7748 Linux platform */
7749 ret = get_errno(flock(arg1, arg2));
7750 break;
7751 case TARGET_NR_readv:
7753 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7754 if (vec != NULL) {
7755 ret = get_errno(readv(arg1, vec, arg3));
7756 unlock_iovec(vec, arg2, arg3, 1);
7757 } else {
7758 ret = -host_to_target_errno(errno);
7761 break;
7762 case TARGET_NR_writev:
7764 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7765 if (vec != NULL) {
7766 ret = get_errno(writev(arg1, vec, arg3));
7767 unlock_iovec(vec, arg2, arg3, 0);
7768 } else {
7769 ret = -host_to_target_errno(errno);
7772 break;
7773 case TARGET_NR_getsid:
7774 ret = get_errno(getsid(arg1));
7775 break;
7776 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7777 case TARGET_NR_fdatasync:
7778 ret = get_errno(fdatasync(arg1));
7779 break;
7780 #endif
7781 case TARGET_NR__sysctl:
7782 /* We don't implement this, but ENOTDIR is always a safe
7783 return value. */
7784 ret = -TARGET_ENOTDIR;
7785 break;
7786 case TARGET_NR_sched_getaffinity:
7788 unsigned int mask_size;
7789 unsigned long *mask;
7792 * sched_getaffinity needs multiples of ulong, so we need to take
7793 * care of mismatches between target ulong and host ulong sizes.
7795 if (arg2 & (sizeof(abi_ulong) - 1)) {
7796 ret = -TARGET_EINVAL;
7797 break;
7799 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7801 mask = alloca(mask_size);
7802 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7804 if (!is_error(ret)) {
7805 if (ret > arg2) {
7806 /* More data returned than the caller's buffer will fit.
7807 * This only happens if sizeof(abi_long) < sizeof(long)
7808 * and the caller passed us a buffer holding an odd number
7809 * of abi_longs. If the host kernel is actually using the
7810 * extra 4 bytes then fail EINVAL; otherwise we can just
7811 * ignore them and only copy the interesting part.
7813 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
7814 if (numcpus > arg2 * 8) {
7815 ret = -TARGET_EINVAL;
7816 break;
7818 ret = arg2;
7821 if (copy_to_user(arg3, mask, ret)) {
7822 goto efault;
7826 break;
7827 case TARGET_NR_sched_setaffinity:
7829 unsigned int mask_size;
7830 unsigned long *mask;
7833 * sched_setaffinity needs multiples of ulong, so we need to take
7834 * care of mismatches between target ulong and host ulong sizes.
7836 if (arg2 & (sizeof(abi_ulong) - 1)) {
7837 ret = -TARGET_EINVAL;
7838 break;
7840 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7842 mask = alloca(mask_size);
7843 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7844 goto efault;
7846 memcpy(mask, p, arg2);
7847 unlock_user_struct(p, arg3, 0);
7849 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7851 break;
7852 case TARGET_NR_sched_setparam:
7854 struct sched_param *target_schp;
7855 struct sched_param schp;
7857 if (arg2 == 0) {
7858 return -TARGET_EINVAL;
7860 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7861 goto efault;
7862 schp.sched_priority = tswap32(target_schp->sched_priority);
7863 unlock_user_struct(target_schp, arg2, 0);
7864 ret = get_errno(sched_setparam(arg1, &schp));
7866 break;
7867 case TARGET_NR_sched_getparam:
7869 struct sched_param *target_schp;
7870 struct sched_param schp;
7872 if (arg2 == 0) {
7873 return -TARGET_EINVAL;
7875 ret = get_errno(sched_getparam(arg1, &schp));
7876 if (!is_error(ret)) {
7877 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7878 goto efault;
7879 target_schp->sched_priority = tswap32(schp.sched_priority);
7880 unlock_user_struct(target_schp, arg2, 1);
7883 break;
7884 case TARGET_NR_sched_setscheduler:
7886 struct sched_param *target_schp;
7887 struct sched_param schp;
7888 if (arg3 == 0) {
7889 return -TARGET_EINVAL;
7891 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7892 goto efault;
7893 schp.sched_priority = tswap32(target_schp->sched_priority);
7894 unlock_user_struct(target_schp, arg3, 0);
7895 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7897 break;
7898 case TARGET_NR_sched_getscheduler:
7899 ret = get_errno(sched_getscheduler(arg1));
7900 break;
7901 case TARGET_NR_sched_yield:
7902 ret = get_errno(sched_yield());
7903 break;
7904 case TARGET_NR_sched_get_priority_max:
7905 ret = get_errno(sched_get_priority_max(arg1));
7906 break;
7907 case TARGET_NR_sched_get_priority_min:
7908 ret = get_errno(sched_get_priority_min(arg1));
7909 break;
7910 case TARGET_NR_sched_rr_get_interval:
7912 struct timespec ts;
7913 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7914 if (!is_error(ret)) {
7915 ret = host_to_target_timespec(arg2, &ts);
7918 break;
7919 case TARGET_NR_nanosleep:
7921 struct timespec req, rem;
7922 target_to_host_timespec(&req, arg1);
7923 ret = get_errno(nanosleep(&req, &rem));
7924 if (is_error(ret) && arg2) {
7925 host_to_target_timespec(arg2, &rem);
7928 break;
7929 #ifdef TARGET_NR_query_module
7930 case TARGET_NR_query_module:
7931 goto unimplemented;
7932 #endif
7933 #ifdef TARGET_NR_nfsservctl
7934 case TARGET_NR_nfsservctl:
7935 goto unimplemented;
7936 #endif
7937 case TARGET_NR_prctl:
7938 switch (arg1) {
7939 case PR_GET_PDEATHSIG:
7941 int deathsig;
7942 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7943 if (!is_error(ret) && arg2
7944 && put_user_ual(deathsig, arg2)) {
7945 goto efault;
7947 break;
7949 #ifdef PR_GET_NAME
7950 case PR_GET_NAME:
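/* Task names are at most 16 bytes (TASK_COMM_LEN), including the trailing NUL. */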
7952 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7953 if (!name) {
7954 goto efault;
7956 ret = get_errno(prctl(arg1, (unsigned long)name,
7957 arg3, arg4, arg5));
7958 unlock_user(name, arg2, 16);
7959 break;
7961 case PR_SET_NAME:
7963 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7964 if (!name) {
7965 goto efault;
7967 ret = get_errno(prctl(arg1, (unsigned long)name,
7968 arg3, arg4, arg5));
7969 unlock_user(name, arg2, 0);
7970 break;
7972 #endif
7973 default:
7974 /* Most prctl options have no pointer arguments */
7975 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7976 break;
7978 break;
7979 #ifdef TARGET_NR_arch_prctl
7980 case TARGET_NR_arch_prctl:
7981 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7982 ret = do_arch_prctl(cpu_env, arg1, arg2);
7983 break;
7984 #else
7985 goto unimplemented;
7986 #endif
7987 #endif
7988 #ifdef TARGET_NR_pread64
7989 case TARGET_NR_pread64:
7990 if (regpairs_aligned(cpu_env)) {
7991 arg4 = arg5;
7992 arg5 = arg6;
7994 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7995 goto efault;
7996 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7997 unlock_user(p, arg2, ret);
7998 break;
7999 case TARGET_NR_pwrite64:
8000 if (regpairs_aligned(cpu_env)) {
8001 arg4 = arg5;
8002 arg5 = arg6;
8004 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8005 goto efault;
8006 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8007 unlock_user(p, arg2, 0);
8008 break;
8009 #endif
8010 case TARGET_NR_getcwd:
8011 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8012 goto efault;
8013 ret = get_errno(sys_getcwd1(p, arg2));
8014 unlock_user(p, arg1, ret);
8015 break;
8016 case TARGET_NR_capget:
8017 case TARGET_NR_capset:
8019 struct target_user_cap_header *target_header;
8020 struct target_user_cap_data *target_data = NULL;
8021 struct __user_cap_header_struct header;
8022 struct __user_cap_data_struct data[2];
8023 struct __user_cap_data_struct *dataptr = NULL;
8024 int i, target_datalen;
8025 int data_items = 1;
8027 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8028 goto efault;
8030 header.version = tswap32(target_header->version);
8031 header.pid = tswap32(target_header->pid);
8033 if (header.version != _LINUX_CAPABILITY_VERSION) {
8034 /* Version 2 and up takes pointer to two user_data structs */
8035 data_items = 2;
8038 target_datalen = sizeof(*target_data) * data_items;
8040 if (arg2) {
8041 if (num == TARGET_NR_capget) {
8042 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8043 } else {
8044 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8046 if (!target_data) {
8047 unlock_user_struct(target_header, arg1, 0);
8048 goto efault;
8051 if (num == TARGET_NR_capset) {
8052 for (i = 0; i < data_items; i++) {
8053 data[i].effective = tswap32(target_data[i].effective);
8054 data[i].permitted = tswap32(target_data[i].permitted);
8055 data[i].inheritable = tswap32(target_data[i].inheritable);
8059 dataptr = data;
8062 if (num == TARGET_NR_capget) {
8063 ret = get_errno(capget(&header, dataptr));
8064 } else {
8065 ret = get_errno(capset(&header, dataptr));
8068 /* The kernel always updates version for both capget and capset */
8069 target_header->version = tswap32(header.version);
8070 unlock_user_struct(target_header, arg1, 1);
8072 if (arg2) {
8073 if (num == TARGET_NR_capget) {
8074 for (i = 0; i < data_items; i++) {
8075 target_data[i].effective = tswap32(data[i].effective);
8076 target_data[i].permitted = tswap32(data[i].permitted);
8077 target_data[i].inheritable = tswap32(data[i].inheritable);
8079 unlock_user(target_data, arg2, target_datalen);
8080 } else {
8081 unlock_user(target_data, arg2, 0);
8084 break;
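/*
 * Background note (not part of the original source): version 1 of the
 * Linux capability ABI uses a single __user_cap_data_struct, while
 * versions 2 and 3 pass an array of two so that 64 capability bits fit
 * into the 32-bit effective/permitted/inheritable words.  That is why
 * data_items switches to 2 whenever header.version is not the v1 value,
 * and why the copy loops above run over data_items entries in each
 * direction.
 */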
8086 case TARGET_NR_sigaltstack:
8087 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
8088 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
8089 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
8090 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8091 break;
8092 #else
8093 goto unimplemented;
8094 #endif
8096 #ifdef CONFIG_SENDFILE
8097 case TARGET_NR_sendfile:
8099 off_t *offp = NULL;
8100 off_t off;
8101 if (arg3) {
8102 ret = get_user_sal(off, arg3);
8103 if (is_error(ret)) {
8104 break;
8106 offp = &off;
8108 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8109 if (!is_error(ret) && arg3) {
8110 abi_long ret2 = put_user_sal(off, arg3);
8111 if (is_error(ret2)) {
8112 ret = ret2;
8115 break;
8117 #ifdef TARGET_NR_sendfile64
8118 case TARGET_NR_sendfile64:
8120 off_t *offp = NULL;
8121 off_t off;
8122 if (arg3) {
8123 ret = get_user_s64(off, arg3);
8124 if (is_error(ret)) {
8125 break;
8127 offp = &off;
8129 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8130 if (!is_error(ret) && arg3) {
8131 abi_long ret2 = put_user_s64(off, arg3);
8132 if (is_error(ret2)) {
8133 ret = ret2;
8136 break;
8138 #endif
8139 #else
8140 case TARGET_NR_sendfile:
8141 #ifdef TARGET_NR_sendfile64
8142 case TARGET_NR_sendfile64:
8143 #endif
8144 goto unimplemented;
8145 #endif
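/*
 * Background note (not part of the original source): sendfile() takes an
 * optional in/out offset pointer, so both variants above follow the same
 * copy-in, call, copy-out pattern for the possibly-updated offset.  The
 * plain sendfile case moves an abi_long-sized value (get_user_sal and
 * put_user_sal), whereas sendfile64 always moves a full 64-bit value
 * (get_user_s64 and put_user_s64).
 */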
8147 #ifdef TARGET_NR_getpmsg
8148 case TARGET_NR_getpmsg:
8149 goto unimplemented;
8150 #endif
8151 #ifdef TARGET_NR_putpmsg
8152 case TARGET_NR_putpmsg:
8153 goto unimplemented;
8154 #endif
8155 #ifdef TARGET_NR_vfork
8156 case TARGET_NR_vfork:
8157 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8158 0, 0, 0, 0));
8159 break;
8160 #endif
8161 #ifdef TARGET_NR_ugetrlimit
8162 case TARGET_NR_ugetrlimit:
8164 struct rlimit rlim;
8165 int resource = target_to_host_resource(arg1);
8166 ret = get_errno(getrlimit(resource, &rlim));
8167 if (!is_error(ret)) {
8168 struct target_rlimit *target_rlim;
8169 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8170 goto efault;
8171 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8172 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8173 unlock_user_struct(target_rlim, arg2, 1);
8175 break;
8177 #endif
8178 #ifdef TARGET_NR_truncate64
8179 case TARGET_NR_truncate64:
8180 if (!(p = lock_user_string(arg1)))
8181 goto efault;
8182 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8183 unlock_user(p, arg1, 0);
8184 break;
8185 #endif
8186 #ifdef TARGET_NR_ftruncate64
8187 case TARGET_NR_ftruncate64:
8188 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8189 break;
8190 #endif
8191 #ifdef TARGET_NR_stat64
8192 case TARGET_NR_stat64:
8193 if (!(p = lock_user_string(arg1)))
8194 goto efault;
8195 ret = get_errno(stat(path(p), &st));
8196 unlock_user(p, arg1, 0);
8197 if (!is_error(ret))
8198 ret = host_to_target_stat64(cpu_env, arg2, &st);
8199 break;
8200 #endif
8201 #ifdef TARGET_NR_lstat64
8202 case TARGET_NR_lstat64:
8203 if (!(p = lock_user_string(arg1)))
8204 goto efault;
8205 ret = get_errno(lstat(path(p), &st));
8206 unlock_user(p, arg1, 0);
8207 if (!is_error(ret))
8208 ret = host_to_target_stat64(cpu_env, arg2, &st);
8209 break;
8210 #endif
8211 #ifdef TARGET_NR_fstat64
8212 case TARGET_NR_fstat64:
8213 ret = get_errno(fstat(arg1, &st));
8214 if (!is_error(ret))
8215 ret = host_to_target_stat64(cpu_env, arg2, &st);
8216 break;
8217 #endif
8218 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8219 #ifdef TARGET_NR_fstatat64
8220 case TARGET_NR_fstatat64:
8221 #endif
8222 #ifdef TARGET_NR_newfstatat
8223 case TARGET_NR_newfstatat:
8224 #endif
8225 if (!(p = lock_user_string(arg2)))
8226 goto efault;
8227 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8228 if (!is_error(ret))
8229 ret = host_to_target_stat64(cpu_env, arg3, &st);
8230 break;
8231 #endif
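/*
 * Background note (not part of the original source): all of the stat64
 * variants above funnel through host_to_target_stat64(), which copies the
 * host struct stat into the guest's stat64 layout field by field,
 * byte-swapping as it goes, because field widths, ordering and padding
 * differ between host and target and a plain memcpy() would be wrong.
 */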
8232 case TARGET_NR_lchown:
8233 if (!(p = lock_user_string(arg1)))
8234 goto efault;
8235 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8236 unlock_user(p, arg1, 0);
8237 break;
8238 #ifdef TARGET_NR_getuid
8239 case TARGET_NR_getuid:
8240 ret = get_errno(high2lowuid(getuid()));
8241 break;
8242 #endif
8243 #ifdef TARGET_NR_getgid
8244 case TARGET_NR_getgid:
8245 ret = get_errno(high2lowgid(getgid()));
8246 break;
8247 #endif
8248 #ifdef TARGET_NR_geteuid
8249 case TARGET_NR_geteuid:
8250 ret = get_errno(high2lowuid(geteuid()));
8251 break;
8252 #endif
8253 #ifdef TARGET_NR_getegid
8254 case TARGET_NR_getegid:
8255 ret = get_errno(high2lowgid(getegid()));
8256 break;
8257 #endif
8258 case TARGET_NR_setreuid:
8259 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8260 break;
8261 case TARGET_NR_setregid:
8262 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8263 break;
8264 case TARGET_NR_getgroups:
8266 int gidsetsize = arg1;
8267 target_id *target_grouplist;
8268 gid_t *grouplist;
8269 int i;
8271 grouplist = alloca(gidsetsize * sizeof(gid_t));
8272 ret = get_errno(getgroups(gidsetsize, grouplist));
8273 if (gidsetsize == 0)
8274 break;
8275 if (!is_error(ret)) {
8276 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8277 if (!target_grouplist)
8278 goto efault;
8279 for(i = 0;i < ret; i++)
8280 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8281 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8284 break;
8285 case TARGET_NR_setgroups:
8287 int gidsetsize = arg1;
8288 target_id *target_grouplist;
8289 gid_t *grouplist = NULL;
8290 int i;
8291 if (gidsetsize) {
8292 grouplist = alloca(gidsetsize * sizeof(gid_t));
8293 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8294 if (!target_grouplist) {
8295 ret = -TARGET_EFAULT;
8296 goto fail;
8298 for (i = 0; i < gidsetsize; i++) {
8299 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8301 unlock_user(target_grouplist, arg2, 0);
8303 ret = get_errno(setgroups(gidsetsize, grouplist));
8305 break;
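/*
 * Background note (not part of the original source): the non-"32" calls in
 * this region are the legacy 16-bit UID/GID interfaces, so IDs pass through
 * low2highuid(), high2lowgid() and friends on the way in and out.  A minimal
 * sketch of the idea (the real macros in this file are authoritative):
 *
 *     // IDs that do not fit in 16 bits are reported as the conventional
 *     // overflow ID, 65534; the gid variant works the same way.
 *     static inline uint16_t sketch_high2lowuid(uid_t uid)
 *     {
 *         return uid > 65535 ? 65534 : uid;
 *     }
 */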
8306 case TARGET_NR_fchown:
8307 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8308 break;
8309 #if defined(TARGET_NR_fchownat)
8310 case TARGET_NR_fchownat:
8311 if (!(p = lock_user_string(arg2)))
8312 goto efault;
8313 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8314 low2highgid(arg4), arg5));
8315 unlock_user(p, arg2, 0);
8316 break;
8317 #endif
8318 #ifdef TARGET_NR_setresuid
8319 case TARGET_NR_setresuid:
8320 ret = get_errno(setresuid(low2highuid(arg1),
8321 low2highuid(arg2),
8322 low2highuid(arg3)));
8323 break;
8324 #endif
8325 #ifdef TARGET_NR_getresuid
8326 case TARGET_NR_getresuid:
8328 uid_t ruid, euid, suid;
8329 ret = get_errno(getresuid(&ruid, &euid, &suid));
8330 if (!is_error(ret)) {
8331 if (put_user_id(high2lowuid(ruid), arg1)
8332 || put_user_id(high2lowuid(euid), arg2)
8333 || put_user_id(high2lowuid(suid), arg3))
8334 goto efault;
8337 break;
8338 #endif
8339 #ifdef TARGET_NR_getresgid
8340 case TARGET_NR_setresgid:
8341 ret = get_errno(setresgid(low2highgid(arg1),
8342 low2highgid(arg2),
8343 low2highgid(arg3)));
8344 break;
8345 #endif
8346 #ifdef TARGET_NR_getresgid
8347 case TARGET_NR_getresgid:
8349 gid_t rgid, egid, sgid;
8350 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8351 if (!is_error(ret)) {
8352 if (put_user_id(high2lowgid(rgid), arg1)
8353 || put_user_id(high2lowgid(egid), arg2)
8354 || put_user_id(high2lowgid(sgid), arg3))
8355 goto efault;
8358 break;
8359 #endif
8360 case TARGET_NR_chown:
8361 if (!(p = lock_user_string(arg1)))
8362 goto efault;
8363 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8364 unlock_user(p, arg1, 0);
8365 break;
8366 case TARGET_NR_setuid:
8367 ret = get_errno(setuid(low2highuid(arg1)));
8368 break;
8369 case TARGET_NR_setgid:
8370 ret = get_errno(setgid(low2highgid(arg1)));
8371 break;
8372 case TARGET_NR_setfsuid:
8373 ret = get_errno(setfsuid(arg1));
8374 break;
8375 case TARGET_NR_setfsgid:
8376 ret = get_errno(setfsgid(arg1));
8377 break;
8379 #ifdef TARGET_NR_lchown32
8380 case TARGET_NR_lchown32:
8381 if (!(p = lock_user_string(arg1)))
8382 goto efault;
8383 ret = get_errno(lchown(p, arg2, arg3));
8384 unlock_user(p, arg1, 0);
8385 break;
8386 #endif
8387 #ifdef TARGET_NR_getuid32
8388 case TARGET_NR_getuid32:
8389 ret = get_errno(getuid());
8390 break;
8391 #endif
8393 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8394 /* Alpha specific */
8395 case TARGET_NR_getxuid:
8397 uid_t euid;
8398 euid=geteuid();
8399 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8401 ret = get_errno(getuid());
8402 break;
8403 #endif
8404 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8405 /* Alpha specific */
8406 case TARGET_NR_getxgid:
8408 uid_t egid;
8409 egid=getegid();
8410 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8412 ret = get_errno(getgid());
8413 break;
8414 #endif
8415 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8416 /* Alpha specific */
8417 case TARGET_NR_osf_getsysinfo:
8418 ret = -TARGET_EOPNOTSUPP;
8419 switch (arg1) {
8420 case TARGET_GSI_IEEE_FP_CONTROL:
8422 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8424 /* Copied from linux ieee_fpcr_to_swcr. */
8425 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8426 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8427 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8428 | SWCR_TRAP_ENABLE_DZE
8429 | SWCR_TRAP_ENABLE_OVF);
8430 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8431 | SWCR_TRAP_ENABLE_INE);
8432 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8433 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8435 if (put_user_u64 (swcr, arg2))
8436 goto efault;
8437 ret = 0;
8439 break;
8441 /* case GSI_IEEE_STATE_AT_SIGNAL:
8442 -- Not implemented in linux kernel.
8443 case GSI_UACPROC:
8444 -- Retrieves current unaligned access state; not much used.
8445 case GSI_PROC_TYPE:
8446 -- Retrieves implver information; surely not used.
8447 case GSI_GET_HWRPB:
8448 -- Grabs a copy of the HWRPB; surely not used.
8451 break;
8452 #endif
8453 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8454 /* Alpha specific */
8455 case TARGET_NR_osf_setsysinfo:
8456 ret = -TARGET_EOPNOTSUPP;
8457 switch (arg1) {
8458 case TARGET_SSI_IEEE_FP_CONTROL:
8460 uint64_t swcr, fpcr, orig_fpcr;
8462 if (get_user_u64 (swcr, arg2)) {
8463 goto efault;
8465 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8466 fpcr = orig_fpcr & FPCR_DYN_MASK;
8468 /* Copied from linux ieee_swcr_to_fpcr. */
8469 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8470 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8471 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8472 | SWCR_TRAP_ENABLE_DZE
8473 | SWCR_TRAP_ENABLE_OVF)) << 48;
8474 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8475 | SWCR_TRAP_ENABLE_INE)) << 57;
8476 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8477 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8479 cpu_alpha_store_fpcr(cpu_env, fpcr);
8480 ret = 0;
8482 break;
8484 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8486 uint64_t exc, fpcr, orig_fpcr;
8487 int si_code;
8489 if (get_user_u64(exc, arg2)) {
8490 goto efault;
8493 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8495 /* We only add to the exception status here. */
8496 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8498 cpu_alpha_store_fpcr(cpu_env, fpcr);
8499 ret = 0;
8501 /* Old exceptions are not signaled. */
8502 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8504 /* If any exceptions set by this call,
8505 and are unmasked, send a signal. */
8506 si_code = 0;
8507 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8508 si_code = TARGET_FPE_FLTRES;
8510 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8511 si_code = TARGET_FPE_FLTUND;
8513 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8514 si_code = TARGET_FPE_FLTOVF;
8516 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8517 si_code = TARGET_FPE_FLTDIV;
8519 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8520 si_code = TARGET_FPE_FLTINV;
8522 if (si_code != 0) {
8523 target_siginfo_t info;
8524 info.si_signo = SIGFPE;
8525 info.si_errno = 0;
8526 info.si_code = si_code;
8527 info._sifields._sigfault._addr
8528 = ((CPUArchState *)cpu_env)->pc;
8529 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8532 break;
8534 /* case SSI_NVPAIRS:
8535 -- Used with SSIN_UACPROC to enable unaligned accesses.
8536 case SSI_IEEE_STATE_AT_SIGNAL:
8537 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8538 -- Not implemented in linux kernel
8541 break;
8542 #endif
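/*
 * Background note (not part of the original source): the two Alpha
 * osf_getsysinfo/osf_setsysinfo cases above translate between the kernel's
 * software completion register layout (SWCR) and the hardware FPCR,
 * mirroring the kernel helpers ieee_fpcr_to_swcr() and ieee_swcr_to_fpcr():
 * status bits live from FPCR bit 35 upward and the FPCR trap-disable bits
 * are the inverse of the SWCR trap-enable bits.  The raise-exception path
 * additionally turns newly raised, unmasked exceptions into a SIGFPE with
 * the matching si_code.
 */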
8543 #ifdef TARGET_NR_osf_sigprocmask
8544 /* Alpha specific. */
8545 case TARGET_NR_osf_sigprocmask:
8547 abi_ulong mask;
8548 int how;
8549 sigset_t set, oldset;
8551 switch(arg1) {
8552 case TARGET_SIG_BLOCK:
8553 how = SIG_BLOCK;
8554 break;
8555 case TARGET_SIG_UNBLOCK:
8556 how = SIG_UNBLOCK;
8557 break;
8558 case TARGET_SIG_SETMASK:
8559 how = SIG_SETMASK;
8560 break;
8561 default:
8562 ret = -TARGET_EINVAL;
8563 goto fail;
8565 mask = arg2;
8566 target_to_host_old_sigset(&set, &mask);
8567 do_sigprocmask(how, &set, &oldset);
8568 host_to_target_old_sigset(&mask, &oldset);
8569 ret = mask;
8571 break;
8572 #endif
8574 #ifdef TARGET_NR_getgid32
8575 case TARGET_NR_getgid32:
8576 ret = get_errno(getgid());
8577 break;
8578 #endif
8579 #ifdef TARGET_NR_geteuid32
8580 case TARGET_NR_geteuid32:
8581 ret = get_errno(geteuid());
8582 break;
8583 #endif
8584 #ifdef TARGET_NR_getegid32
8585 case TARGET_NR_getegid32:
8586 ret = get_errno(getegid());
8587 break;
8588 #endif
8589 #ifdef TARGET_NR_setreuid32
8590 case TARGET_NR_setreuid32:
8591 ret = get_errno(setreuid(arg1, arg2));
8592 break;
8593 #endif
8594 #ifdef TARGET_NR_setregid32
8595 case TARGET_NR_setregid32:
8596 ret = get_errno(setregid(arg1, arg2));
8597 break;
8598 #endif
8599 #ifdef TARGET_NR_getgroups32
8600 case TARGET_NR_getgroups32:
8602 int gidsetsize = arg1;
8603 uint32_t *target_grouplist;
8604 gid_t *grouplist;
8605 int i;
8607 grouplist = alloca(gidsetsize * sizeof(gid_t));
8608 ret = get_errno(getgroups(gidsetsize, grouplist));
8609 if (gidsetsize == 0)
8610 break;
8611 if (!is_error(ret)) {
8612 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8613 if (!target_grouplist) {
8614 ret = -TARGET_EFAULT;
8615 goto fail;
8617 for(i = 0;i < ret; i++)
8618 target_grouplist[i] = tswap32(grouplist[i]);
8619 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8622 break;
8623 #endif
8624 #ifdef TARGET_NR_setgroups32
8625 case TARGET_NR_setgroups32:
8627 int gidsetsize = arg1;
8628 uint32_t *target_grouplist;
8629 gid_t *grouplist;
8630 int i;
8632 grouplist = alloca(gidsetsize * sizeof(gid_t));
8633 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8634 if (!target_grouplist) {
8635 ret = -TARGET_EFAULT;
8636 goto fail;
8638 for(i = 0;i < gidsetsize; i++)
8639 grouplist[i] = tswap32(target_grouplist[i]);
8640 unlock_user(target_grouplist, arg2, 0);
8641 ret = get_errno(setgroups(gidsetsize, grouplist));
8643 break;
8644 #endif
8645 #ifdef TARGET_NR_fchown32
8646 case TARGET_NR_fchown32:
8647 ret = get_errno(fchown(arg1, arg2, arg3));
8648 break;
8649 #endif
8650 #ifdef TARGET_NR_setresuid32
8651 case TARGET_NR_setresuid32:
8652 ret = get_errno(setresuid(arg1, arg2, arg3));
8653 break;
8654 #endif
8655 #ifdef TARGET_NR_getresuid32
8656 case TARGET_NR_getresuid32:
8658 uid_t ruid, euid, suid;
8659 ret = get_errno(getresuid(&ruid, &euid, &suid));
8660 if (!is_error(ret)) {
8661 if (put_user_u32(ruid, arg1)
8662 || put_user_u32(euid, arg2)
8663 || put_user_u32(suid, arg3))
8664 goto efault;
8667 break;
8668 #endif
8669 #ifdef TARGET_NR_setresgid32
8670 case TARGET_NR_setresgid32:
8671 ret = get_errno(setresgid(arg1, arg2, arg3));
8672 break;
8673 #endif
8674 #ifdef TARGET_NR_getresgid32
8675 case TARGET_NR_getresgid32:
8677 gid_t rgid, egid, sgid;
8678 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8679 if (!is_error(ret)) {
8680 if (put_user_u32(rgid, arg1)
8681 || put_user_u32(egid, arg2)
8682 || put_user_u32(sgid, arg3))
8683 goto efault;
8686 break;
8687 #endif
8688 #ifdef TARGET_NR_chown32
8689 case TARGET_NR_chown32:
8690 if (!(p = lock_user_string(arg1)))
8691 goto efault;
8692 ret = get_errno(chown(p, arg2, arg3));
8693 unlock_user(p, arg1, 0);
8694 break;
8695 #endif
8696 #ifdef TARGET_NR_setuid32
8697 case TARGET_NR_setuid32:
8698 ret = get_errno(setuid(arg1));
8699 break;
8700 #endif
8701 #ifdef TARGET_NR_setgid32
8702 case TARGET_NR_setgid32:
8703 ret = get_errno(setgid(arg1));
8704 break;
8705 #endif
8706 #ifdef TARGET_NR_setfsuid32
8707 case TARGET_NR_setfsuid32:
8708 ret = get_errno(setfsuid(arg1));
8709 break;
8710 #endif
8711 #ifdef TARGET_NR_setfsgid32
8712 case TARGET_NR_setfsgid32:
8713 ret = get_errno(setfsgid(arg1));
8714 break;
8715 #endif
8717 case TARGET_NR_pivot_root:
8718 goto unimplemented;
8719 #ifdef TARGET_NR_mincore
8720 case TARGET_NR_mincore:
8722 void *a;
8723 ret = -TARGET_EFAULT;
8724 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8725 goto efault;
8726 if (!(p = lock_user_string(arg3)))
8727 goto mincore_fail;
8728 ret = get_errno(mincore(a, arg2, p));
8729 unlock_user(p, arg3, ret);
8730 mincore_fail:
8731 unlock_user(a, arg1, 0);
8733 break;
8734 #endif
8735 #ifdef TARGET_NR_arm_fadvise64_64
8736 case TARGET_NR_arm_fadvise64_64:
8737 {
8738 /*
8739 * arm_fadvise64_64 looks like fadvise64_64 but
8740 * with different argument order
8741 */
8742 abi_long temp;
8743 temp = arg3;
8744 arg3 = arg4;
8745 arg4 = temp;
8746 }
8747 #endif
8748 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8749 #ifdef TARGET_NR_fadvise64_64
8750 case TARGET_NR_fadvise64_64:
8751 #endif
8752 #ifdef TARGET_NR_fadvise64
8753 case TARGET_NR_fadvise64:
8754 #endif
8755 #ifdef TARGET_S390X
8756 switch (arg4) {
8757 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8758 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8759 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8760 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8761 default: break;
8763 #endif
8764 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8765 break;
8766 #endif
8767 #ifdef TARGET_NR_madvise
8768 case TARGET_NR_madvise:
8769 /* A straight passthrough may not be safe because qemu sometimes
8770 turns private file-backed mappings into anonymous mappings.
8771 This will break MADV_DONTNEED.
8772 This is a hint, so ignoring and returning success is ok. */
8773 ret = get_errno(0);
8774 break;
8775 #endif
8776 #if TARGET_ABI_BITS == 32
8777 case TARGET_NR_fcntl64:
8779 int cmd;
8780 struct flock64 fl;
8781 struct target_flock64 *target_fl;
8782 #ifdef TARGET_ARM
8783 struct target_eabi_flock64 *target_efl;
8784 #endif
8786 cmd = target_to_host_fcntl_cmd(arg2);
8787 if (cmd == -TARGET_EINVAL) {
8788 ret = cmd;
8789 break;
8792 switch(arg2) {
8793 case TARGET_F_GETLK64:
8794 #ifdef TARGET_ARM
8795 if (((CPUARMState *)cpu_env)->eabi) {
8796 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8797 goto efault;
8798 fl.l_type = tswap16(target_efl->l_type);
8799 fl.l_whence = tswap16(target_efl->l_whence);
8800 fl.l_start = tswap64(target_efl->l_start);
8801 fl.l_len = tswap64(target_efl->l_len);
8802 fl.l_pid = tswap32(target_efl->l_pid);
8803 unlock_user_struct(target_efl, arg3, 0);
8804 } else
8805 #endif
8807 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8808 goto efault;
8809 fl.l_type = tswap16(target_fl->l_type);
8810 fl.l_whence = tswap16(target_fl->l_whence);
8811 fl.l_start = tswap64(target_fl->l_start);
8812 fl.l_len = tswap64(target_fl->l_len);
8813 fl.l_pid = tswap32(target_fl->l_pid);
8814 unlock_user_struct(target_fl, arg3, 0);
8816 ret = get_errno(fcntl(arg1, cmd, &fl));
8817 if (ret == 0) {
8818 #ifdef TARGET_ARM
8819 if (((CPUARMState *)cpu_env)->eabi) {
8820 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8821 goto efault;
8822 target_efl->l_type = tswap16(fl.l_type);
8823 target_efl->l_whence = tswap16(fl.l_whence);
8824 target_efl->l_start = tswap64(fl.l_start);
8825 target_efl->l_len = tswap64(fl.l_len);
8826 target_efl->l_pid = tswap32(fl.l_pid);
8827 unlock_user_struct(target_efl, arg3, 1);
8828 } else
8829 #endif
8831 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8832 goto efault;
8833 target_fl->l_type = tswap16(fl.l_type);
8834 target_fl->l_whence = tswap16(fl.l_whence);
8835 target_fl->l_start = tswap64(fl.l_start);
8836 target_fl->l_len = tswap64(fl.l_len);
8837 target_fl->l_pid = tswap32(fl.l_pid);
8838 unlock_user_struct(target_fl, arg3, 1);
8841 break;
8843 case TARGET_F_SETLK64:
8844 case TARGET_F_SETLKW64:
8845 #ifdef TARGET_ARM
8846 if (((CPUARMState *)cpu_env)->eabi) {
8847 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8848 goto efault;
8849 fl.l_type = tswap16(target_efl->l_type);
8850 fl.l_whence = tswap16(target_efl->l_whence);
8851 fl.l_start = tswap64(target_efl->l_start);
8852 fl.l_len = tswap64(target_efl->l_len);
8853 fl.l_pid = tswap32(target_efl->l_pid);
8854 unlock_user_struct(target_efl, arg3, 0);
8855 } else
8856 #endif
8858 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8859 goto efault;
8860 fl.l_type = tswap16(target_fl->l_type);
8861 fl.l_whence = tswap16(target_fl->l_whence);
8862 fl.l_start = tswap64(target_fl->l_start);
8863 fl.l_len = tswap64(target_fl->l_len);
8864 fl.l_pid = tswap32(target_fl->l_pid);
8865 unlock_user_struct(target_fl, arg3, 0);
8867 ret = get_errno(fcntl(arg1, cmd, &fl));
8868 break;
8869 default:
8870 ret = do_fcntl(arg1, arg2, arg3);
8871 break;
8873 break;
8875 #endif
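/*
 * Background note (not part of the original source): fcntl64 needs its own
 * flock conversion because the struct flock64 layout is ABI specific.  On
 * ARM the EABI variant aligns the 64-bit l_start/l_len members to 8 bytes,
 * introducing padding the old-ABI layout does not have, which is why the
 * code above keys off ((CPUARMState *)cpu_env)->eabi and uses
 * target_eabi_flock64 in that case.  Both branches copy the fields one by
 * one with tswap16/tswap32/tswap64 so byte order is corrected as well.
 */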
8876 #ifdef TARGET_NR_cacheflush
8877 case TARGET_NR_cacheflush:
8878 /* self-modifying code is handled automatically, so nothing needed */
8879 ret = 0;
8880 break;
8881 #endif
8882 #ifdef TARGET_NR_security
8883 case TARGET_NR_security:
8884 goto unimplemented;
8885 #endif
8886 #ifdef TARGET_NR_getpagesize
8887 case TARGET_NR_getpagesize:
8888 ret = TARGET_PAGE_SIZE;
8889 break;
8890 #endif
8891 case TARGET_NR_gettid:
8892 ret = get_errno(gettid());
8893 break;
8894 #ifdef TARGET_NR_readahead
8895 case TARGET_NR_readahead:
8896 #if TARGET_ABI_BITS == 32
8897 if (regpairs_aligned(cpu_env)) {
8898 arg2 = arg3;
8899 arg3 = arg4;
8900 arg4 = arg5;
8902 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8903 #else
8904 ret = get_errno(readahead(arg1, arg2, arg3));
8905 #endif
8906 break;
8907 #endif
8908 #ifdef CONFIG_ATTR
8909 #ifdef TARGET_NR_setxattr
8910 case TARGET_NR_listxattr:
8911 case TARGET_NR_llistxattr:
8913 void *p, *b = 0;
8914 if (arg2) {
8915 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8916 if (!b) {
8917 ret = -TARGET_EFAULT;
8918 break;
8921 p = lock_user_string(arg1);
8922 if (p) {
8923 if (num == TARGET_NR_listxattr) {
8924 ret = get_errno(listxattr(p, b, arg3));
8925 } else {
8926 ret = get_errno(llistxattr(p, b, arg3));
8928 } else {
8929 ret = -TARGET_EFAULT;
8931 unlock_user(p, arg1, 0);
8932 unlock_user(b, arg2, arg3);
8933 break;
8935 case TARGET_NR_flistxattr:
8937 void *b = 0;
8938 if (arg2) {
8939 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8940 if (!b) {
8941 ret = -TARGET_EFAULT;
8942 break;
8945 ret = get_errno(flistxattr(arg1, b, arg3));
8946 unlock_user(b, arg2, arg3);
8947 break;
8949 case TARGET_NR_setxattr:
8950 case TARGET_NR_lsetxattr:
8952 void *p, *n, *v = 0;
8953 if (arg3) {
8954 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8955 if (!v) {
8956 ret = -TARGET_EFAULT;
8957 break;
8960 p = lock_user_string(arg1);
8961 n = lock_user_string(arg2);
8962 if (p && n) {
8963 if (num == TARGET_NR_setxattr) {
8964 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8965 } else {
8966 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8968 } else {
8969 ret = -TARGET_EFAULT;
8971 unlock_user(p, arg1, 0);
8972 unlock_user(n, arg2, 0);
8973 unlock_user(v, arg3, 0);
8975 break;
8976 case TARGET_NR_fsetxattr:
8978 void *n, *v = 0;
8979 if (arg3) {
8980 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8981 if (!v) {
8982 ret = -TARGET_EFAULT;
8983 break;
8986 n = lock_user_string(arg2);
8987 if (n) {
8988 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8989 } else {
8990 ret = -TARGET_EFAULT;
8992 unlock_user(n, arg2, 0);
8993 unlock_user(v, arg3, 0);
8995 break;
8996 case TARGET_NR_getxattr:
8997 case TARGET_NR_lgetxattr:
8999 void *p, *n, *v = 0;
9000 if (arg3) {
9001 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9002 if (!v) {
9003 ret = -TARGET_EFAULT;
9004 break;
9007 p = lock_user_string(arg1);
9008 n = lock_user_string(arg2);
9009 if (p && n) {
9010 if (num == TARGET_NR_getxattr) {
9011 ret = get_errno(getxattr(p, n, v, arg4));
9012 } else {
9013 ret = get_errno(lgetxattr(p, n, v, arg4));
9015 } else {
9016 ret = -TARGET_EFAULT;
9018 unlock_user(p, arg1, 0);
9019 unlock_user(n, arg2, 0);
9020 unlock_user(v, arg3, arg4);
9022 break;
9023 case TARGET_NR_fgetxattr:
9025 void *n, *v = 0;
9026 if (arg3) {
9027 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9028 if (!v) {
9029 ret = -TARGET_EFAULT;
9030 break;
9033 n = lock_user_string(arg2);
9034 if (n) {
9035 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9036 } else {
9037 ret = -TARGET_EFAULT;
9039 unlock_user(n, arg2, 0);
9040 unlock_user(v, arg3, arg4);
9042 break;
9043 case TARGET_NR_removexattr:
9044 case TARGET_NR_lremovexattr:
9046 void *p, *n;
9047 p = lock_user_string(arg1);
9048 n = lock_user_string(arg2);
9049 if (p && n) {
9050 if (num == TARGET_NR_removexattr) {
9051 ret = get_errno(removexattr(p, n));
9052 } else {
9053 ret = get_errno(lremovexattr(p, n));
9055 } else {
9056 ret = -TARGET_EFAULT;
9058 unlock_user(p, arg1, 0);
9059 unlock_user(n, arg2, 0);
9061 break;
9062 case TARGET_NR_fremovexattr:
9064 void *n;
9065 n = lock_user_string(arg2);
9066 if (n) {
9067 ret = get_errno(fremovexattr(arg1, n));
9068 } else {
9069 ret = -TARGET_EFAULT;
9071 unlock_user(n, arg2, 0);
9073 break;
9074 #endif
9075 #endif /* CONFIG_ATTR */
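/*
 * Background note (not part of the original source): throughout the xattr
 * block the final lock_user() argument is the "copy" flag -- 1 when the
 * guest buffer's existing contents are needed (VERIFY_READ paths such as
 * setxattr values), 0 when the buffer is only written (VERIFY_WRITE paths
 * such as listxattr/getxattr output).  Likewise the length passed to
 * unlock_user() is how many bytes get copied back to the guest, which is
 * why read-only buffers are unlocked with length 0.
 */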
9076 #ifdef TARGET_NR_set_thread_area
9077 case TARGET_NR_set_thread_area:
9078 #if defined(TARGET_MIPS)
9079 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9080 ret = 0;
9081 break;
9082 #elif defined(TARGET_CRIS)
9083 if (arg1 & 0xff)
9084 ret = -TARGET_EINVAL;
9085 else {
9086 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9087 ret = 0;
9089 break;
9090 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9091 ret = do_set_thread_area(cpu_env, arg1);
9092 break;
9093 #elif defined(TARGET_M68K)
9095 TaskState *ts = cpu->opaque;
9096 ts->tp_value = arg1;
9097 ret = 0;
9098 break;
9100 #else
9101 goto unimplemented_nowarn;
9102 #endif
9103 #endif
9104 #ifdef TARGET_NR_get_thread_area
9105 case TARGET_NR_get_thread_area:
9106 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9107 ret = do_get_thread_area(cpu_env, arg1);
9108 break;
9109 #elif defined(TARGET_M68K)
9111 TaskState *ts = cpu->opaque;
9112 ret = ts->tp_value;
9113 break;
9115 #else
9116 goto unimplemented_nowarn;
9117 #endif
9118 #endif
9119 #ifdef TARGET_NR_getdomainname
9120 case TARGET_NR_getdomainname:
9121 goto unimplemented_nowarn;
9122 #endif
9124 #ifdef TARGET_NR_clock_gettime
9125 case TARGET_NR_clock_gettime:
9127 struct timespec ts;
9128 ret = get_errno(clock_gettime(arg1, &ts));
9129 if (!is_error(ret)) {
9130 host_to_target_timespec(arg2, &ts);
9132 break;
9134 #endif
9135 #ifdef TARGET_NR_clock_getres
9136 case TARGET_NR_clock_getres:
9138 struct timespec ts;
9139 ret = get_errno(clock_getres(arg1, &ts));
9140 if (!is_error(ret)) {
9141 host_to_target_timespec(arg2, &ts);
9143 break;
9145 #endif
9146 #ifdef TARGET_NR_clock_nanosleep
9147 case TARGET_NR_clock_nanosleep:
9149 struct timespec ts;
9150 target_to_host_timespec(&ts, arg3);
9151 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9152 if (arg4)
9153 host_to_target_timespec(arg4, &ts);
9155 #if defined(TARGET_PPC)
9156 /* clock_nanosleep is odd in that it returns positive errno values.
9157 * On PPC, CR0 bit 3 should be set in such a situation. */
9158 if (ret) {
9159 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9161 #endif
9162 break;
9164 #endif
9166 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9167 case TARGET_NR_set_tid_address:
9168 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9169 break;
9170 #endif
9172 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9173 case TARGET_NR_tkill:
9174 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9175 break;
9176 #endif
9178 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9179 case TARGET_NR_tgkill:
9180 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9181 target_to_host_signal(arg3)));
9182 break;
9183 #endif
9185 #ifdef TARGET_NR_set_robust_list
9186 case TARGET_NR_set_robust_list:
9187 case TARGET_NR_get_robust_list:
9188 /* The ABI for supporting robust futexes has userspace pass
9189 * the kernel a pointer to a linked list which is updated by
9190 * userspace after the syscall; the list is walked by the kernel
9191 * when the thread exits. Since the linked list in QEMU guest
9192 * memory isn't a valid linked list for the host and we have
9193 * no way to reliably intercept the thread-death event, we can't
9194 * support these. Silently return ENOSYS so that guest userspace
9195 * falls back to a non-robust futex implementation (which should
9196 * be OK except in the corner case of the guest crashing while
9197 * holding a mutex that is shared with another process via
9198 * shared memory).
9200 goto unimplemented_nowarn;
9201 #endif
9203 #if defined(TARGET_NR_utimensat)
9204 case TARGET_NR_utimensat:
9206 struct timespec *tsp, ts[2];
9207 if (!arg3) {
9208 tsp = NULL;
9209 } else {
9210 target_to_host_timespec(ts, arg3);
9211 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9212 tsp = ts;
9214 if (!arg2)
9215 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9216 else {
9217 if (!(p = lock_user_string(arg2))) {
9218 ret = -TARGET_EFAULT;
9219 goto fail;
9221 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9222 unlock_user(p, arg2, 0);
9225 break;
9226 #endif
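/*
 * Background note (not part of the original source): utimensat() accepts
 * either a NULL times pointer, meaning "set both timestamps to now", or an
 * array of two timespecs (atime then mtime), which is why the code above
 * converts ts[0] and ts[1] separately and passes tsp == NULL when the guest
 * supplied a null pointer.
 */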
9227 case TARGET_NR_futex:
9228 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9229 break;
9230 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9231 case TARGET_NR_inotify_init:
9232 ret = get_errno(sys_inotify_init());
9233 break;
9234 #endif
9235 #ifdef CONFIG_INOTIFY1
9236 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9237 case TARGET_NR_inotify_init1:
9238 ret = get_errno(sys_inotify_init1(arg1));
9239 break;
9240 #endif
9241 #endif
9242 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9243 case TARGET_NR_inotify_add_watch:
9244 p = lock_user_string(arg2);
9245 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9246 unlock_user(p, arg2, 0);
9247 break;
9248 #endif
9249 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9250 case TARGET_NR_inotify_rm_watch:
9251 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9252 break;
9253 #endif
9255 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9256 case TARGET_NR_mq_open:
9258 struct mq_attr posix_mq_attr, *attrp;
9260 p = lock_user_string(arg1 - 1);
9261 if (arg4 != 0) {
9262 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9263 attrp = &posix_mq_attr;
9264 } else {
9265 attrp = 0;
9267 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9268 unlock_user (p, arg1, 0);
9270 break;
9272 case TARGET_NR_mq_unlink:
9273 p = lock_user_string(arg1 - 1);
9274 ret = get_errno(mq_unlink(p));
9275 unlock_user (p, arg1, 0);
9276 break;
9278 case TARGET_NR_mq_timedsend:
9280 struct timespec ts;
9282 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9283 if (arg5 != 0) {
9284 target_to_host_timespec(&ts, arg5);
9285 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9286 host_to_target_timespec(arg5, &ts);
9288 else
9289 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9290 unlock_user (p, arg2, arg3);
9292 break;
9294 case TARGET_NR_mq_timedreceive:
9296 struct timespec ts;
9297 unsigned int prio;
9299 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9300 if (arg5 != 0) {
9301 target_to_host_timespec(&ts, arg5);
9302 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9303 host_to_target_timespec(arg5, &ts);
9305 else
9306 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9307 unlock_user (p, arg2, arg3);
9308 if (arg4 != 0)
9309 put_user_u32(prio, arg4);
9311 break;
9313 /* Not implemented for now... */
9314 /* case TARGET_NR_mq_notify: */
9315 /* break; */
9317 case TARGET_NR_mq_getsetattr:
9319 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9320 ret = 0;
9321 if (arg3 != 0) {
9322 ret = mq_getattr(arg1, &posix_mq_attr_out);
9323 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9325 if (arg2 != 0) {
9326 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9327 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9331 break;
9332 #endif
9334 #ifdef CONFIG_SPLICE
9335 #ifdef TARGET_NR_tee
9336 case TARGET_NR_tee:
9338 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9340 break;
9341 #endif
9342 #ifdef TARGET_NR_splice
9343 case TARGET_NR_splice:
9345 loff_t loff_in, loff_out;
9346 loff_t *ploff_in = NULL, *ploff_out = NULL;
9347 if(arg2) {
9348 get_user_u64(loff_in, arg2);
9349 ploff_in = &loff_in;
9351 if(arg4) {
9352 get_user_u64(loff_out, arg4);
9353 ploff_out = &loff_out;
9355 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9357 break;
9358 #endif
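/*
 * Background note (not part of the original source): splice()'s off_in and
 * off_out parameters are optional loff_t pointers (arg2 and arg4 here).
 * When present they are read into local loff_t variables with
 * get_user_u64() and passed by address, since guest pointers cannot be
 * handed to the host kernel directly.
 */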
9359 #ifdef TARGET_NR_vmsplice
9360 case TARGET_NR_vmsplice:
9362 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9363 if (vec != NULL) {
9364 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9365 unlock_iovec(vec, arg2, arg3, 0);
9366 } else {
9367 ret = -host_to_target_errno(errno);
9370 break;
9371 #endif
9372 #endif /* CONFIG_SPLICE */
9373 #ifdef CONFIG_EVENTFD
9374 #if defined(TARGET_NR_eventfd)
9375 case TARGET_NR_eventfd:
9376 ret = get_errno(eventfd(arg1, 0));
9377 break;
9378 #endif
9379 #if defined(TARGET_NR_eventfd2)
9380 case TARGET_NR_eventfd2:
9382 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9383 if (arg2 & TARGET_O_NONBLOCK) {
9384 host_flags |= O_NONBLOCK;
9386 if (arg2 & TARGET_O_CLOEXEC) {
9387 host_flags |= O_CLOEXEC;
9389 ret = get_errno(eventfd(arg1, host_flags));
9390 break;
9392 #endif
9393 #endif /* CONFIG_EVENTFD */
9394 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9395 case TARGET_NR_fallocate:
9396 #if TARGET_ABI_BITS == 32
9397 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9398 target_offset64(arg5, arg6)));
9399 #else
9400 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9401 #endif
9402 break;
9403 #endif
9404 #if defined(CONFIG_SYNC_FILE_RANGE)
9405 #if defined(TARGET_NR_sync_file_range)
9406 case TARGET_NR_sync_file_range:
9407 #if TARGET_ABI_BITS == 32
9408 #if defined(TARGET_MIPS)
9409 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9410 target_offset64(arg5, arg6), arg7));
9411 #else
9412 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9413 target_offset64(arg4, arg5), arg6));
9414 #endif /* !TARGET_MIPS */
9415 #else
9416 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9417 #endif
9418 break;
9419 #endif
9420 #if defined(TARGET_NR_sync_file_range2)
9421 case TARGET_NR_sync_file_range2:
9422 /* This is like sync_file_range but the arguments are reordered */
9423 #if TARGET_ABI_BITS == 32
9424 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9425 target_offset64(arg5, arg6), arg2));
9426 #else
9427 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9428 #endif
9429 break;
9430 #endif
9431 #endif
9432 #if defined(CONFIG_EPOLL)
9433 #if defined(TARGET_NR_epoll_create)
9434 case TARGET_NR_epoll_create:
9435 ret = get_errno(epoll_create(arg1));
9436 break;
9437 #endif
9438 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9439 case TARGET_NR_epoll_create1:
9440 ret = get_errno(epoll_create1(arg1));
9441 break;
9442 #endif
9443 #if defined(TARGET_NR_epoll_ctl)
9444 case TARGET_NR_epoll_ctl:
9446 struct epoll_event ep;
9447 struct epoll_event *epp = 0;
9448 if (arg4) {
9449 struct target_epoll_event *target_ep;
9450 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9451 goto efault;
9453 ep.events = tswap32(target_ep->events);
9454 /* The epoll_data_t union is just opaque data to the kernel,
9455 * so we transfer all 64 bits across and need not worry what
9456 * actual data type it is.
9458 ep.data.u64 = tswap64(target_ep->data.u64);
9459 unlock_user_struct(target_ep, arg4, 0);
9460 epp = &ep;
9462 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9463 break;
9465 #endif
9467 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9468 #define IMPLEMENT_EPOLL_PWAIT
9469 #endif
9470 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9471 #if defined(TARGET_NR_epoll_wait)
9472 case TARGET_NR_epoll_wait:
9473 #endif
9474 #if defined(IMPLEMENT_EPOLL_PWAIT)
9475 case TARGET_NR_epoll_pwait:
9476 #endif
9478 struct target_epoll_event *target_ep;
9479 struct epoll_event *ep;
9480 int epfd = arg1;
9481 int maxevents = arg3;
9482 int timeout = arg4;
9484 target_ep = lock_user(VERIFY_WRITE, arg2,
9485 maxevents * sizeof(struct target_epoll_event), 1);
9486 if (!target_ep) {
9487 goto efault;
9490 ep = alloca(maxevents * sizeof(struct epoll_event));
9492 switch (num) {
9493 #if defined(IMPLEMENT_EPOLL_PWAIT)
9494 case TARGET_NR_epoll_pwait:
9496 target_sigset_t *target_set;
9497 sigset_t _set, *set = &_set;
9499 if (arg5) {
9500 target_set = lock_user(VERIFY_READ, arg5,
9501 sizeof(target_sigset_t), 1);
9502 if (!target_set) {
9503 unlock_user(target_ep, arg2, 0);
9504 goto efault;
9506 target_to_host_sigset(set, target_set);
9507 unlock_user(target_set, arg5, 0);
9508 } else {
9509 set = NULL;
9512 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9513 break;
9515 #endif
9516 #if defined(TARGET_NR_epoll_wait)
9517 case TARGET_NR_epoll_wait:
9518 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9519 break;
9520 #endif
9521 default:
9522 ret = -TARGET_ENOSYS;
9524 if (!is_error(ret)) {
9525 int i;
9526 for (i = 0; i < ret; i++) {
9527 target_ep[i].events = tswap32(ep[i].events);
9528 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9531 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9532 break;
9534 #endif
9535 #endif
9536 #ifdef TARGET_NR_prlimit64
9537 case TARGET_NR_prlimit64:
9539 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9540 struct target_rlimit64 *target_rnew, *target_rold;
9541 struct host_rlimit64 rnew, rold, *rnewp = 0;
9542 int resource = target_to_host_resource(arg2);
9543 if (arg3) {
9544 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9545 goto efault;
9547 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9548 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9549 unlock_user_struct(target_rnew, arg3, 0);
9550 rnewp = &rnew;
9553 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
9554 if (!is_error(ret) && arg4) {
9555 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9556 goto efault;
9558 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9559 target_rold->rlim_max = tswap64(rold.rlim_max);
9560 unlock_user_struct(target_rold, arg4, 1);
9562 break;
9564 #endif
9565 #ifdef TARGET_NR_gethostname
9566 case TARGET_NR_gethostname:
9568 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9569 if (name) {
9570 ret = get_errno(gethostname(name, arg2));
9571 unlock_user(name, arg1, arg2);
9572 } else {
9573 ret = -TARGET_EFAULT;
9575 break;
9577 #endif
9578 #ifdef TARGET_NR_atomic_cmpxchg_32
9579 case TARGET_NR_atomic_cmpxchg_32:
9581 /* should use start_exclusive from main.c */
9582 abi_ulong mem_value;
9583 if (get_user_u32(mem_value, arg6)) {
9584 target_siginfo_t info;
9585 info.si_signo = SIGSEGV;
9586 info.si_errno = 0;
9587 info.si_code = TARGET_SEGV_MAPERR;
9588 info._sifields._sigfault._addr = arg6;
9589 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9590 ret = 0xdeadbeef;
9593 if (mem_value == arg2)
9594 put_user_u32(arg1, arg6);
9595 ret = mem_value;
9596 break;
9598 #endif
9599 #ifdef TARGET_NR_atomic_barrier
9600 case TARGET_NR_atomic_barrier:
9602 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9603 ret = 0;
9604 break;
9606 #endif
9608 #ifdef TARGET_NR_timer_create
9609 case TARGET_NR_timer_create:
9611 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9613 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9615 int clkid = arg1;
9616 int timer_index = next_free_host_timer();
9618 if (timer_index < 0) {
9619 ret = -TARGET_EAGAIN;
9620 } else {
9621 timer_t *phtimer = g_posix_timers + timer_index;
9623 if (arg2) {
9624 phost_sevp = &host_sevp;
9625 ret = target_to_host_sigevent(phost_sevp, arg2);
9626 if (ret != 0) {
9627 break;
9631 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9632 if (ret) {
9633 phtimer = NULL;
9634 } else {
9635 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
9636 goto efault;
9640 break;
9642 #endif
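/*
 * Background note (not part of the original source): host timer_t handles
 * live in the g_posix_timers[] table and the guest receives a small index
 * tagged with TIMER_MAGIC rather than the raw host handle.  The
 * timer_settime/gettime/getoverrun/delete cases below all begin by mapping
 * that cookie back to a table slot.  A minimal sketch of the idea, assuming
 * a TIMER_MAGIC_MASK covering the tag bits (the real get_timer_id() in this
 * file is authoritative and also range-checks the index):
 *
 *     static inline int sketch_get_timer_id(abi_long cookie)
 *     {
 *         if ((cookie & TIMER_MAGIC_MASK) != TIMER_MAGIC)
 *             return -TARGET_EINVAL;   // not a cookie we handed out
 *         return cookie & ~TIMER_MAGIC_MASK;
 *     }
 */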
9644 #ifdef TARGET_NR_timer_settime
9645 case TARGET_NR_timer_settime:
9647 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9648 * struct itimerspec * old_value */
9649 target_timer_t timerid = get_timer_id(arg1);
9651 if (timerid < 0) {
9652 ret = timerid;
9653 } else if (arg3 == 0) {
9654 ret = -TARGET_EINVAL;
9655 } else {
9656 timer_t htimer = g_posix_timers[timerid];
9657 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9659 target_to_host_itimerspec(&hspec_new, arg3);
9660 ret = get_errno(
9661 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9662 if (arg4) { host_to_target_itimerspec(arg4, &hspec_old); }
9664 break;
9666 #endif
9668 #ifdef TARGET_NR_timer_gettime
9669 case TARGET_NR_timer_gettime:
9671 /* args: timer_t timerid, struct itimerspec *curr_value */
9672 target_timer_t timerid = get_timer_id(arg1);
9674 if (timerid < 0) {
9675 ret = timerid;
9676 } else if (!arg2) {
9677 ret = -TARGET_EFAULT;
9678 } else {
9679 timer_t htimer = g_posix_timers[timerid];
9680 struct itimerspec hspec;
9681 ret = get_errno(timer_gettime(htimer, &hspec));
9683 if (host_to_target_itimerspec(arg2, &hspec)) {
9684 ret = -TARGET_EFAULT;
9687 break;
9689 #endif
9691 #ifdef TARGET_NR_timer_getoverrun
9692 case TARGET_NR_timer_getoverrun:
9694 /* args: timer_t timerid */
9695 target_timer_t timerid = get_timer_id(arg1);
9697 if (timerid < 0) {
9698 ret = timerid;
9699 } else {
9700 timer_t htimer = g_posix_timers[timerid];
9701 ret = get_errno(timer_getoverrun(htimer));
9703 break;
9705 #endif
9707 #ifdef TARGET_NR_timer_delete
9708 case TARGET_NR_timer_delete:
9710 /* args: timer_t timerid */
9711 target_timer_t timerid = get_timer_id(arg1);
9713 if (timerid < 0) {
9714 ret = timerid;
9715 } else {
9716 timer_t htimer = g_posix_timers[timerid];
9717 ret = get_errno(timer_delete(htimer));
9718 g_posix_timers[timerid] = 0;
9720 break;
9722 #endif
9724 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
9725 case TARGET_NR_timerfd_create:
9726 ret = get_errno(timerfd_create(arg1,
9727 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
9728 break;
9729 #endif
9731 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
9732 case TARGET_NR_timerfd_gettime:
9734 struct itimerspec its_curr;
9736 ret = get_errno(timerfd_gettime(arg1, &its_curr));
9738 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
9739 goto efault;
9742 break;
9743 #endif
9745 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
9746 case TARGET_NR_timerfd_settime:
9748 struct itimerspec its_new, its_old, *p_new;
9750 if (arg3) {
9751 if (target_to_host_itimerspec(&its_new, arg3)) {
9752 goto efault;
9754 p_new = &its_new;
9755 } else {
9756 p_new = NULL;
9759 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
9761 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
9762 goto efault;
9765 break;
9766 #endif
9768 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
9769 case TARGET_NR_ioprio_get:
9770 ret = get_errno(ioprio_get(arg1, arg2));
9771 break;
9772 #endif
9774 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
9775 case TARGET_NR_ioprio_set:
9776 ret = get_errno(ioprio_set(arg1, arg2, arg3));
9777 break;
9778 #endif
9780 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
9781 case TARGET_NR_setns:
9782 ret = get_errno(setns(arg1, arg2));
9783 break;
9784 #endif
9785 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
9786 case TARGET_NR_unshare:
9787 ret = get_errno(unshare(arg1));
9788 break;
9789 #endif
9791 default:
9792 unimplemented:
9793 gemu_log("qemu: Unsupported syscall: %d\n", num);
9794 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9795 unimplemented_nowarn:
9796 #endif
9797 ret = -TARGET_ENOSYS;
9798 break;
9800 fail:
9801 #ifdef DEBUG
9802 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9803 #endif
9804 if(do_strace)
9805 print_syscall_ret(num, ret);
9806 return ret;
9807 efault:
9808 ret = -TARGET_EFAULT;
9809 goto fail;