linux-user: convert /proc/net/route when endianness differs
[qemu/ar7.git] / linux-user / syscall.c
blob 251c1163bd0970ada434b3ae89f2b1b61e45f2d9
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include "linux_loop.h"
110 #include "cpu-uname.h"
112 #include "qemu.h"
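/*
 * The commit subject at the top of this blob refers to presenting
 * /proc/net/route to the guest with its hex-encoded Destination/Gateway/Mask
 * fields in guest byte order when host and guest endianness differ.  The
 * function below is only a minimal sketch of that idea, not necessarily the
 * code added by the commit: the helper name open_net_route(), the scanf and
 * printf format strings, and the use of getline()/dprintf() (GNU libc) are
 * illustrative assumptions; tswap32() is the file's usual target/host
 * byte-swap helper and is a no-op when the endianness already matches.
 */
static int open_net_route(int fd)
{
    FILE *fp = fopen("/proc/net/route", "r");
    char *line = NULL;
    size_t len = 0;

    if (fp == NULL) {
        return -1;
    }
    /* copy the header line through unchanged */
    if (getline(&line, &len, fp) != -1) {
        dprintf(fd, "%s", line);
    }
    /* re-emit each route entry with the 32-bit address fields byte-swapped */
    while (getline(&line, &len, fp) != -1) {
        char iface[16];
        unsigned int dest, gw, flags, refcnt, use, metric, mask;
        unsigned int mtu, window, irtt;

        if (sscanf(line, "%15s%x%x%x%u%u%u%x%u%u%u",
                   iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                   &mask, &mtu, &window, &irtt) == 11) {
            dprintf(fd, "%s\t%08X\t%08X\t%04X\t%u\t%u\t%u\t%08X\t%u\t%u\t%u\n",
                    iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                    metric, tswap32(mask), mtu, window, irtt);
        }
    }
    free(line);
    fclose(fp);
    return 0;
}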
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 //#define DEBUG
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
124 #undef _syscall0
125 #undef _syscall1
126 #undef _syscall2
127 #undef _syscall3
128 #undef _syscall4
129 #undef _syscall5
130 #undef _syscall6
132 #define _syscall0(type,name) \
133 static type name (void) \
135 return syscall(__NR_##name); \
138 #define _syscall1(type,name,type1,arg1) \
139 static type name (type1 arg1) \
141 return syscall(__NR_##name, arg1); \
144 #define _syscall2(type,name,type1,arg1,type2,arg2) \
145 static type name (type1 arg1,type2 arg2) \
147 return syscall(__NR_##name, arg1, arg2); \
150 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
151 static type name (type1 arg1,type2 arg2,type3 arg3) \
153 return syscall(__NR_##name, arg1, arg2, arg3); \
156 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
162 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
170 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5,type6,arg6) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
173 type6 arg6) \
175 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
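/*
 * For illustration, an invocation such as
 * _syscall2(int, sys_tkill, int, tid, int, sig) expands to a thin host
 * wrapper:
 *
 *     static int sys_tkill (int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 *
 * where __NR_sys_tkill is mapped to the host's __NR_tkill by the defines
 * just below.
 */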
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
185 #define __NR_sys_syslog __NR_syslog
186 #define __NR_sys_tgkill __NR_tgkill
187 #define __NR_sys_tkill __NR_tkill
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 defined(__s390x__)
195 #define __NR__llseek __NR_lseek
196 #endif
198 #ifdef __NR_gettid
199 _syscall0(int, gettid)
200 #else
201 /* This is a replacement for the host gettid() and must return a host
202 errno. */
203 static int gettid(void) {
204 return -ENOSYS;
206 #endif
207 #ifdef __NR_getdents
208 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
209 #endif
210 #if !defined(__NR_getdents) || \
211 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
212 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
213 #endif
214 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
215 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
216 loff_t *, res, uint, wh);
217 #endif
218 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
219 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
220 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
221 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
222 #endif
223 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
224 _syscall2(int,sys_tkill,int,tid,int,sig)
225 #endif
226 #ifdef __NR_exit_group
227 _syscall1(int,exit_group,int,error_code)
228 #endif
229 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
230 _syscall1(int,set_tid_address,int *,tidptr)
231 #endif
232 #if defined(TARGET_NR_futex) && defined(__NR_futex)
233 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
234 const struct timespec *,timeout,int *,uaddr2,int,val3)
235 #endif
236 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
237 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
238 unsigned long *, user_mask_ptr);
239 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
240 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
241 unsigned long *, user_mask_ptr);
242 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
243 void *, arg);
245 static bitmask_transtbl fcntl_flags_tbl[] = {
246 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
247 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
248 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
249 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
250 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
251 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
252 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
253 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
254 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
255 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
256 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
257 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
258 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
259 #if defined(O_DIRECT)
260 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
261 #endif
262 #if defined(O_NOATIME)
263 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
264 #endif
265 #if defined(O_CLOEXEC)
266 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
267 #endif
268 #if defined(O_PATH)
269 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
270 #endif
271 /* Don't terminate the list prematurely on 64-bit host+guest. */
272 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
273 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
274 #endif
275 { 0, 0, 0, 0 }
278 #define COPY_UTSNAME_FIELD(dest, src) \
279 do { \
280 /* __NEW_UTS_LEN doesn't include terminating null */ \
281 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
282 (dest)[__NEW_UTS_LEN] = '\0'; \
283 } while (0)
285 static int sys_uname(struct new_utsname *buf)
287 struct utsname uts_buf;
289 if (uname(&uts_buf) < 0)
290 return (-1);
293 * Just in case these have some differences, we
294 * translate utsname to new_utsname (which is the
295 * struct the Linux kernel uses).
298 memset(buf, 0, sizeof(*buf));
299 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
300 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
301 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
302 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
303 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
304 #ifdef _GNU_SOURCE
305 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
306 #endif
307 return (0);
309 #undef COPY_UTSNAME_FIELD
312 static int sys_getcwd1(char *buf, size_t size)
314 if (getcwd(buf, size) == NULL) {
315 /* getcwd() sets errno */
316 return (-1);
318 return strlen(buf)+1;
321 #ifdef TARGET_NR_openat
322 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
325 * open(2) has extra parameter 'mode' when called with
326 * flag O_CREAT.
328 if ((flags & O_CREAT) != 0) {
329 return (openat(dirfd, pathname, flags, mode));
331 return (openat(dirfd, pathname, flags));
333 #endif
335 #ifdef TARGET_NR_utimensat
336 #ifdef CONFIG_UTIMENSAT
337 static int sys_utimensat(int dirfd, const char *pathname,
338 const struct timespec times[2], int flags)
340 if (pathname == NULL)
341 return futimens(dirfd, times);
342 else
343 return utimensat(dirfd, pathname, times, flags);
345 #elif defined(__NR_utimensat)
346 #define __NR_sys_utimensat __NR_utimensat
347 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
348 const struct timespec *,tsp,int,flags)
349 #else
350 static int sys_utimensat(int dirfd, const char *pathname,
351 const struct timespec times[2], int flags)
353 errno = ENOSYS;
354 return -1;
356 #endif
357 #endif /* TARGET_NR_utimensat */
359 #ifdef CONFIG_INOTIFY
360 #include <sys/inotify.h>
362 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
363 static int sys_inotify_init(void)
365 return (inotify_init());
367 #endif
368 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
369 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
371 return (inotify_add_watch(fd, pathname, mask));
373 #endif
374 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
375 static int sys_inotify_rm_watch(int fd, int32_t wd)
377 return (inotify_rm_watch(fd, wd));
379 #endif
380 #ifdef CONFIG_INOTIFY1
381 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
382 static int sys_inotify_init1(int flags)
384 return (inotify_init1(flags));
386 #endif
387 #endif
388 #else
389 /* Userspace can usually survive runtime without inotify */
390 #undef TARGET_NR_inotify_init
391 #undef TARGET_NR_inotify_init1
392 #undef TARGET_NR_inotify_add_watch
393 #undef TARGET_NR_inotify_rm_watch
394 #endif /* CONFIG_INOTIFY */
396 #if defined(TARGET_NR_ppoll)
397 #ifndef __NR_ppoll
398 # define __NR_ppoll -1
399 #endif
400 #define __NR_sys_ppoll __NR_ppoll
401 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
402 struct timespec *, timeout, const __sigset_t *, sigmask,
403 size_t, sigsetsize)
404 #endif
406 #if defined(TARGET_NR_pselect6)
407 #ifndef __NR_pselect6
408 # define __NR_pselect6 -1
409 #endif
410 #define __NR_sys_pselect6 __NR_pselect6
411 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
412 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
413 #endif
415 #if defined(TARGET_NR_prlimit64)
416 #ifndef __NR_prlimit64
417 # define __NR_prlimit64 -1
418 #endif
419 #define __NR_sys_prlimit64 __NR_prlimit64
420 /* The glibc rlimit structure may not match the one used by the underlying syscall */
421 struct host_rlimit64 {
422 uint64_t rlim_cur;
423 uint64_t rlim_max;
425 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
426 const struct host_rlimit64 *, new_limit,
427 struct host_rlimit64 *, old_limit)
428 #endif
430 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
431 #ifdef TARGET_ARM
432 static inline int regpairs_aligned(void *cpu_env) {
433 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
435 #elif defined(TARGET_MIPS)
436 static inline int regpairs_aligned(void *cpu_env) { return 1; }
437 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
438 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
439 * of registers which translates to the same as ARM/MIPS, because we start with
440 * r3 as arg1 */
441 static inline int regpairs_aligned(void *cpu_env) { return 1; }
442 #else
443 static inline int regpairs_aligned(void *cpu_env) { return 0; }
444 #endif
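/*
 * Illustration (a sketch, not a helper defined at this point in the file):
 * when regpairs_aligned() returns 1, a 64-bit syscall argument on a 32-bit
 * guest occupies an even/odd register pair, so the dispatch code skips one
 * argument slot before it and then recombines the two halves according to
 * the guest's endianness, roughly like this (assuming the usual
 * TARGET_WORDS_BIGENDIAN define):
 */
static inline uint64_t illustrate_regpair64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    /* big-endian guests pass the most significant half in the first slot */
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}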
446 #define ERRNO_TABLE_SIZE 1200
448 /* target_to_host_errno_table[] is initialized from
449 * host_to_target_errno_table[] in syscall_init(). */
450 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
454 * This list is the union of errno values overridden in asm-<arch>/errno.h
455 * minus the errnos that are not actually generic to all archs.
457 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
458 [EIDRM] = TARGET_EIDRM,
459 [ECHRNG] = TARGET_ECHRNG,
460 [EL2NSYNC] = TARGET_EL2NSYNC,
461 [EL3HLT] = TARGET_EL3HLT,
462 [EL3RST] = TARGET_EL3RST,
463 [ELNRNG] = TARGET_ELNRNG,
464 [EUNATCH] = TARGET_EUNATCH,
465 [ENOCSI] = TARGET_ENOCSI,
466 [EL2HLT] = TARGET_EL2HLT,
467 [EDEADLK] = TARGET_EDEADLK,
468 [ENOLCK] = TARGET_ENOLCK,
469 [EBADE] = TARGET_EBADE,
470 [EBADR] = TARGET_EBADR,
471 [EXFULL] = TARGET_EXFULL,
472 [ENOANO] = TARGET_ENOANO,
473 [EBADRQC] = TARGET_EBADRQC,
474 [EBADSLT] = TARGET_EBADSLT,
475 [EBFONT] = TARGET_EBFONT,
476 [ENOSTR] = TARGET_ENOSTR,
477 [ENODATA] = TARGET_ENODATA,
478 [ETIME] = TARGET_ETIME,
479 [ENOSR] = TARGET_ENOSR,
480 [ENONET] = TARGET_ENONET,
481 [ENOPKG] = TARGET_ENOPKG,
482 [EREMOTE] = TARGET_EREMOTE,
483 [ENOLINK] = TARGET_ENOLINK,
484 [EADV] = TARGET_EADV,
485 [ESRMNT] = TARGET_ESRMNT,
486 [ECOMM] = TARGET_ECOMM,
487 [EPROTO] = TARGET_EPROTO,
488 [EDOTDOT] = TARGET_EDOTDOT,
489 [EMULTIHOP] = TARGET_EMULTIHOP,
490 [EBADMSG] = TARGET_EBADMSG,
491 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
492 [EOVERFLOW] = TARGET_EOVERFLOW,
493 [ENOTUNIQ] = TARGET_ENOTUNIQ,
494 [EBADFD] = TARGET_EBADFD,
495 [EREMCHG] = TARGET_EREMCHG,
496 [ELIBACC] = TARGET_ELIBACC,
497 [ELIBBAD] = TARGET_ELIBBAD,
498 [ELIBSCN] = TARGET_ELIBSCN,
499 [ELIBMAX] = TARGET_ELIBMAX,
500 [ELIBEXEC] = TARGET_ELIBEXEC,
501 [EILSEQ] = TARGET_EILSEQ,
502 [ENOSYS] = TARGET_ENOSYS,
503 [ELOOP] = TARGET_ELOOP,
504 [ERESTART] = TARGET_ERESTART,
505 [ESTRPIPE] = TARGET_ESTRPIPE,
506 [ENOTEMPTY] = TARGET_ENOTEMPTY,
507 [EUSERS] = TARGET_EUSERS,
508 [ENOTSOCK] = TARGET_ENOTSOCK,
509 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
510 [EMSGSIZE] = TARGET_EMSGSIZE,
511 [EPROTOTYPE] = TARGET_EPROTOTYPE,
512 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
513 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
514 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
515 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
516 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
517 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
518 [EADDRINUSE] = TARGET_EADDRINUSE,
519 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
520 [ENETDOWN] = TARGET_ENETDOWN,
521 [ENETUNREACH] = TARGET_ENETUNREACH,
522 [ENETRESET] = TARGET_ENETRESET,
523 [ECONNABORTED] = TARGET_ECONNABORTED,
524 [ECONNRESET] = TARGET_ECONNRESET,
525 [ENOBUFS] = TARGET_ENOBUFS,
526 [EISCONN] = TARGET_EISCONN,
527 [ENOTCONN] = TARGET_ENOTCONN,
528 [EUCLEAN] = TARGET_EUCLEAN,
529 [ENOTNAM] = TARGET_ENOTNAM,
530 [ENAVAIL] = TARGET_ENAVAIL,
531 [EISNAM] = TARGET_EISNAM,
532 [EREMOTEIO] = TARGET_EREMOTEIO,
533 [ESHUTDOWN] = TARGET_ESHUTDOWN,
534 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
535 [ETIMEDOUT] = TARGET_ETIMEDOUT,
536 [ECONNREFUSED] = TARGET_ECONNREFUSED,
537 [EHOSTDOWN] = TARGET_EHOSTDOWN,
538 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
539 [EALREADY] = TARGET_EALREADY,
540 [EINPROGRESS] = TARGET_EINPROGRESS,
541 [ESTALE] = TARGET_ESTALE,
542 [ECANCELED] = TARGET_ECANCELED,
543 [ENOMEDIUM] = TARGET_ENOMEDIUM,
544 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
545 #ifdef ENOKEY
546 [ENOKEY] = TARGET_ENOKEY,
547 #endif
548 #ifdef EKEYEXPIRED
549 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
550 #endif
551 #ifdef EKEYREVOKED
552 [EKEYREVOKED] = TARGET_EKEYREVOKED,
553 #endif
554 #ifdef EKEYREJECTED
555 [EKEYREJECTED] = TARGET_EKEYREJECTED,
556 #endif
557 #ifdef EOWNERDEAD
558 [EOWNERDEAD] = TARGET_EOWNERDEAD,
559 #endif
560 #ifdef ENOTRECOVERABLE
561 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
562 #endif
565 static inline int host_to_target_errno(int err)
567 if(host_to_target_errno_table[err])
568 return host_to_target_errno_table[err];
569 return err;
572 static inline int target_to_host_errno(int err)
574 if (target_to_host_errno_table[err])
575 return target_to_host_errno_table[err];
576 return err;
579 static inline abi_long get_errno(abi_long ret)
581 if (ret == -1)
582 return -host_to_target_errno(errno);
583 else
584 return ret;
587 static inline int is_error(abi_long ret)
589 return (abi_ulong)ret >= (abi_ulong)(-4096);
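/*
 * Typical use of the two helpers above: wrap a host call in get_errno() so
 * that a failure becomes a negative *target* errno (e.g. -TARGET_EBADF),
 * then test the result with is_error(), which flags any value in the
 * -4096..-1 range:
 *
 *     ret = get_errno(close(fd));
 *     if (is_error(ret)) {
 *         return ret;
 *     }
 */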
592 char *target_strerror(int err)
594 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
595 return NULL;
597 return strerror(target_to_host_errno(err));
600 static abi_ulong target_brk;
601 static abi_ulong target_original_brk;
602 static abi_ulong brk_page;
604 void target_set_brk(abi_ulong new_brk)
606 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
607 brk_page = HOST_PAGE_ALIGN(target_brk);
610 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
611 #define DEBUGF_BRK(message, args...)
613 /* do_brk() must return target values and target errnos. */
614 abi_long do_brk(abi_ulong new_brk)
616 abi_long mapped_addr;
617 int new_alloc_size;
619 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
621 if (!new_brk) {
622 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
623 return target_brk;
625 if (new_brk < target_original_brk) {
626 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
627 target_brk);
628 return target_brk;
631 /* If the new brk is less than the highest page reserved to the
632 * target heap allocation, set it and we're almost done... */
633 if (new_brk <= brk_page) {
634 /* Heap contents are initialized to zero, as for anonymous
635 * mapped pages. */
636 if (new_brk > target_brk) {
637 memset(g2h(target_brk), 0, new_brk - target_brk);
639 target_brk = new_brk;
640 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
641 return target_brk;
644 /* We need to allocate more memory after the brk... Note that
645 * we don't use MAP_FIXED because that will map over the top of
646 * any existing mapping (like the one with the host libc or qemu
647 * itself); instead we treat "mapped but at wrong address" as
648 * a failure and unmap again.
650 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
651 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
652 PROT_READ|PROT_WRITE,
653 MAP_ANON|MAP_PRIVATE, 0, 0));
655 if (mapped_addr == brk_page) {
656 /* Heap contents are initialized to zero, as for anonymous
657 * mapped pages. Technically the new pages are already
658 * initialized to zero since they *are* anonymous mapped
659 * pages, however we have to take care with the contents that
660 * come from the remaining part of the previous page: it may
661 * contain garbage data due to a previous heap usage (grown
662 * then shrunk). */
663 memset(g2h(target_brk), 0, brk_page - target_brk);
665 target_brk = new_brk;
666 brk_page = HOST_PAGE_ALIGN(target_brk);
667 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
668 target_brk);
669 return target_brk;
670 } else if (mapped_addr != -1) {
671 /* Mapped but at wrong address, meaning there wasn't actually
672 * enough space for this brk.
674 target_munmap(mapped_addr, new_alloc_size);
675 mapped_addr = -1;
676 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
678 else {
679 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
682 #if defined(TARGET_ALPHA)
683 /* We (partially) emulate OSF/1 on Alpha, which requires we
684 return a proper errno, not an unchanged brk value. */
685 return -TARGET_ENOMEM;
686 #endif
687 /* For everything else, return the previous break. */
688 return target_brk;
691 static inline abi_long copy_from_user_fdset(fd_set *fds,
692 abi_ulong target_fds_addr,
693 int n)
695 int i, nw, j, k;
696 abi_ulong b, *target_fds;
698 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
699 if (!(target_fds = lock_user(VERIFY_READ,
700 target_fds_addr,
701 sizeof(abi_ulong) * nw,
702 1)))
703 return -TARGET_EFAULT;
705 FD_ZERO(fds);
706 k = 0;
707 for (i = 0; i < nw; i++) {
708 /* grab the abi_ulong */
709 __get_user(b, &target_fds[i]);
710 for (j = 0; j < TARGET_ABI_BITS; j++) {
711 /* check the bit inside the abi_ulong */
712 if ((b >> j) & 1)
713 FD_SET(k, fds);
714 k++;
718 unlock_user(target_fds, target_fds_addr, 0);
720 return 0;
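/*
 * Worked example for the repacking above: with TARGET_ABI_BITS == 32 and
 * n == 70, nw = (70 + 31) / 32 = 3 guest words are locked, and bit j of
 * word i selects descriptor k = i * 32 + j, so bit 5 of the second word
 * (i == 1) corresponds to fd 37.
 */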
723 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
724 abi_ulong target_fds_addr,
725 int n)
727 if (target_fds_addr) {
728 if (copy_from_user_fdset(fds, target_fds_addr, n))
729 return -TARGET_EFAULT;
730 *fds_ptr = fds;
731 } else {
732 *fds_ptr = NULL;
734 return 0;
737 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
738 const fd_set *fds,
739 int n)
741 int i, nw, j, k;
742 abi_long v;
743 abi_ulong *target_fds;
745 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
746 if (!(target_fds = lock_user(VERIFY_WRITE,
747 target_fds_addr,
748 sizeof(abi_ulong) * nw,
749 0)))
750 return -TARGET_EFAULT;
752 k = 0;
753 for (i = 0; i < nw; i++) {
754 v = 0;
755 for (j = 0; j < TARGET_ABI_BITS; j++) {
756 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
757 k++;
759 __put_user(v, &target_fds[i]);
762 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
764 return 0;
767 #if defined(__alpha__)
768 #define HOST_HZ 1024
769 #else
770 #define HOST_HZ 100
771 #endif
773 static inline abi_long host_to_target_clock_t(long ticks)
775 #if HOST_HZ == TARGET_HZ
776 return ticks;
777 #else
778 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
779 #endif
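/*
 * Example of the conversion above: on an Alpha host (HOST_HZ == 1024)
 * emulating a guest with TARGET_HZ == 100, one second of CPU time reported
 * by the host as 1024 ticks is returned to the guest as
 * 1024 * 100 / 1024 = 100 ticks.
 */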
782 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
783 const struct rusage *rusage)
785 struct target_rusage *target_rusage;
787 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
788 return -TARGET_EFAULT;
789 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
790 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
791 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
792 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
793 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
794 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
795 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
796 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
797 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
798 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
799 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
800 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
801 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
802 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
803 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
804 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
805 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
806 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
807 unlock_user_struct(target_rusage, target_addr, 1);
809 return 0;
812 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
814 abi_ulong target_rlim_swap;
815 rlim_t result;
817 target_rlim_swap = tswapal(target_rlim);
818 if (target_rlim_swap == TARGET_RLIM_INFINITY)
819 return RLIM_INFINITY;
821 result = target_rlim_swap;
822 if (target_rlim_swap != (rlim_t)result)
823 return RLIM_INFINITY;
825 return result;
828 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
830 abi_ulong target_rlim_swap;
831 abi_ulong result;
833 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
834 target_rlim_swap = TARGET_RLIM_INFINITY;
835 else
836 target_rlim_swap = rlim;
837 result = tswapal(target_rlim_swap);
839 return result;
842 static inline int target_to_host_resource(int code)
844 switch (code) {
845 case TARGET_RLIMIT_AS:
846 return RLIMIT_AS;
847 case TARGET_RLIMIT_CORE:
848 return RLIMIT_CORE;
849 case TARGET_RLIMIT_CPU:
850 return RLIMIT_CPU;
851 case TARGET_RLIMIT_DATA:
852 return RLIMIT_DATA;
853 case TARGET_RLIMIT_FSIZE:
854 return RLIMIT_FSIZE;
855 case TARGET_RLIMIT_LOCKS:
856 return RLIMIT_LOCKS;
857 case TARGET_RLIMIT_MEMLOCK:
858 return RLIMIT_MEMLOCK;
859 case TARGET_RLIMIT_MSGQUEUE:
860 return RLIMIT_MSGQUEUE;
861 case TARGET_RLIMIT_NICE:
862 return RLIMIT_NICE;
863 case TARGET_RLIMIT_NOFILE:
864 return RLIMIT_NOFILE;
865 case TARGET_RLIMIT_NPROC:
866 return RLIMIT_NPROC;
867 case TARGET_RLIMIT_RSS:
868 return RLIMIT_RSS;
869 case TARGET_RLIMIT_RTPRIO:
870 return RLIMIT_RTPRIO;
871 case TARGET_RLIMIT_SIGPENDING:
872 return RLIMIT_SIGPENDING;
873 case TARGET_RLIMIT_STACK:
874 return RLIMIT_STACK;
875 default:
876 return code;
880 static inline abi_long copy_from_user_timeval(struct timeval *tv,
881 abi_ulong target_tv_addr)
883 struct target_timeval *target_tv;
885 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
886 return -TARGET_EFAULT;
888 __get_user(tv->tv_sec, &target_tv->tv_sec);
889 __get_user(tv->tv_usec, &target_tv->tv_usec);
891 unlock_user_struct(target_tv, target_tv_addr, 0);
893 return 0;
896 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
897 const struct timeval *tv)
899 struct target_timeval *target_tv;
901 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
902 return -TARGET_EFAULT;
904 __put_user(tv->tv_sec, &target_tv->tv_sec);
905 __put_user(tv->tv_usec, &target_tv->tv_usec);
907 unlock_user_struct(target_tv, target_tv_addr, 1);
909 return 0;
912 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
913 #include <mqueue.h>
915 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
916 abi_ulong target_mq_attr_addr)
918 struct target_mq_attr *target_mq_attr;
920 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
921 target_mq_attr_addr, 1))
922 return -TARGET_EFAULT;
924 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
925 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
926 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
927 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
929 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
931 return 0;
934 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
935 const struct mq_attr *attr)
937 struct target_mq_attr *target_mq_attr;
939 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
940 target_mq_attr_addr, 0))
941 return -TARGET_EFAULT;
943 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
944 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
945 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
946 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
948 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
950 return 0;
952 #endif
954 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
955 /* do_select() must return target values and target errnos. */
956 static abi_long do_select(int n,
957 abi_ulong rfd_addr, abi_ulong wfd_addr,
958 abi_ulong efd_addr, abi_ulong target_tv_addr)
960 fd_set rfds, wfds, efds;
961 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
962 struct timeval tv, *tv_ptr;
963 abi_long ret;
965 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
966 if (ret) {
967 return ret;
969 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
970 if (ret) {
971 return ret;
973 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
974 if (ret) {
975 return ret;
978 if (target_tv_addr) {
979 if (copy_from_user_timeval(&tv, target_tv_addr))
980 return -TARGET_EFAULT;
981 tv_ptr = &tv;
982 } else {
983 tv_ptr = NULL;
986 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
988 if (!is_error(ret)) {
989 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
990 return -TARGET_EFAULT;
991 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
992 return -TARGET_EFAULT;
993 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
994 return -TARGET_EFAULT;
996 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
997 return -TARGET_EFAULT;
1000 return ret;
1002 #endif
1004 static abi_long do_pipe2(int host_pipe[], int flags)
1006 #ifdef CONFIG_PIPE2
1007 return pipe2(host_pipe, flags);
1008 #else
1009 return -ENOSYS;
1010 #endif
1013 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1014 int flags, int is_pipe2)
1016 int host_pipe[2];
1017 abi_long ret;
1018 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1020 if (is_error(ret))
1021 return get_errno(ret);
1023 /* Several targets have special calling conventions for the original
1024 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1025 if (!is_pipe2) {
1026 #if defined(TARGET_ALPHA)
1027 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1028 return host_pipe[0];
1029 #elif defined(TARGET_MIPS)
1030 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1031 return host_pipe[0];
1032 #elif defined(TARGET_SH4)
1033 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1034 return host_pipe[0];
1035 #elif defined(TARGET_SPARC)
1036 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1037 return host_pipe[0];
1038 #endif
1041 if (put_user_s32(host_pipe[0], pipedes)
1042 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1043 return -TARGET_EFAULT;
1044 return get_errno(ret);
1047 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1048 abi_ulong target_addr,
1049 socklen_t len)
1051 struct target_ip_mreqn *target_smreqn;
1053 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1054 if (!target_smreqn)
1055 return -TARGET_EFAULT;
1056 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1057 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1058 if (len == sizeof(struct target_ip_mreqn))
1059 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1060 unlock_user(target_smreqn, target_addr, 0);
1062 return 0;
1065 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1066 abi_ulong target_addr,
1067 socklen_t len)
1069 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1070 sa_family_t sa_family;
1071 struct target_sockaddr *target_saddr;
1073 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1074 if (!target_saddr)
1075 return -TARGET_EFAULT;
1077 sa_family = tswap16(target_saddr->sa_family);
1079 /* Oops. The caller might send an incomplete sun_path; sun_path
1080 * must be terminated by \0 (see the manual page), but
1081 * unfortunately it is quite common to specify sockaddr_un
1082 * length as "strlen(x->sun_path)" while it should be
1083 * "strlen(...) + 1". We'll fix that here if needed.
1084 * Linux kernel has a similar feature.
1087 if (sa_family == AF_UNIX) {
1088 if (len < unix_maxlen && len > 0) {
1089 char *cp = (char*)target_saddr;
1091 if ( cp[len-1] && !cp[len] )
1092 len++;
1094 if (len > unix_maxlen)
1095 len = unix_maxlen;
1098 memcpy(addr, target_saddr, len);
1099 addr->sa_family = sa_family;
1100 unlock_user(target_saddr, target_addr, 0);
1102 return 0;
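/*
 * Worked example of the AF_UNIX fixup above: a guest binding "/tmp/sock"
 * commonly passes len = 2 (sa_family) + 9 (strlen) = 11, i.e. without the
 * terminating NUL.  Then cp[len-1] is the final 'k' and, provided the guest
 * buffer holds a NUL right after the string, cp[len] == 0, so len is bumped
 * to 12 and the host sees a properly terminated sun_path.
 */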
1105 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1106 struct sockaddr *addr,
1107 socklen_t len)
1109 struct target_sockaddr *target_saddr;
1111 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1112 if (!target_saddr)
1113 return -TARGET_EFAULT;
1114 memcpy(target_saddr, addr, len);
1115 target_saddr->sa_family = tswap16(addr->sa_family);
1116 unlock_user(target_saddr, target_addr, len);
1118 return 0;
1121 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1122 struct target_msghdr *target_msgh)
1124 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1125 abi_long msg_controllen;
1126 abi_ulong target_cmsg_addr;
1127 struct target_cmsghdr *target_cmsg;
1128 socklen_t space = 0;
1130 msg_controllen = tswapal(target_msgh->msg_controllen);
1131 if (msg_controllen < sizeof (struct target_cmsghdr))
1132 goto the_end;
1133 target_cmsg_addr = tswapal(target_msgh->msg_control);
1134 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1135 if (!target_cmsg)
1136 return -TARGET_EFAULT;
1138 while (cmsg && target_cmsg) {
1139 void *data = CMSG_DATA(cmsg);
1140 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1142 int len = tswapal(target_cmsg->cmsg_len)
1143 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1145 space += CMSG_SPACE(len);
1146 if (space > msgh->msg_controllen) {
1147 space -= CMSG_SPACE(len);
1148 gemu_log("Host cmsg overflow\n");
1149 break;
1152 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1153 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1154 cmsg->cmsg_len = CMSG_LEN(len);
1156 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1157 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1158 memcpy(data, target_data, len);
1159 } else {
1160 int *fd = (int *)data;
1161 int *target_fd = (int *)target_data;
1162 int i, numfds = len / sizeof(int);
1164 for (i = 0; i < numfds; i++)
1165 fd[i] = tswap32(target_fd[i]);
1168 cmsg = CMSG_NXTHDR(msgh, cmsg);
1169 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1171 unlock_user(target_cmsg, target_cmsg_addr, 0);
1172 the_end:
1173 msgh->msg_controllen = space;
1174 return 0;
1177 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1178 struct msghdr *msgh)
1180 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1181 abi_long msg_controllen;
1182 abi_ulong target_cmsg_addr;
1183 struct target_cmsghdr *target_cmsg;
1184 socklen_t space = 0;
1186 msg_controllen = tswapal(target_msgh->msg_controllen);
1187 if (msg_controllen < sizeof (struct target_cmsghdr))
1188 goto the_end;
1189 target_cmsg_addr = tswapal(target_msgh->msg_control);
1190 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1191 if (!target_cmsg)
1192 return -TARGET_EFAULT;
1194 while (cmsg && target_cmsg) {
1195 void *data = CMSG_DATA(cmsg);
1196 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1198 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1200 space += TARGET_CMSG_SPACE(len);
1201 if (space > msg_controllen) {
1202 space -= TARGET_CMSG_SPACE(len);
1203 gemu_log("Target cmsg overflow\n");
1204 break;
1207 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1208 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1209 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1211 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1212 (cmsg->cmsg_type == SCM_RIGHTS)) {
1213 int *fd = (int *)data;
1214 int *target_fd = (int *)target_data;
1215 int i, numfds = len / sizeof(int);
1217 for (i = 0; i < numfds; i++)
1218 target_fd[i] = tswap32(fd[i]);
1219 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1220 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1221 (len == sizeof(struct timeval))) {
1222 /* copy struct timeval to target */
1223 struct timeval *tv = (struct timeval *)data;
1224 struct target_timeval *target_tv =
1225 (struct target_timeval *)target_data;
1227 target_tv->tv_sec = tswapal(tv->tv_sec);
1228 target_tv->tv_usec = tswapal(tv->tv_usec);
1229 } else {
1230 gemu_log("Unsupported ancillary data: %d/%d\n",
1231 cmsg->cmsg_level, cmsg->cmsg_type);
1232 memcpy(target_data, data, len);
1235 cmsg = CMSG_NXTHDR(msgh, cmsg);
1236 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1238 unlock_user(target_cmsg, target_cmsg_addr, space);
1239 the_end:
1240 target_msgh->msg_controllen = tswapal(space);
1241 return 0;
1244 /* do_setsockopt() Must return target values and target errnos. */
1245 static abi_long do_setsockopt(int sockfd, int level, int optname,
1246 abi_ulong optval_addr, socklen_t optlen)
1248 abi_long ret;
1249 int val;
1250 struct ip_mreqn *ip_mreq;
1251 struct ip_mreq_source *ip_mreq_source;
1253 switch(level) {
1254 case SOL_TCP:
1255 /* TCP options all take an 'int' value. */
1256 if (optlen < sizeof(uint32_t))
1257 return -TARGET_EINVAL;
1259 if (get_user_u32(val, optval_addr))
1260 return -TARGET_EFAULT;
1261 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1262 break;
1263 case SOL_IP:
1264 switch(optname) {
1265 case IP_TOS:
1266 case IP_TTL:
1267 case IP_HDRINCL:
1268 case IP_ROUTER_ALERT:
1269 case IP_RECVOPTS:
1270 case IP_RETOPTS:
1271 case IP_PKTINFO:
1272 case IP_MTU_DISCOVER:
1273 case IP_RECVERR:
1274 case IP_RECVTOS:
1275 #ifdef IP_FREEBIND
1276 case IP_FREEBIND:
1277 #endif
1278 case IP_MULTICAST_TTL:
1279 case IP_MULTICAST_LOOP:
1280 val = 0;
1281 if (optlen >= sizeof(uint32_t)) {
1282 if (get_user_u32(val, optval_addr))
1283 return -TARGET_EFAULT;
1284 } else if (optlen >= 1) {
1285 if (get_user_u8(val, optval_addr))
1286 return -TARGET_EFAULT;
1288 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1289 break;
1290 case IP_ADD_MEMBERSHIP:
1291 case IP_DROP_MEMBERSHIP:
1292 if (optlen < sizeof (struct target_ip_mreq) ||
1293 optlen > sizeof (struct target_ip_mreqn))
1294 return -TARGET_EINVAL;
1296 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1297 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1298 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1299 break;
1301 case IP_BLOCK_SOURCE:
1302 case IP_UNBLOCK_SOURCE:
1303 case IP_ADD_SOURCE_MEMBERSHIP:
1304 case IP_DROP_SOURCE_MEMBERSHIP:
1305 if (optlen != sizeof (struct target_ip_mreq_source))
1306 return -TARGET_EINVAL;
1308 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1309 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1310 unlock_user (ip_mreq_source, optval_addr, 0);
1311 break;
1313 default:
1314 goto unimplemented;
1316 break;
1317 case SOL_RAW:
1318 switch (optname) {
1319 case ICMP_FILTER:
1320 /* struct icmp_filter takes an u32 value */
1321 if (optlen < sizeof(uint32_t)) {
1322 return -TARGET_EINVAL;
1325 if (get_user_u32(val, optval_addr)) {
1326 return -TARGET_EFAULT;
1328 ret = get_errno(setsockopt(sockfd, level, optname,
1329 &val, sizeof(val)));
1330 break;
1332 default:
1333 goto unimplemented;
1335 break;
1336 case TARGET_SOL_SOCKET:
1337 switch (optname) {
1338 case TARGET_SO_RCVTIMEO:
1340 struct timeval tv;
1342 optname = SO_RCVTIMEO;
1344 set_timeout:
1345 if (optlen != sizeof(struct target_timeval)) {
1346 return -TARGET_EINVAL;
1349 if (copy_from_user_timeval(&tv, optval_addr)) {
1350 return -TARGET_EFAULT;
1353 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1354 &tv, sizeof(tv)));
1355 return ret;
1357 case TARGET_SO_SNDTIMEO:
1358 optname = SO_SNDTIMEO;
1359 goto set_timeout;
1360 /* Options with 'int' argument. */
1361 case TARGET_SO_DEBUG:
1362 optname = SO_DEBUG;
1363 break;
1364 case TARGET_SO_REUSEADDR:
1365 optname = SO_REUSEADDR;
1366 break;
1367 case TARGET_SO_TYPE:
1368 optname = SO_TYPE;
1369 break;
1370 case TARGET_SO_ERROR:
1371 optname = SO_ERROR;
1372 break;
1373 case TARGET_SO_DONTROUTE:
1374 optname = SO_DONTROUTE;
1375 break;
1376 case TARGET_SO_BROADCAST:
1377 optname = SO_BROADCAST;
1378 break;
1379 case TARGET_SO_SNDBUF:
1380 optname = SO_SNDBUF;
1381 break;
1382 case TARGET_SO_RCVBUF:
1383 optname = SO_RCVBUF;
1384 break;
1385 case TARGET_SO_KEEPALIVE:
1386 optname = SO_KEEPALIVE;
1387 break;
1388 case TARGET_SO_OOBINLINE:
1389 optname = SO_OOBINLINE;
1390 break;
1391 case TARGET_SO_NO_CHECK:
1392 optname = SO_NO_CHECK;
1393 break;
1394 case TARGET_SO_PRIORITY:
1395 optname = SO_PRIORITY;
1396 break;
1397 #ifdef SO_BSDCOMPAT
1398 case TARGET_SO_BSDCOMPAT:
1399 optname = SO_BSDCOMPAT;
1400 break;
1401 #endif
1402 case TARGET_SO_PASSCRED:
1403 optname = SO_PASSCRED;
1404 break;
1405 case TARGET_SO_TIMESTAMP:
1406 optname = SO_TIMESTAMP;
1407 break;
1408 case TARGET_SO_RCVLOWAT:
1409 optname = SO_RCVLOWAT;
1410 break;
1411 break;
1412 default:
1413 goto unimplemented;
1415 if (optlen < sizeof(uint32_t))
1416 return -TARGET_EINVAL;
1418 if (get_user_u32(val, optval_addr))
1419 return -TARGET_EFAULT;
1420 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1421 break;
1422 default:
1423 unimplemented:
1424 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1425 ret = -TARGET_ENOPROTOOPT;
1427 return ret;
1430 /* do_getsockopt() Must return target values and target errnos. */
1431 static abi_long do_getsockopt(int sockfd, int level, int optname,
1432 abi_ulong optval_addr, abi_ulong optlen)
1434 abi_long ret;
1435 int len, val;
1436 socklen_t lv;
1438 switch(level) {
1439 case TARGET_SOL_SOCKET:
1440 level = SOL_SOCKET;
1441 switch (optname) {
1442 /* These don't just return a single integer */
1443 case TARGET_SO_LINGER:
1444 case TARGET_SO_RCVTIMEO:
1445 case TARGET_SO_SNDTIMEO:
1446 case TARGET_SO_PEERNAME:
1447 goto unimplemented;
1448 case TARGET_SO_PEERCRED: {
1449 struct ucred cr;
1450 socklen_t crlen;
1451 struct target_ucred *tcr;
1453 if (get_user_u32(len, optlen)) {
1454 return -TARGET_EFAULT;
1456 if (len < 0) {
1457 return -TARGET_EINVAL;
1460 crlen = sizeof(cr);
1461 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1462 &cr, &crlen));
1463 if (ret < 0) {
1464 return ret;
1466 if (len > crlen) {
1467 len = crlen;
1469 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1470 return -TARGET_EFAULT;
1472 __put_user(cr.pid, &tcr->pid);
1473 __put_user(cr.uid, &tcr->uid);
1474 __put_user(cr.gid, &tcr->gid);
1475 unlock_user_struct(tcr, optval_addr, 1);
1476 if (put_user_u32(len, optlen)) {
1477 return -TARGET_EFAULT;
1479 break;
1481 /* Options with 'int' argument. */
1482 case TARGET_SO_DEBUG:
1483 optname = SO_DEBUG;
1484 goto int_case;
1485 case TARGET_SO_REUSEADDR:
1486 optname = SO_REUSEADDR;
1487 goto int_case;
1488 case TARGET_SO_TYPE:
1489 optname = SO_TYPE;
1490 goto int_case;
1491 case TARGET_SO_ERROR:
1492 optname = SO_ERROR;
1493 goto int_case;
1494 case TARGET_SO_DONTROUTE:
1495 optname = SO_DONTROUTE;
1496 goto int_case;
1497 case TARGET_SO_BROADCAST:
1498 optname = SO_BROADCAST;
1499 goto int_case;
1500 case TARGET_SO_SNDBUF:
1501 optname = SO_SNDBUF;
1502 goto int_case;
1503 case TARGET_SO_RCVBUF:
1504 optname = SO_RCVBUF;
1505 goto int_case;
1506 case TARGET_SO_KEEPALIVE:
1507 optname = SO_KEEPALIVE;
1508 goto int_case;
1509 case TARGET_SO_OOBINLINE:
1510 optname = SO_OOBINLINE;
1511 goto int_case;
1512 case TARGET_SO_NO_CHECK:
1513 optname = SO_NO_CHECK;
1514 goto int_case;
1515 case TARGET_SO_PRIORITY:
1516 optname = SO_PRIORITY;
1517 goto int_case;
1518 #ifdef SO_BSDCOMPAT
1519 case TARGET_SO_BSDCOMPAT:
1520 optname = SO_BSDCOMPAT;
1521 goto int_case;
1522 #endif
1523 case TARGET_SO_PASSCRED:
1524 optname = SO_PASSCRED;
1525 goto int_case;
1526 case TARGET_SO_TIMESTAMP:
1527 optname = SO_TIMESTAMP;
1528 goto int_case;
1529 case TARGET_SO_RCVLOWAT:
1530 optname = SO_RCVLOWAT;
1531 goto int_case;
1532 default:
1533 goto int_case;
1535 break;
1536 case SOL_TCP:
1537 /* TCP options all take an 'int' value. */
1538 int_case:
1539 if (get_user_u32(len, optlen))
1540 return -TARGET_EFAULT;
1541 if (len < 0)
1542 return -TARGET_EINVAL;
1543 lv = sizeof(lv);
1544 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1545 if (ret < 0)
1546 return ret;
1547 if (len > lv)
1548 len = lv;
1549 if (len == 4) {
1550 if (put_user_u32(val, optval_addr))
1551 return -TARGET_EFAULT;
1552 } else {
1553 if (put_user_u8(val, optval_addr))
1554 return -TARGET_EFAULT;
1556 if (put_user_u32(len, optlen))
1557 return -TARGET_EFAULT;
1558 break;
1559 case SOL_IP:
1560 switch(optname) {
1561 case IP_TOS:
1562 case IP_TTL:
1563 case IP_HDRINCL:
1564 case IP_ROUTER_ALERT:
1565 case IP_RECVOPTS:
1566 case IP_RETOPTS:
1567 case IP_PKTINFO:
1568 case IP_MTU_DISCOVER:
1569 case IP_RECVERR:
1570 case IP_RECVTOS:
1571 #ifdef IP_FREEBIND
1572 case IP_FREEBIND:
1573 #endif
1574 case IP_MULTICAST_TTL:
1575 case IP_MULTICAST_LOOP:
1576 if (get_user_u32(len, optlen))
1577 return -TARGET_EFAULT;
1578 if (len < 0)
1579 return -TARGET_EINVAL;
1580 lv = sizeof(lv);
1581 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1582 if (ret < 0)
1583 return ret;
1584 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1585 len = 1;
1586 if (put_user_u32(len, optlen)
1587 || put_user_u8(val, optval_addr))
1588 return -TARGET_EFAULT;
1589 } else {
1590 if (len > sizeof(int))
1591 len = sizeof(int);
1592 if (put_user_u32(len, optlen)
1593 || put_user_u32(val, optval_addr))
1594 return -TARGET_EFAULT;
1596 break;
1597 default:
1598 ret = -TARGET_ENOPROTOOPT;
1599 break;
1601 break;
1602 default:
1603 unimplemented:
1604 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1605 level, optname);
1606 ret = -TARGET_EOPNOTSUPP;
1607 break;
1609 return ret;
1612 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1613 int count, int copy)
1615 struct target_iovec *target_vec;
1616 struct iovec *vec;
1617 abi_ulong total_len, max_len;
1618 int i;
1620 if (count == 0) {
1621 errno = 0;
1622 return NULL;
1624 if (count < 0 || count > IOV_MAX) {
1625 errno = EINVAL;
1626 return NULL;
1629 vec = calloc(count, sizeof(struct iovec));
1630 if (vec == NULL) {
1631 errno = ENOMEM;
1632 return NULL;
1635 target_vec = lock_user(VERIFY_READ, target_addr,
1636 count * sizeof(struct target_iovec), 1);
1637 if (target_vec == NULL) {
1638 errno = EFAULT;
1639 goto fail2;
1642 /* ??? If host page size > target page size, this will result in a
1643 value larger than what we can actually support. */
1644 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1645 total_len = 0;
1647 for (i = 0; i < count; i++) {
1648 abi_ulong base = tswapal(target_vec[i].iov_base);
1649 abi_long len = tswapal(target_vec[i].iov_len);
1651 if (len < 0) {
1652 errno = EINVAL;
1653 goto fail;
1654 } else if (len == 0) {
1655 /* Zero length pointer is ignored. */
1656 vec[i].iov_base = 0;
1657 } else {
1658 vec[i].iov_base = lock_user(type, base, len, copy);
1659 if (!vec[i].iov_base) {
1660 errno = EFAULT;
1661 goto fail;
1663 if (len > max_len - total_len) {
1664 len = max_len - total_len;
1667 vec[i].iov_len = len;
1668 total_len += len;
1671 unlock_user(target_vec, target_addr, 0);
1672 return vec;
1674 fail:
1675 free(vec);
1676 fail2:
1677 unlock_user(target_vec, target_addr, 0);
1678 return NULL;
1681 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1682 int count, int copy)
1684 struct target_iovec *target_vec;
1685 int i;
1687 target_vec = lock_user(VERIFY_READ, target_addr,
1688 count * sizeof(struct target_iovec), 1);
1689 if (target_vec) {
1690 for (i = 0; i < count; i++) {
1691 abi_ulong base = tswapal(target_vec[i].iov_base);
1692 abi_long len = tswapal(target_vec[i].iov_len);
1693 if (len < 0) {
1694 break;
1696 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1698 unlock_user(target_vec, target_addr, 0);
1701 free(vec);
1704 static inline void target_to_host_sock_type(int *type)
1706 int host_type = 0;
1707 int target_type = *type;
1709 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1710 case TARGET_SOCK_DGRAM:
1711 host_type = SOCK_DGRAM;
1712 break;
1713 case TARGET_SOCK_STREAM:
1714 host_type = SOCK_STREAM;
1715 break;
1716 default:
1717 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1718 break;
1720 if (target_type & TARGET_SOCK_CLOEXEC) {
1721 host_type |= SOCK_CLOEXEC;
1723 if (target_type & TARGET_SOCK_NONBLOCK) {
1724 host_type |= SOCK_NONBLOCK;
1726 *type = host_type;
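/*
 * The translation above is needed because socket type values are not
 * universal: on MIPS, for instance, SOCK_STREAM and SOCK_DGRAM are swapped
 * relative to the generic Linux ABI, and the SOCK_CLOEXEC / SOCK_NONBLOCK
 * flag bits track the target's O_CLOEXEC / O_NONBLOCK values, which also
 * differ between architectures.
 */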
1729 /* do_socket() Must return target values and target errnos. */
1730 static abi_long do_socket(int domain, int type, int protocol)
1732 target_to_host_sock_type(&type);
1734 if (domain == PF_NETLINK)
1735 return -TARGET_EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1736 return get_errno(socket(domain, type, protocol));
1739 /* do_bind() Must return target values and target errnos. */
1740 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1741 socklen_t addrlen)
1743 void *addr;
1744 abi_long ret;
1746 if ((int)addrlen < 0) {
1747 return -TARGET_EINVAL;
1750 addr = alloca(addrlen+1);
1752 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1753 if (ret)
1754 return ret;
1756 return get_errno(bind(sockfd, addr, addrlen));
1759 /* do_connect() Must return target values and target errnos. */
1760 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1761 socklen_t addrlen)
1763 void *addr;
1764 abi_long ret;
1766 if ((int)addrlen < 0) {
1767 return -TARGET_EINVAL;
1770 addr = alloca(addrlen);
1772 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1773 if (ret)
1774 return ret;
1776 return get_errno(connect(sockfd, addr, addrlen));
1779 /* do_sendrecvmsg() Must return target values and target errnos. */
1780 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1781 int flags, int send)
1783 abi_long ret, len;
1784 struct target_msghdr *msgp;
1785 struct msghdr msg;
1786 int count;
1787 struct iovec *vec;
1788 abi_ulong target_vec;
1790 /* FIXME */
1791 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1792 msgp,
1793 target_msg,
1794 send ? 1 : 0))
1795 return -TARGET_EFAULT;
1796 if (msgp->msg_name) {
1797 msg.msg_namelen = tswap32(msgp->msg_namelen);
1798 msg.msg_name = alloca(msg.msg_namelen);
1799 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1800 msg.msg_namelen);
1801 if (ret) {
1802 goto out2;
1804 } else {
1805 msg.msg_name = NULL;
1806 msg.msg_namelen = 0;
1808 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1809 msg.msg_control = alloca(msg.msg_controllen);
1810 msg.msg_flags = tswap32(msgp->msg_flags);
1812 count = tswapal(msgp->msg_iovlen);
1813 target_vec = tswapal(msgp->msg_iov);
1814 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1815 target_vec, count, send);
1816 if (vec == NULL) {
1817 ret = -host_to_target_errno(errno);
1818 goto out2;
1820 msg.msg_iovlen = count;
1821 msg.msg_iov = vec;
1823 if (send) {
1824 ret = target_to_host_cmsg(&msg, msgp);
1825 if (ret == 0)
1826 ret = get_errno(sendmsg(fd, &msg, flags));
1827 } else {
1828 ret = get_errno(recvmsg(fd, &msg, flags));
1829 if (!is_error(ret)) {
1830 len = ret;
1831 ret = host_to_target_cmsg(msgp, &msg);
1832 if (!is_error(ret)) {
1833 msgp->msg_namelen = tswap32(msg.msg_namelen);
1834 if (msg.msg_name != NULL) {
1835 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1836 msg.msg_name, msg.msg_namelen);
1837 if (ret) {
1838 goto out;
1842 ret = len;
1847 out:
1848 unlock_iovec(vec, target_vec, count, !send);
1849 out2:
1850 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1851 return ret;
1854 /* If we don't have a system accept4() then just call accept.
1855 * The callsites to do_accept4() will ensure that they don't
1856 * pass a non-zero flags argument in this config.
1858 #ifndef CONFIG_ACCEPT4
1859 static inline int accept4(int sockfd, struct sockaddr *addr,
1860 socklen_t *addrlen, int flags)
1862 assert(flags == 0);
1863 return accept(sockfd, addr, addrlen);
1865 #endif
1867 /* do_accept4() Must return target values and target errnos. */
1868 static abi_long do_accept4(int fd, abi_ulong target_addr,
1869 abi_ulong target_addrlen_addr, int flags)
1871 socklen_t addrlen;
1872 void *addr;
1873 abi_long ret;
1875 if (target_addr == 0) {
1876 return get_errno(accept4(fd, NULL, NULL, flags));
1879 /* linux returns EINVAL if addrlen pointer is invalid */
1880 if (get_user_u32(addrlen, target_addrlen_addr))
1881 return -TARGET_EINVAL;
1883 if ((int)addrlen < 0) {
1884 return -TARGET_EINVAL;
1887 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1888 return -TARGET_EINVAL;
1890 addr = alloca(addrlen);
1892 ret = get_errno(accept4(fd, addr, &addrlen, flags));
1893 if (!is_error(ret)) {
1894 host_to_target_sockaddr(target_addr, addr, addrlen);
1895 if (put_user_u32(addrlen, target_addrlen_addr))
1896 ret = -TARGET_EFAULT;
1898 return ret;
1901 /* do_getpeername() Must return target values and target errnos. */
1902 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1903 abi_ulong target_addrlen_addr)
1905 socklen_t addrlen;
1906 void *addr;
1907 abi_long ret;
1909 if (get_user_u32(addrlen, target_addrlen_addr))
1910 return -TARGET_EFAULT;
1912 if ((int)addrlen < 0) {
1913 return -TARGET_EINVAL;
1916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1917 return -TARGET_EFAULT;
1919 addr = alloca(addrlen);
1921 ret = get_errno(getpeername(fd, addr, &addrlen));
1922 if (!is_error(ret)) {
1923 host_to_target_sockaddr(target_addr, addr, addrlen);
1924 if (put_user_u32(addrlen, target_addrlen_addr))
1925 ret = -TARGET_EFAULT;
1927 return ret;
1930 /* do_getsockname() Must return target values and target errnos. */
1931 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1932 abi_ulong target_addrlen_addr)
1934 socklen_t addrlen;
1935 void *addr;
1936 abi_long ret;
1938 if (get_user_u32(addrlen, target_addrlen_addr))
1939 return -TARGET_EFAULT;
1941 if ((int)addrlen < 0) {
1942 return -TARGET_EINVAL;
1945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1946 return -TARGET_EFAULT;
1948 addr = alloca(addrlen);
1950 ret = get_errno(getsockname(fd, addr, &addrlen));
1951 if (!is_error(ret)) {
1952 host_to_target_sockaddr(target_addr, addr, addrlen);
1953 if (put_user_u32(addrlen, target_addrlen_addr))
1954 ret = -TARGET_EFAULT;
1956 return ret;
1959 /* do_socketpair() Must return target values and target errnos. */
1960 static abi_long do_socketpair(int domain, int type, int protocol,
1961 abi_ulong target_tab_addr)
1963 int tab[2];
1964 abi_long ret;
1966 target_to_host_sock_type(&type);
1968 ret = get_errno(socketpair(domain, type, protocol, tab));
1969 if (!is_error(ret)) {
1970 if (put_user_s32(tab[0], target_tab_addr)
1971 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1972 ret = -TARGET_EFAULT;
1974 return ret;
1977 /* do_sendto() Must return target values and target errnos. */
1978 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1979 abi_ulong target_addr, socklen_t addrlen)
1981 void *addr;
1982 void *host_msg;
1983 abi_long ret;
1985 if ((int)addrlen < 0) {
1986 return -TARGET_EINVAL;
1989 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1990 if (!host_msg)
1991 return -TARGET_EFAULT;
1992 if (target_addr) {
1993 addr = alloca(addrlen);
1994 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1995 if (ret) {
1996 unlock_user(host_msg, msg, 0);
1997 return ret;
1999 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2000 } else {
2001 ret = get_errno(send(fd, host_msg, len, flags));
2003 unlock_user(host_msg, msg, 0);
2004 return ret;
2007 /* do_recvfrom() Must return target values and target errnos. */
2008 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2009 abi_ulong target_addr,
2010 abi_ulong target_addrlen)
2012 socklen_t addrlen;
2013 void *addr;
2014 void *host_msg;
2015 abi_long ret;
2017 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2018 if (!host_msg)
2019 return -TARGET_EFAULT;
2020 if (target_addr) {
2021 if (get_user_u32(addrlen, target_addrlen)) {
2022 ret = -TARGET_EFAULT;
2023 goto fail;
2025 if ((int)addrlen < 0) {
2026 ret = -TARGET_EINVAL;
2027 goto fail;
2029 addr = alloca(addrlen);
2030 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2031 } else {
2032 addr = NULL; /* To keep compiler quiet. */
2033 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2035 if (!is_error(ret)) {
2036 if (target_addr) {
2037 host_to_target_sockaddr(target_addr, addr, addrlen);
2038 if (put_user_u32(addrlen, target_addrlen)) {
2039 ret = -TARGET_EFAULT;
2040 goto fail;
2043 unlock_user(host_msg, msg, len);
2044 } else {
2045 fail:
2046 unlock_user(host_msg, msg, 0);
2048 return ret;
2051 #ifdef TARGET_NR_socketcall
2052 /* do_socketcall() must return target values and target errnos. */
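/* do_socketcall() demultiplexes the old-style socketcall(2) interface used
 * by targets such as i386: the guest passes an operation number plus a
 * pointer to an array of abi_ulong arguments.  Each case below fetches the
 * words it needs with get_user_ual() and forwards them to the matching
 * do_*() helper.  For SOCKOP_bind, for instance, the guest argument block is
 *
 *     vptr + 0*n : sockfd
 *     vptr + 1*n : pointer to a target struct sockaddr
 *     vptr + 2*n : addrlen
 *
 * with n == sizeof(abi_ulong) on the target.
 */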
2053 static abi_long do_socketcall(int num, abi_ulong vptr)
2055 abi_long ret;
2056 const int n = sizeof(abi_ulong);
2058 switch(num) {
2059 case SOCKOP_socket:
2061 abi_ulong domain, type, protocol;
2063 if (get_user_ual(domain, vptr)
2064 || get_user_ual(type, vptr + n)
2065 || get_user_ual(protocol, vptr + 2 * n))
2066 return -TARGET_EFAULT;
2068 ret = do_socket(domain, type, protocol);
2070 break;
2071 case SOCKOP_bind:
2073 abi_ulong sockfd;
2074 abi_ulong target_addr;
2075 socklen_t addrlen;
2077 if (get_user_ual(sockfd, vptr)
2078 || get_user_ual(target_addr, vptr + n)
2079 || get_user_ual(addrlen, vptr + 2 * n))
2080 return -TARGET_EFAULT;
2082 ret = do_bind(sockfd, target_addr, addrlen);
2084 break;
2085 case SOCKOP_connect:
2087 abi_ulong sockfd;
2088 abi_ulong target_addr;
2089 socklen_t addrlen;
2091 if (get_user_ual(sockfd, vptr)
2092 || get_user_ual(target_addr, vptr + n)
2093 || get_user_ual(addrlen, vptr + 2 * n))
2094 return -TARGET_EFAULT;
2096 ret = do_connect(sockfd, target_addr, addrlen);
2098 break;
2099 case SOCKOP_listen:
2101 abi_ulong sockfd, backlog;
2103 if (get_user_ual(sockfd, vptr)
2104 || get_user_ual(backlog, vptr + n))
2105 return -TARGET_EFAULT;
2107 ret = get_errno(listen(sockfd, backlog));
2109 break;
2110 case SOCKOP_accept:
2112 abi_ulong sockfd;
2113 abi_ulong target_addr, target_addrlen;
2115 if (get_user_ual(sockfd, vptr)
2116 || get_user_ual(target_addr, vptr + n)
2117 || get_user_ual(target_addrlen, vptr + 2 * n))
2118 return -TARGET_EFAULT;
2120 ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2122 break;
2123 case SOCKOP_getsockname:
2125 abi_ulong sockfd;
2126 abi_ulong target_addr, target_addrlen;
2128 if (get_user_ual(sockfd, vptr)
2129 || get_user_ual(target_addr, vptr + n)
2130 || get_user_ual(target_addrlen, vptr + 2 * n))
2131 return -TARGET_EFAULT;
2133 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2135 break;
2136 case SOCKOP_getpeername:
2138 abi_ulong sockfd;
2139 abi_ulong target_addr, target_addrlen;
2141 if (get_user_ual(sockfd, vptr)
2142 || get_user_ual(target_addr, vptr + n)
2143 || get_user_ual(target_addrlen, vptr + 2 * n))
2144 return -TARGET_EFAULT;
2146 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2148 break;
2149 case SOCKOP_socketpair:
2151 abi_ulong domain, type, protocol;
2152 abi_ulong tab;
2154 if (get_user_ual(domain, vptr)
2155 || get_user_ual(type, vptr + n)
2156 || get_user_ual(protocol, vptr + 2 * n)
2157 || get_user_ual(tab, vptr + 3 * n))
2158 return -TARGET_EFAULT;
2160 ret = do_socketpair(domain, type, protocol, tab);
2162 break;
2163 case SOCKOP_send:
2165 abi_ulong sockfd;
2166 abi_ulong msg;
2167 size_t len;
2168 abi_ulong flags;
2170 if (get_user_ual(sockfd, vptr)
2171 || get_user_ual(msg, vptr + n)
2172 || get_user_ual(len, vptr + 2 * n)
2173 || get_user_ual(flags, vptr + 3 * n))
2174 return -TARGET_EFAULT;
2176 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2178 break;
2179 case SOCKOP_recv:
2181 abi_ulong sockfd;
2182 abi_ulong msg;
2183 size_t len;
2184 abi_ulong flags;
2186 if (get_user_ual(sockfd, vptr)
2187 || get_user_ual(msg, vptr + n)
2188 || get_user_ual(len, vptr + 2 * n)
2189 || get_user_ual(flags, vptr + 3 * n))
2190 return -TARGET_EFAULT;
2192 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2194 break;
2195 case SOCKOP_sendto:
2197 abi_ulong sockfd;
2198 abi_ulong msg;
2199 size_t len;
2200 abi_ulong flags;
2201 abi_ulong addr;
2202 socklen_t addrlen;
2204 if (get_user_ual(sockfd, vptr)
2205 || get_user_ual(msg, vptr + n)
2206 || get_user_ual(len, vptr + 2 * n)
2207 || get_user_ual(flags, vptr + 3 * n)
2208 || get_user_ual(addr, vptr + 4 * n)
2209 || get_user_ual(addrlen, vptr + 5 * n))
2210 return -TARGET_EFAULT;
2212 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2214 break;
2215 case SOCKOP_recvfrom:
2217 abi_ulong sockfd;
2218 abi_ulong msg;
2219 size_t len;
2220 abi_ulong flags;
2221 abi_ulong addr;
2222 socklen_t addrlen;
2224 if (get_user_ual(sockfd, vptr)
2225 || get_user_ual(msg, vptr + n)
2226 || get_user_ual(len, vptr + 2 * n)
2227 || get_user_ual(flags, vptr + 3 * n)
2228 || get_user_ual(addr, vptr + 4 * n)
2229 || get_user_ual(addrlen, vptr + 5 * n))
2230 return -TARGET_EFAULT;
2232 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2234 break;
2235 case SOCKOP_shutdown:
2237 abi_ulong sockfd, how;
2239 if (get_user_ual(sockfd, vptr)
2240 || get_user_ual(how, vptr + n))
2241 return -TARGET_EFAULT;
2243 ret = get_errno(shutdown(sockfd, how));
2245 break;
2246 case SOCKOP_sendmsg:
2247 case SOCKOP_recvmsg:
2249 abi_ulong fd;
2250 abi_ulong target_msg;
2251 abi_ulong flags;
2253 if (get_user_ual(fd, vptr)
2254 || get_user_ual(target_msg, vptr + n)
2255 || get_user_ual(flags, vptr + 2 * n))
2256 return -TARGET_EFAULT;
2258 ret = do_sendrecvmsg(fd, target_msg, flags,
2259 (num == SOCKOP_sendmsg));
2261 break;
2262 case SOCKOP_setsockopt:
2264 abi_ulong sockfd;
2265 abi_ulong level;
2266 abi_ulong optname;
2267 abi_ulong optval;
2268 socklen_t optlen;
2270 if (get_user_ual(sockfd, vptr)
2271 || get_user_ual(level, vptr + n)
2272 || get_user_ual(optname, vptr + 2 * n)
2273 || get_user_ual(optval, vptr + 3 * n)
2274 || get_user_ual(optlen, vptr + 4 * n))
2275 return -TARGET_EFAULT;
2277 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2279 break;
2280 case SOCKOP_getsockopt:
2282 abi_ulong sockfd;
2283 abi_ulong level;
2284 abi_ulong optname;
2285 abi_ulong optval;
2286 socklen_t optlen;
2288 if (get_user_ual(sockfd, vptr)
2289 || get_user_ual(level, vptr + n)
2290 || get_user_ual(optname, vptr + 2 * n)
2291 || get_user_ual(optval, vptr + 3 * n)
2292 || get_user_ual(optlen, vptr + 4 * n))
2293 return -TARGET_EFAULT;
2295 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2297 break;
2298 default:
2299 gemu_log("Unsupported socketcall: %d\n", num);
2300 ret = -TARGET_ENOSYS;
2301 break;
2303 return ret;
2305 #endif
2307 #define N_SHM_REGIONS 32
2309 static struct shm_region {
2310 abi_ulong start;
2311 abi_ulong size;
2312 } shm_regions[N_SHM_REGIONS];
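/* SysV IPC emulation.  The target_* structures below mirror the guest ABI
 * layout of the corresponding host structures; the conversion helpers copy
 * them field by field through lock_user_struct(), swapping abi_ulong fields
 * with tswapal() and 16-bit fields with tswap16() where guest and host
 * endianness differ.
 */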
2314 struct target_ipc_perm
2316 abi_long __key;
2317 abi_ulong uid;
2318 abi_ulong gid;
2319 abi_ulong cuid;
2320 abi_ulong cgid;
2321 unsigned short int mode;
2322 unsigned short int __pad1;
2323 unsigned short int __seq;
2324 unsigned short int __pad2;
2325 abi_ulong __unused1;
2326 abi_ulong __unused2;
2329 struct target_semid_ds
2331 struct target_ipc_perm sem_perm;
2332 abi_ulong sem_otime;
2333 abi_ulong __unused1;
2334 abi_ulong sem_ctime;
2335 abi_ulong __unused2;
2336 abi_ulong sem_nsems;
2337 abi_ulong __unused3;
2338 abi_ulong __unused4;
2341 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2342 abi_ulong target_addr)
2344 struct target_ipc_perm *target_ip;
2345 struct target_semid_ds *target_sd;
2347 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2348 return -TARGET_EFAULT;
2349 target_ip = &(target_sd->sem_perm);
2350 host_ip->__key = tswapal(target_ip->__key);
2351 host_ip->uid = tswapal(target_ip->uid);
2352 host_ip->gid = tswapal(target_ip->gid);
2353 host_ip->cuid = tswapal(target_ip->cuid);
2354 host_ip->cgid = tswapal(target_ip->cgid);
2355 host_ip->mode = tswap16(target_ip->mode);
2356 unlock_user_struct(target_sd, target_addr, 0);
2357 return 0;
2360 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2361 struct ipc_perm *host_ip)
2363 struct target_ipc_perm *target_ip;
2364 struct target_semid_ds *target_sd;
2366 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2367 return -TARGET_EFAULT;
2368 target_ip = &(target_sd->sem_perm);
2369 target_ip->__key = tswapal(host_ip->__key);
2370 target_ip->uid = tswapal(host_ip->uid);
2371 target_ip->gid = tswapal(host_ip->gid);
2372 target_ip->cuid = tswapal(host_ip->cuid);
2373 target_ip->cgid = tswapal(host_ip->cgid);
2374 target_ip->mode = tswap16(host_ip->mode);
2375 unlock_user_struct(target_sd, target_addr, 1);
2376 return 0;
2379 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2380 abi_ulong target_addr)
2382 struct target_semid_ds *target_sd;
2384 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2385 return -TARGET_EFAULT;
2386 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2387 return -TARGET_EFAULT;
2388 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2389 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2390 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2391 unlock_user_struct(target_sd, target_addr, 0);
2392 return 0;
2395 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2396 struct semid_ds *host_sd)
2398 struct target_semid_ds *target_sd;
2400 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2401 return -TARGET_EFAULT;
2402 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2403 return -TARGET_EFAULT;
2404 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2405 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2406 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2407 unlock_user_struct(target_sd, target_addr, 1);
2408 return 0;
2411 struct target_seminfo {
2412 int semmap;
2413 int semmni;
2414 int semmns;
2415 int semmnu;
2416 int semmsl;
2417 int semopm;
2418 int semume;
2419 int semusz;
2420 int semvmx;
2421 int semaem;
2424 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2425 struct seminfo *host_seminfo)
2427 struct target_seminfo *target_seminfo;
2428 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2429 return -TARGET_EFAULT;
2430 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2431 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2432 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2433 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2434 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2435 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2436 __put_user(host_seminfo->semume, &target_seminfo->semume);
2437 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2438 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2439 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2440 unlock_user_struct(target_seminfo, target_addr, 1);
2441 return 0;
2444 union semun {
2445 int val;
2446 struct semid_ds *buf;
2447 unsigned short *array;
2448 struct seminfo *__buf;
2451 union target_semun {
2452 int val;
2453 abi_ulong buf;
2454 abi_ulong array;
2455 abi_ulong __buf;
2458 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2459 abi_ulong target_addr)
2461 int nsems;
2462 unsigned short *array;
2463 union semun semun;
2464 struct semid_ds semid_ds;
2465 int i, ret;
2467 semun.buf = &semid_ds;
2469 ret = semctl(semid, 0, IPC_STAT, semun);
2470 if (ret == -1)
2471 return get_errno(ret);
2473 nsems = semid_ds.sem_nsems;
2475 *host_array = malloc(nsems*sizeof(unsigned short));
if (!*host_array) {
return -TARGET_ENOMEM;
}
2476 array = lock_user(VERIFY_READ, target_addr,
2477 nsems*sizeof(unsigned short), 1);
2478 if (!array) {
free(*host_array);
2479 return -TARGET_EFAULT;
}
2481 for(i=0; i<nsems; i++) {
2482 __get_user((*host_array)[i], &array[i]);
2484 unlock_user(array, target_addr, 0);
2486 return 0;
2489 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2490 unsigned short **host_array)
2492 int nsems;
2493 unsigned short *array;
2494 union semun semun;
2495 struct semid_ds semid_ds;
2496 int i, ret;
2498 semun.buf = &semid_ds;
2500 ret = semctl(semid, 0, IPC_STAT, semun);
2501 if (ret == -1)
2502 return get_errno(ret);
2504 nsems = semid_ds.sem_nsems;
2506 array = lock_user(VERIFY_WRITE, target_addr,
2507 nsems*sizeof(unsigned short), 0);
2508 if (!array) {
free(*host_array);
2509 return -TARGET_EFAULT;
}
2511 for(i=0; i<nsems; i++) {
2512 __put_user((*host_array)[i], &array[i]);
2514 free(*host_array);
2515 unlock_user(array, target_addr, 1);
2517 return 0;
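/* do_semctl() dispatches on the low byte of cmd and converts the semun
 * argument according to the command: val is byte-swapped directly, arrays
 * of semaphore values go through the *_semarray() helpers above (sized via
 * an IPC_STAT on the host), and semid_ds/seminfo buffers are converted with
 * the structure helpers.  Commands that take no argument are passed NULL.
 */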
2520 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2521 union target_semun target_su)
2523 union semun arg;
2524 struct semid_ds dsarg;
2525 unsigned short *array = NULL;
2526 struct seminfo seminfo;
2527 abi_long ret = -TARGET_EINVAL;
2528 abi_long err;
2529 cmd &= 0xff;
2531 switch( cmd ) {
2532 case GETVAL:
2533 case SETVAL:
2534 arg.val = tswap32(target_su.val);
2535 ret = get_errno(semctl(semid, semnum, cmd, arg));
2536 target_su.val = tswap32(arg.val);
2537 break;
2538 case GETALL:
2539 case SETALL:
2540 err = target_to_host_semarray(semid, &array, target_su.array);
2541 if (err)
2542 return err;
2543 arg.array = array;
2544 ret = get_errno(semctl(semid, semnum, cmd, arg));
2545 err = host_to_target_semarray(semid, target_su.array, &array);
2546 if (err)
2547 return err;
2548 break;
2549 case IPC_STAT:
2550 case IPC_SET:
2551 case SEM_STAT:
2552 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2553 if (err)
2554 return err;
2555 arg.buf = &dsarg;
2556 ret = get_errno(semctl(semid, semnum, cmd, arg));
2557 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2558 if (err)
2559 return err;
2560 break;
2561 case IPC_INFO:
2562 case SEM_INFO:
2563 arg.__buf = &seminfo;
2564 ret = get_errno(semctl(semid, semnum, cmd, arg));
2565 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2566 if (err)
2567 return err;
2568 break;
2569 case IPC_RMID:
2570 case GETPID:
2571 case GETNCNT:
2572 case GETZCNT:
2573 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2574 break;
2577 return ret;
2580 struct target_sembuf {
2581 unsigned short sem_num;
2582 short sem_op;
2583 short sem_flg;
2586 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2587 abi_ulong target_addr,
2588 unsigned nsops)
2590 struct target_sembuf *target_sembuf;
2591 int i;
2593 target_sembuf = lock_user(VERIFY_READ, target_addr,
2594 nsops*sizeof(struct target_sembuf), 1);
2595 if (!target_sembuf)
2596 return -TARGET_EFAULT;
2598 for(i=0; i<nsops; i++) {
2599 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2600 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2601 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2604 unlock_user(target_sembuf, target_addr, 0);
2606 return 0;
2609 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2611 struct sembuf sops[nsops];
2613 if (target_to_host_sembuf(sops, ptr, nsops))
2614 return -TARGET_EFAULT;
2616 return get_errno(semop(semid, sops, nsops));
2619 struct target_msqid_ds
2621 struct target_ipc_perm msg_perm;
2622 abi_ulong msg_stime;
2623 #if TARGET_ABI_BITS == 32
2624 abi_ulong __unused1;
2625 #endif
2626 abi_ulong msg_rtime;
2627 #if TARGET_ABI_BITS == 32
2628 abi_ulong __unused2;
2629 #endif
2630 abi_ulong msg_ctime;
2631 #if TARGET_ABI_BITS == 32
2632 abi_ulong __unused3;
2633 #endif
2634 abi_ulong __msg_cbytes;
2635 abi_ulong msg_qnum;
2636 abi_ulong msg_qbytes;
2637 abi_ulong msg_lspid;
2638 abi_ulong msg_lrpid;
2639 abi_ulong __unused4;
2640 abi_ulong __unused5;
2643 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2644 abi_ulong target_addr)
2646 struct target_msqid_ds *target_md;
2648 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2649 return -TARGET_EFAULT;
2650 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2651 return -TARGET_EFAULT;
2652 host_md->msg_stime = tswapal(target_md->msg_stime);
2653 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2654 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2655 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2656 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2657 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2658 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2659 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2660 unlock_user_struct(target_md, target_addr, 0);
2661 return 0;
2664 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2665 struct msqid_ds *host_md)
2667 struct target_msqid_ds *target_md;
2669 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2670 return -TARGET_EFAULT;
2671 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2672 return -TARGET_EFAULT;
2673 target_md->msg_stime = tswapal(host_md->msg_stime);
2674 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2675 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2676 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2677 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2678 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2679 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2680 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2681 unlock_user_struct(target_md, target_addr, 1);
2682 return 0;
2685 struct target_msginfo {
2686 int msgpool;
2687 int msgmap;
2688 int msgmax;
2689 int msgmnb;
2690 int msgmni;
2691 int msgssz;
2692 int msgtql;
2693 unsigned short int msgseg;
2696 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2697 struct msginfo *host_msginfo)
2699 struct target_msginfo *target_msginfo;
2700 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2701 return -TARGET_EFAULT;
2702 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2703 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2704 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2705 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2706 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2707 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2708 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2709 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2710 unlock_user_struct(target_msginfo, target_addr, 1);
2711 return 0;
2714 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2716 struct msqid_ds dsarg;
2717 struct msginfo msginfo;
2718 abi_long ret = -TARGET_EINVAL;
2720 cmd &= 0xff;
2722 switch (cmd) {
2723 case IPC_STAT:
2724 case IPC_SET:
2725 case MSG_STAT:
2726 if (target_to_host_msqid_ds(&dsarg,ptr))
2727 return -TARGET_EFAULT;
2728 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2729 if (host_to_target_msqid_ds(ptr,&dsarg))
2730 return -TARGET_EFAULT;
2731 break;
2732 case IPC_RMID:
2733 ret = get_errno(msgctl(msgid, cmd, NULL));
2734 break;
2735 case IPC_INFO:
2736 case MSG_INFO:
2737 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2738 if (host_to_target_msginfo(ptr, &msginfo))
2739 return -TARGET_EFAULT;
2740 break;
2743 return ret;
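/* Message queue payloads: a target msgbuf is an abi_long mtype followed by
 * the message text.  do_msgsnd()/do_msgrcv() allocate a host msgbuf, swap
 * mtype with tswapal() and copy mtext verbatim, since the text itself has
 * no endianness.
 */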
2746 struct target_msgbuf {
2747 abi_long mtype;
2748 char mtext[1];
2751 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2752 unsigned int msgsz, int msgflg)
2754 struct target_msgbuf *target_mb;
2755 struct msgbuf *host_mb;
2756 abi_long ret = 0;
2758 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2759 return -TARGET_EFAULT;
2760 host_mb = malloc(msgsz+sizeof(long));
if (!host_mb) {
unlock_user_struct(target_mb, msgp, 0);
return -TARGET_ENOMEM;
}
2761 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2762 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2763 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2764 free(host_mb);
2765 unlock_user_struct(target_mb, msgp, 0);
2767 return ret;
2770 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2771 unsigned int msgsz, abi_long msgtyp,
2772 int msgflg)
2774 struct target_msgbuf *target_mb;
2775 char *target_mtext;
2776 struct msgbuf *host_mb;
2777 abi_long ret = 0;
2779 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2780 return -TARGET_EFAULT;
2782 host_mb = g_malloc(msgsz+sizeof(long));
2783 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2785 if (ret > 0) {
2786 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2787 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2788 if (!target_mtext) {
2789 ret = -TARGET_EFAULT;
2790 goto end;
2792 memcpy(target_mb->mtext, host_mb->mtext, ret);
2793 unlock_user(target_mtext, target_mtext_addr, ret);
2796 target_mb->mtype = tswapal(host_mb->mtype);
2798 end:
2799 if (target_mb)
2800 unlock_user_struct(target_mb, msgp, 1);
2801 g_free(host_mb);
2802 return ret;
2805 struct target_shmid_ds
2807 struct target_ipc_perm shm_perm;
2808 abi_ulong shm_segsz;
2809 abi_ulong shm_atime;
2810 #if TARGET_ABI_BITS == 32
2811 abi_ulong __unused1;
2812 #endif
2813 abi_ulong shm_dtime;
2814 #if TARGET_ABI_BITS == 32
2815 abi_ulong __unused2;
2816 #endif
2817 abi_ulong shm_ctime;
2818 #if TARGET_ABI_BITS == 32
2819 abi_ulong __unused3;
2820 #endif
2821 int shm_cpid;
2822 int shm_lpid;
2823 abi_ulong shm_nattch;
2824 unsigned long int __unused4;
2825 unsigned long int __unused5;
2828 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2829 abi_ulong target_addr)
2831 struct target_shmid_ds *target_sd;
2833 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2834 return -TARGET_EFAULT;
2835 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2836 return -TARGET_EFAULT;
2837 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2838 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2839 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2840 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2841 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2842 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2843 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2844 unlock_user_struct(target_sd, target_addr, 0);
2845 return 0;
2848 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2849 struct shmid_ds *host_sd)
2851 struct target_shmid_ds *target_sd;
2853 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2854 return -TARGET_EFAULT;
2855 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2856 return -TARGET_EFAULT;
2857 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2858 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2859 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2860 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2861 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2862 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2863 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2864 unlock_user_struct(target_sd, target_addr, 1);
2865 return 0;
2868 struct target_shminfo {
2869 abi_ulong shmmax;
2870 abi_ulong shmmin;
2871 abi_ulong shmmni;
2872 abi_ulong shmseg;
2873 abi_ulong shmall;
2876 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2877 struct shminfo *host_shminfo)
2879 struct target_shminfo *target_shminfo;
2880 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2881 return -TARGET_EFAULT;
2882 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2883 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2884 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2885 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2886 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2887 unlock_user_struct(target_shminfo, target_addr, 1);
2888 return 0;
2891 struct target_shm_info {
2892 int used_ids;
2893 abi_ulong shm_tot;
2894 abi_ulong shm_rss;
2895 abi_ulong shm_swp;
2896 abi_ulong swap_attempts;
2897 abi_ulong swap_successes;
2900 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2901 struct shm_info *host_shm_info)
2903 struct target_shm_info *target_shm_info;
2904 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2905 return -TARGET_EFAULT;
2906 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2907 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2908 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2909 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2910 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2911 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2912 unlock_user_struct(target_shm_info, target_addr, 1);
2913 return 0;
2916 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2918 struct shmid_ds dsarg;
2919 struct shminfo shminfo;
2920 struct shm_info shm_info;
2921 abi_long ret = -TARGET_EINVAL;
2923 cmd &= 0xff;
2925 switch(cmd) {
2926 case IPC_STAT:
2927 case IPC_SET:
2928 case SHM_STAT:
2929 if (target_to_host_shmid_ds(&dsarg, buf))
2930 return -TARGET_EFAULT;
2931 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2932 if (host_to_target_shmid_ds(buf, &dsarg))
2933 return -TARGET_EFAULT;
2934 break;
2935 case IPC_INFO:
2936 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2937 if (host_to_target_shminfo(buf, &shminfo))
2938 return -TARGET_EFAULT;
2939 break;
2940 case SHM_INFO:
2941 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2942 if (host_to_target_shm_info(buf, &shm_info))
2943 return -TARGET_EFAULT;
2944 break;
2945 case IPC_RMID:
2946 case SHM_LOCK:
2947 case SHM_UNLOCK:
2948 ret = get_errno(shmctl(shmid, cmd, NULL));
2949 break;
2952 return ret;
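/* do_shmat() queries the segment size with IPC_STAT, attaches either at the
 * guest-requested address (translated with g2h()) or at a free area found
 * by mmap_find_vma(), records the mapping in shm_regions[] so that
 * do_shmdt() can later drop the page flags, and marks the guest pages
 * valid/readable (and writable unless SHM_RDONLY was given).
 */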
2955 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2957 abi_long raddr;
2958 void *host_raddr;
2959 struct shmid_ds shm_info;
2960 int i,ret;
2962 /* find out the length of the shared memory segment */
2963 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2964 if (is_error(ret)) {
2965 /* can't get length, bail out */
2966 return ret;
2969 mmap_lock();
2971 if (shmaddr)
2972 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2973 else {
2974 abi_ulong mmap_start;
2976 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2978 if (mmap_start == -1) {
2979 errno = ENOMEM;
2980 host_raddr = (void *)-1;
2981 } else
2982 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2985 if (host_raddr == (void *)-1) {
2986 mmap_unlock();
2987 return get_errno((long)host_raddr);
2989 raddr=h2g((unsigned long)host_raddr);
2991 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2992 PAGE_VALID | PAGE_READ |
2993 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2995 for (i = 0; i < N_SHM_REGIONS; i++) {
2996 if (shm_regions[i].start == 0) {
2997 shm_regions[i].start = raddr;
2998 shm_regions[i].size = shm_info.shm_segsz;
2999 break;
3003 mmap_unlock();
3004 return raddr;
3008 static inline abi_long do_shmdt(abi_ulong shmaddr)
3010 int i;
3012 for (i = 0; i < N_SHM_REGIONS; ++i) {
3013 if (shm_regions[i].start == shmaddr) {
3014 shm_regions[i].start = 0;
3015 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3016 break;
3020 return get_errno(shmdt(g2h(shmaddr)));
3023 #ifdef TARGET_NR_ipc
3024 /* ??? This only works with linear mappings. */
3025 /* do_ipc() must return target values and target errnos. */
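/* Like socketcall, the ipc(2) syscall multiplexes all SysV IPC operations:
 * the low 16 bits of 'call' select the operation and the high 16 bits carry
 * a version field that changes how the msgrcv and shmat arguments are laid
 * out.  Each case simply forwards to the do_*() helpers above.
 */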
3026 static abi_long do_ipc(unsigned int call, int first,
3027 int second, int third,
3028 abi_long ptr, abi_long fifth)
3030 int version;
3031 abi_long ret = 0;
3033 version = call >> 16;
3034 call &= 0xffff;
3036 switch (call) {
3037 case IPCOP_semop:
3038 ret = do_semop(first, ptr, second);
3039 break;
3041 case IPCOP_semget:
3042 ret = get_errno(semget(first, second, third));
3043 break;
3045 case IPCOP_semctl:
3046 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3047 break;
3049 case IPCOP_msgget:
3050 ret = get_errno(msgget(first, second));
3051 break;
3053 case IPCOP_msgsnd:
3054 ret = do_msgsnd(first, ptr, second, third);
3055 break;
3057 case IPCOP_msgctl:
3058 ret = do_msgctl(first, second, ptr);
3059 break;
3061 case IPCOP_msgrcv:
3062 switch (version) {
3063 case 0:
3065 struct target_ipc_kludge {
3066 abi_long msgp;
3067 abi_long msgtyp;
3068 } *tmp;
3070 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3071 ret = -TARGET_EFAULT;
3072 break;
3075 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3077 unlock_user_struct(tmp, ptr, 0);
3078 break;
3080 default:
3081 ret = do_msgrcv(first, ptr, second, fifth, third);
3083 break;
3085 case IPCOP_shmat:
3086 switch (version) {
3087 default:
3089 abi_ulong raddr;
3090 raddr = do_shmat(first, ptr, second);
3091 if (is_error(raddr))
3092 return get_errno(raddr);
3093 if (put_user_ual(raddr, third))
3094 return -TARGET_EFAULT;
3095 break;
3097 case 1:
3098 ret = -TARGET_EINVAL;
3099 break;
3101 break;
3102 case IPCOP_shmdt:
3103 ret = do_shmdt(ptr);
3104 break;
3106 case IPCOP_shmget:
3107 /* IPC_* flag values are the same on all linux platforms */
3108 ret = get_errno(shmget(first, second, third));
3109 break;
3111 /* IPC_* and SHM_* command values are the same on all linux platforms */
3112 case IPCOP_shmctl:
3113 ret = do_shmctl(first, second, third);
3114 break;
3115 default:
3116 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3117 ret = -TARGET_ENOSYS;
3118 break;
3120 return ret;
3122 #endif
3124 /* kernel structure types definitions */
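/* syscall_types.h is included twice below: the first pass turns each
 * STRUCT() entry into a STRUCT_<name> enum value, the second pass emits a
 * struct_<name>_def[] argtype description that the thunk code uses to
 * convert ioctl structures between target and host layouts.
 */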
3126 #define STRUCT(name, ...) STRUCT_ ## name,
3127 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3128 enum {
3129 #include "syscall_types.h"
3131 #undef STRUCT
3132 #undef STRUCT_SPECIAL
3134 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3135 #define STRUCT_SPECIAL(name)
3136 #include "syscall_types.h"
3137 #undef STRUCT
3138 #undef STRUCT_SPECIAL
3140 typedef struct IOCTLEntry IOCTLEntry;
3142 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3143 int fd, abi_long cmd, abi_long arg);
3145 struct IOCTLEntry {
3146 unsigned int target_cmd;
3147 unsigned int host_cmd;
3148 const char *name;
3149 int access;
3150 do_ioctl_fn *do_ioctl;
3151 const argtype arg_type[5];
3154 #define IOC_R 0x0001
3155 #define IOC_W 0x0002
3156 #define IOC_RW (IOC_R | IOC_W)
3158 #define MAX_STRUCT_SIZE 4096
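/* Each IOCTLEntry maps a target ioctl number to the host number, an access
 * mode (IOC_R/IOC_W/IOC_RW) and an argtype description; the entries are
 * generated from ioctls.h.  Requests that need more than the generic thunk
 * conversion set do_ioctl, and do_ioctl() below calls that handler instead
 * of the default read/convert/write path.
 */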
3160 #ifdef CONFIG_FIEMAP
3161 /* So fiemap access checks don't overflow on 32 bit systems.
3162 * This is very slightly smaller than the limit imposed by
3163 * the underlying kernel.
3165 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3166 / sizeof(struct fiemap_extent))
3168 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3169 int fd, abi_long cmd, abi_long arg)
3171 /* The parameter for this ioctl is a struct fiemap followed
3172 * by an array of struct fiemap_extent whose size is set
3173 * in fiemap->fm_extent_count. The array is filled in by the
3174 * ioctl.
3176 int target_size_in, target_size_out;
3177 struct fiemap *fm;
3178 const argtype *arg_type = ie->arg_type;
3179 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3180 void *argptr, *p;
3181 abi_long ret;
3182 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3183 uint32_t outbufsz;
3184 int free_fm = 0;
3186 assert(arg_type[0] == TYPE_PTR);
3187 assert(ie->access == IOC_RW);
3188 arg_type++;
3189 target_size_in = thunk_type_size(arg_type, 0);
3190 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3191 if (!argptr) {
3192 return -TARGET_EFAULT;
3194 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3195 unlock_user(argptr, arg, 0);
3196 fm = (struct fiemap *)buf_temp;
3197 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3198 return -TARGET_EINVAL;
3201 outbufsz = sizeof (*fm) +
3202 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3204 if (outbufsz > MAX_STRUCT_SIZE) {
3205 /* We can't fit all the extents into the fixed size buffer.
3206 * Allocate one that is large enough and use it instead.
3208 fm = malloc(outbufsz);
3209 if (!fm) {
3210 return -TARGET_ENOMEM;
3212 memcpy(fm, buf_temp, sizeof(struct fiemap));
3213 free_fm = 1;
3215 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3216 if (!is_error(ret)) {
3217 target_size_out = target_size_in;
3218 /* An extent_count of 0 means we were only counting the extents
3219 * so there are no structs to copy
3221 if (fm->fm_extent_count != 0) {
3222 target_size_out += fm->fm_mapped_extents * extent_size;
3224 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3225 if (!argptr) {
3226 ret = -TARGET_EFAULT;
3227 } else {
3228 /* Convert the struct fiemap */
3229 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3230 if (fm->fm_extent_count != 0) {
3231 p = argptr + target_size_in;
3232 /* ...and then all the struct fiemap_extents */
3233 for (i = 0; i < fm->fm_mapped_extents; i++) {
3234 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3235 THUNK_TARGET);
3236 p += extent_size;
3239 unlock_user(argptr, arg, target_size_out);
3242 if (free_fm) {
3243 free(fm);
3245 return ret;
3247 #endif
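/* SIOCGIFCONF: the target and host struct ifreq can differ in size, so the
 * handler below rewrites ifc_len from "number of target ifreqs" to "number
 * of host ifreqs", points ifc_buf at a host buffer, performs the ioctl and
 * then converts each returned ifreq back to the target layout before
 * restoring the original guest ifc_buf pointer.
 */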
3249 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3250 int fd, abi_long cmd, abi_long arg)
3252 const argtype *arg_type = ie->arg_type;
3253 int target_size;
3254 void *argptr;
3255 int ret;
3256 struct ifconf *host_ifconf;
3257 uint32_t outbufsz;
3258 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3259 int target_ifreq_size;
3260 int nb_ifreq;
3261 int free_buf = 0;
3262 int i;
3263 int target_ifc_len;
3264 abi_long target_ifc_buf;
3265 int host_ifc_len;
3266 char *host_ifc_buf;
3268 assert(arg_type[0] == TYPE_PTR);
3269 assert(ie->access == IOC_RW);
3271 arg_type++;
3272 target_size = thunk_type_size(arg_type, 0);
3274 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3275 if (!argptr)
3276 return -TARGET_EFAULT;
3277 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3278 unlock_user(argptr, arg, 0);
3280 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3281 target_ifc_len = host_ifconf->ifc_len;
3282 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3284 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3285 nb_ifreq = target_ifc_len / target_ifreq_size;
3286 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3288 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3289 if (outbufsz > MAX_STRUCT_SIZE) {
3290 /* We can't fit all the ifreq entries into the fixed size buffer.
3291 * Allocate one that is large enough and use it instead.
3293 host_ifconf = malloc(outbufsz);
3294 if (!host_ifconf) {
3295 return -TARGET_ENOMEM;
3297 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3298 free_buf = 1;
3300 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3302 host_ifconf->ifc_len = host_ifc_len;
3303 host_ifconf->ifc_buf = host_ifc_buf;
3305 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3306 if (!is_error(ret)) {
3307 /* convert host ifc_len to target ifc_len */
3309 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3310 target_ifc_len = nb_ifreq * target_ifreq_size;
3311 host_ifconf->ifc_len = target_ifc_len;
3313 /* restore target ifc_buf */
3315 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3317 /* copy struct ifconf to target user */
3319 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3320 if (!argptr)
3321 return -TARGET_EFAULT;
3322 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3323 unlock_user(argptr, arg, target_size);
3325 /* copy ifreq[] to target user */
3327 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3328 for (i = 0; i < nb_ifreq ; i++) {
3329 thunk_convert(argptr + i * target_ifreq_size,
3330 host_ifc_buf + i * sizeof(struct ifreq),
3331 ifreq_arg_type, THUNK_TARGET);
3333 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3336 if (free_buf) {
3337 free(host_ifconf);
3340 return ret;
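/* Device-mapper ioctls carry a variable-sized payload after the fixed
 * struct dm_ioctl header (at offset data_start, total size data_size), so
 * the handler below copies the request into a larger scratch buffer,
 * converts the per-command input data, issues the ioctl and then converts
 * the command-specific result lists back into the guest buffer.
 */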
3343 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3344 abi_long cmd, abi_long arg)
3346 void *argptr;
3347 struct dm_ioctl *host_dm;
3348 abi_long guest_data;
3349 uint32_t guest_data_size;
3350 int target_size;
3351 const argtype *arg_type = ie->arg_type;
3352 abi_long ret;
3353 void *big_buf = NULL;
3354 char *host_data;
3356 arg_type++;
3357 target_size = thunk_type_size(arg_type, 0);
3358 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3359 if (!argptr) {
3360 ret = -TARGET_EFAULT;
3361 goto out;
3363 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3364 unlock_user(argptr, arg, 0);
3366 /* buf_temp is too small, so fetch things into a bigger buffer */
3367 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3368 memcpy(big_buf, buf_temp, target_size);
3369 buf_temp = big_buf;
3370 host_dm = big_buf;
3372 guest_data = arg + host_dm->data_start;
3373 if ((guest_data - arg) < 0) {
3374 ret = -TARGET_EINVAL;
3375 goto out;
3377 guest_data_size = host_dm->data_size - host_dm->data_start;
3378 host_data = (char*)host_dm + host_dm->data_start;
3380 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3381 switch (ie->host_cmd) {
3382 case DM_REMOVE_ALL:
3383 case DM_LIST_DEVICES:
3384 case DM_DEV_CREATE:
3385 case DM_DEV_REMOVE:
3386 case DM_DEV_SUSPEND:
3387 case DM_DEV_STATUS:
3388 case DM_DEV_WAIT:
3389 case DM_TABLE_STATUS:
3390 case DM_TABLE_CLEAR:
3391 case DM_TABLE_DEPS:
3392 case DM_LIST_VERSIONS:
3393 /* no input data */
3394 break;
3395 case DM_DEV_RENAME:
3396 case DM_DEV_SET_GEOMETRY:
3397 /* data contains only strings */
3398 memcpy(host_data, argptr, guest_data_size);
3399 break;
3400 case DM_TARGET_MSG:
3401 memcpy(host_data, argptr, guest_data_size);
3402 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3403 break;
3404 case DM_TABLE_LOAD:
3406 void *gspec = argptr;
3407 void *cur_data = host_data;
3408 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3409 int spec_size = thunk_type_size(arg_type, 0);
3410 int i;
3412 for (i = 0; i < host_dm->target_count; i++) {
3413 struct dm_target_spec *spec = cur_data;
3414 uint32_t next;
3415 int slen;
3417 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3418 slen = strlen((char*)gspec + spec_size) + 1;
3419 next = spec->next;
3420 spec->next = sizeof(*spec) + slen;
3421 strcpy((char*)&spec[1], gspec + spec_size);
3422 gspec += next;
3423 cur_data += spec->next;
3425 break;
3427 default:
3428 ret = -TARGET_EINVAL;
3429 goto out;
3431 unlock_user(argptr, guest_data, 0);
3433 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3434 if (!is_error(ret)) {
3435 guest_data = arg + host_dm->data_start;
3436 guest_data_size = host_dm->data_size - host_dm->data_start;
3437 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3438 switch (ie->host_cmd) {
3439 case DM_REMOVE_ALL:
3440 case DM_DEV_CREATE:
3441 case DM_DEV_REMOVE:
3442 case DM_DEV_RENAME:
3443 case DM_DEV_SUSPEND:
3444 case DM_DEV_STATUS:
3445 case DM_TABLE_LOAD:
3446 case DM_TABLE_CLEAR:
3447 case DM_TARGET_MSG:
3448 case DM_DEV_SET_GEOMETRY:
3449 /* no return data */
3450 break;
3451 case DM_LIST_DEVICES:
3453 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3454 uint32_t remaining_data = guest_data_size;
3455 void *cur_data = argptr;
3456 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3457 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
3459 while (1) {
3460 uint32_t next = nl->next;
3461 if (next) {
3462 nl->next = nl_size + (strlen(nl->name) + 1);
3464 if (remaining_data < nl->next) {
3465 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3466 break;
3468 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3469 strcpy(cur_data + nl_size, nl->name);
3470 cur_data += nl->next;
3471 remaining_data -= nl->next;
3472 if (!next) {
3473 break;
3475 nl = (void*)nl + next;
3477 break;
3479 case DM_DEV_WAIT:
3480 case DM_TABLE_STATUS:
3482 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3483 void *cur_data = argptr;
3484 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3485 int spec_size = thunk_type_size(arg_type, 0);
3486 int i;
3488 for (i = 0; i < host_dm->target_count; i++) {
3489 uint32_t next = spec->next;
3490 int slen = strlen((char*)&spec[1]) + 1;
3491 spec->next = (cur_data - argptr) + spec_size + slen;
3492 if (guest_data_size < spec->next) {
3493 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3494 break;
3496 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3497 strcpy(cur_data + spec_size, (char*)&spec[1]);
3498 cur_data = argptr + spec->next;
3499 spec = (void*)host_dm + host_dm->data_start + next;
3501 break;
3503 case DM_TABLE_DEPS:
3505 void *hdata = (void*)host_dm + host_dm->data_start;
3506 int count = *(uint32_t*)hdata;
3507 uint64_t *hdev = hdata + 8;
3508 uint64_t *gdev = argptr + 8;
3509 int i;
3511 *(uint32_t*)argptr = tswap32(count);
3512 for (i = 0; i < count; i++) {
3513 *gdev = tswap64(*hdev);
3514 gdev++;
3515 hdev++;
3517 break;
3519 case DM_LIST_VERSIONS:
3521 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3522 uint32_t remaining_data = guest_data_size;
3523 void *cur_data = argptr;
3524 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3525 int vers_size = thunk_type_size(arg_type, 0);
3527 while (1) {
3528 uint32_t next = vers->next;
3529 if (next) {
3530 vers->next = vers_size + (strlen(vers->name) + 1);
3532 if (remaining_data < vers->next) {
3533 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3534 break;
3536 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3537 strcpy(cur_data + vers_size, vers->name);
3538 cur_data += vers->next;
3539 remaining_data -= vers->next;
3540 if (!next) {
3541 break;
3543 vers = (void*)vers + next;
3545 break;
3547 default:
3548 ret = -TARGET_EINVAL;
3549 goto out;
3551 unlock_user(argptr, guest_data, guest_data_size);
3553 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3554 if (!argptr) {
3555 ret = -TARGET_EFAULT;
3556 goto out;
3558 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3559 unlock_user(argptr, arg, target_size);
3561 out:
3562 g_free(big_buf);
3563 return ret;
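/* Routing ioctls (SIOCADDRT/SIOCDELRT) pass a struct rtentry whose rt_dev
 * member is a pointer to a device name string, which the generic thunk
 * conversion cannot follow.  The handler below therefore converts the
 * structure field by field and locks the guest string so the host ioctl
 * sees a valid host pointer for rt_dev.
 */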
3566 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3567 int fd, abi_long cmd, abi_long arg)
3569 const argtype *arg_type = ie->arg_type;
3570 const StructEntry *se;
3571 const argtype *field_types;
3572 const int *dst_offsets, *src_offsets;
3573 int target_size;
3574 void *argptr;
3575 abi_ulong *target_rt_dev_ptr;
3576 unsigned long *host_rt_dev_ptr;
3577 abi_long ret;
3578 int i;
3580 assert(ie->access == IOC_W);
3581 assert(*arg_type == TYPE_PTR);
3582 arg_type++;
3583 assert(*arg_type == TYPE_STRUCT);
3584 target_size = thunk_type_size(arg_type, 0);
3585 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3586 if (!argptr) {
3587 return -TARGET_EFAULT;
3589 arg_type++;
3590 assert(*arg_type == (int)STRUCT_rtentry);
3591 se = struct_entries + *arg_type++;
3592 assert(se->convert[0] == NULL);
3593 /* convert struct here to be able to catch rt_dev string */
3594 field_types = se->field_types;
3595 dst_offsets = se->field_offsets[THUNK_HOST];
3596 src_offsets = se->field_offsets[THUNK_TARGET];
3597 for (i = 0; i < se->nb_fields; i++) {
3598 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3599 assert(*field_types == TYPE_PTRVOID);
3600 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3601 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3602 if (*target_rt_dev_ptr != 0) {
3603 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3604 tswapal(*target_rt_dev_ptr));
3605 if (!*host_rt_dev_ptr) {
3606 unlock_user(argptr, arg, 0);
3607 return -TARGET_EFAULT;
3609 } else {
3610 *host_rt_dev_ptr = 0;
3612 field_types++;
3613 continue;
3615 field_types = thunk_convert(buf_temp + dst_offsets[i],
3616 argptr + src_offsets[i],
3617 field_types, THUNK_HOST);
3619 unlock_user(argptr, arg, 0);
3621 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3622 if (*host_rt_dev_ptr != 0) {
3623 unlock_user((void *)*host_rt_dev_ptr,
3624 *target_rt_dev_ptr, 0);
3626 return ret;
3629 static IOCTLEntry ioctl_entries[] = {
3630 #define IOCTL(cmd, access, ...) \
3631 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3632 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3633 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3634 #include "ioctls.h"
3635 { 0, 0, },
3638 /* ??? Implement proper locking for ioctls. */
3639 /* do_ioctl() must return target values and target errnos. */
3640 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3642 const IOCTLEntry *ie;
3643 const argtype *arg_type;
3644 abi_long ret;
3645 uint8_t buf_temp[MAX_STRUCT_SIZE];
3646 int target_size;
3647 void *argptr;
3649 ie = ioctl_entries;
3650 for(;;) {
3651 if (ie->target_cmd == 0) {
3652 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3653 return -TARGET_ENOSYS;
3655 if (ie->target_cmd == cmd)
3656 break;
3657 ie++;
3659 arg_type = ie->arg_type;
3660 #if defined(DEBUG)
3661 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3662 #endif
3663 if (ie->do_ioctl) {
3664 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3667 switch(arg_type[0]) {
3668 case TYPE_NULL:
3669 /* no argument */
3670 ret = get_errno(ioctl(fd, ie->host_cmd));
3671 break;
3672 case TYPE_PTRVOID:
3673 case TYPE_INT:
3674 /* int argument */
3675 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3676 break;
3677 case TYPE_PTR:
3678 arg_type++;
3679 target_size = thunk_type_size(arg_type, 0);
3680 switch(ie->access) {
3681 case IOC_R:
3682 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3683 if (!is_error(ret)) {
3684 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3685 if (!argptr)
3686 return -TARGET_EFAULT;
3687 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3688 unlock_user(argptr, arg, target_size);
3690 break;
3691 case IOC_W:
3692 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3693 if (!argptr)
3694 return -TARGET_EFAULT;
3695 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3696 unlock_user(argptr, arg, 0);
3697 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3698 break;
3699 default:
3700 case IOC_RW:
3701 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3702 if (!argptr)
3703 return -TARGET_EFAULT;
3704 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3705 unlock_user(argptr, arg, 0);
3706 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3707 if (!is_error(ret)) {
3708 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3709 if (!argptr)
3710 return -TARGET_EFAULT;
3711 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3712 unlock_user(argptr, arg, target_size);
3714 break;
3716 break;
3717 default:
3718 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3719 (long)cmd, arg_type[0]);
3720 ret = -TARGET_ENOSYS;
3721 break;
3723 return ret;
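/* Terminal settings: the bitmask_transtbl tables below translate individual
 * termios flag bits (and multi-bit fields such as CBAUD and CSIZE) between
 * the target and host encodings; target_to_host_bitmask() and
 * host_to_target_bitmask() walk them when converting struct termios.
 */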
3726 static const bitmask_transtbl iflag_tbl[] = {
3727 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3728 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3729 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3730 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3731 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3732 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3733 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3734 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3735 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3736 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3737 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3738 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3739 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3740 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3741 { 0, 0, 0, 0 }
3744 static const bitmask_transtbl oflag_tbl[] = {
3745 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3746 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3747 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3748 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3749 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3750 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3751 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3752 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3753 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3754 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3755 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3756 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3757 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3758 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3759 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3760 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3761 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3762 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3763 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3764 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3765 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3766 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3767 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3768 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3769 { 0, 0, 0, 0 }
3772 static const bitmask_transtbl cflag_tbl[] = {
3773 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3774 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3775 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3776 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3777 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3778 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3779 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3780 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3781 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3782 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3783 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3784 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3785 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3786 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3787 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3788 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3789 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3790 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3791 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3792 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3793 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3794 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3795 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3796 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3797 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3798 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3799 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3800 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3801 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3802 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3803 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3804 { 0, 0, 0, 0 }
3807 static const bitmask_transtbl lflag_tbl[] = {
3808 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3809 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3810 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3811 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3812 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3813 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3814 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3815 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3816 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3817 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3818 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3819 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3820 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3821 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3822 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3823 { 0, 0, 0, 0 }
3826 static void target_to_host_termios (void *dst, const void *src)
3828 struct host_termios *host = dst;
3829 const struct target_termios *target = src;
3831 host->c_iflag =
3832 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3833 host->c_oflag =
3834 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3835 host->c_cflag =
3836 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3837 host->c_lflag =
3838 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3839 host->c_line = target->c_line;
3841 memset(host->c_cc, 0, sizeof(host->c_cc));
3842 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3843 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3844 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3845 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3846 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3847 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3848 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3849 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3850 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3851 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3852 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3853 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3854 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3855 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3856 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3857 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3858 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3861 static void host_to_target_termios (void *dst, const void *src)
3863 struct target_termios *target = dst;
3864 const struct host_termios *host = src;
3866 target->c_iflag =
3867 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3868 target->c_oflag =
3869 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3870 target->c_cflag =
3871 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3872 target->c_lflag =
3873 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3874 target->c_line = host->c_line;
3876 memset(target->c_cc, 0, sizeof(target->c_cc));
3877 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3878 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3879 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3880 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3881 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3882 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3883 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3884 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3885 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3886 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3887 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3888 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3889 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3890 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3891 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3892 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3893 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3896 static const StructEntry struct_termios_def = {
3897 .convert = { host_to_target_termios, target_to_host_termios },
3898 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3899 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3902 static bitmask_transtbl mmap_flags_tbl[] = {
3903 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3904 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3905 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3906 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3907 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3908 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3909 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3910 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3911 { 0, 0, 0, 0 }
3914 #if defined(TARGET_I386)
3916 /* NOTE: there is really one LDT for all the threads */
3917 static uint8_t *ldt_table;
3919 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3921 int size;
3922 void *p;
3924 if (!ldt_table)
3925 return 0;
3926 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3927 if (size > bytecount)
3928 size = bytecount;
3929 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3930 if (!p)
3931 return -TARGET_EFAULT;
3932 /* ??? Should this be byteswapped? */
3933 memcpy(p, ldt_table, size);
3934 unlock_user(p, ptr, size);
3935 return size;
3938 /* XXX: add locking support */
3939 static abi_long write_ldt(CPUX86State *env,
3940 abi_ulong ptr, unsigned long bytecount, int oldmode)
3942 struct target_modify_ldt_ldt_s ldt_info;
3943 struct target_modify_ldt_ldt_s *target_ldt_info;
3944 int seg_32bit, contents, read_exec_only, limit_in_pages;
3945 int seg_not_present, useable, lm;
3946 uint32_t *lp, entry_1, entry_2;
3948 if (bytecount != sizeof(ldt_info))
3949 return -TARGET_EINVAL;
3950 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3951 return -TARGET_EFAULT;
3952 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3953 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3954 ldt_info.limit = tswap32(target_ldt_info->limit);
3955 ldt_info.flags = tswap32(target_ldt_info->flags);
3956 unlock_user_struct(target_ldt_info, ptr, 0);
3958 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3959 return -TARGET_EINVAL;
3960 seg_32bit = ldt_info.flags & 1;
3961 contents = (ldt_info.flags >> 1) & 3;
3962 read_exec_only = (ldt_info.flags >> 3) & 1;
3963 limit_in_pages = (ldt_info.flags >> 4) & 1;
3964 seg_not_present = (ldt_info.flags >> 5) & 1;
3965 useable = (ldt_info.flags >> 6) & 1;
3966 #ifdef TARGET_ABI32
3967 lm = 0;
3968 #else
3969 lm = (ldt_info.flags >> 7) & 1;
3970 #endif
3971 if (contents == 3) {
3972 if (oldmode)
3973 return -TARGET_EINVAL;
3974 if (seg_not_present == 0)
3975 return -TARGET_EINVAL;
3977 /* allocate the LDT */
3978 if (!ldt_table) {
3979 env->ldt.base = target_mmap(0,
3980 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3981 PROT_READ|PROT_WRITE,
3982 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3983 if (env->ldt.base == -1)
3984 return -TARGET_ENOMEM;
3985 memset(g2h(env->ldt.base), 0,
3986 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3987 env->ldt.limit = 0xffff;
3988 ldt_table = g2h(env->ldt.base);
3991 /* NOTE: same code as Linux kernel */
3992 /* Allow LDTs to be cleared by the user. */
3993 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3994 if (oldmode ||
3995 (contents == 0 &&
3996 read_exec_only == 1 &&
3997 seg_32bit == 0 &&
3998 limit_in_pages == 0 &&
3999 seg_not_present == 1 &&
4000 useable == 0 )) {
4001 entry_1 = 0;
4002 entry_2 = 0;
4003 goto install;
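/* Build the two 32-bit words of an x86 segment descriptor: entry_1 carries
   base[15:0] and limit[15:0]; entry_2 carries base[31:16], limit[19:16] and
   the access/flag bits (type, DPL=3, present, AVL, L, D/B, granularity). */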
4007 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4008 (ldt_info.limit & 0x0ffff);
4009 entry_2 = (ldt_info.base_addr & 0xff000000) |
4010 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4011 (ldt_info.limit & 0xf0000) |
4012 ((read_exec_only ^ 1) << 9) |
4013 (contents << 10) |
4014 ((seg_not_present ^ 1) << 15) |
4015 (seg_32bit << 22) |
4016 (limit_in_pages << 23) |
4017 (lm << 21) |
4018 0x7000;
4019 if (!oldmode)
4020 entry_2 |= (useable << 20);
4022 /* Install the new entry ... */
4023 install:
4024 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4025 lp[0] = tswap32(entry_1);
4026 lp[1] = tswap32(entry_2);
4027 return 0;
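/* modify_ldt(2) func values: 0 reads the LDT, 1 writes an entry in the legacy
   format, 0x11 writes an entry in the new format; anything else is rejected. */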
4030 /* specific and weird i386 syscalls */
4031 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4032 unsigned long bytecount)
4034 abi_long ret;
4036 switch (func) {
4037 case 0:
4038 ret = read_ldt(ptr, bytecount);
4039 break;
4040 case 1:
4041 ret = write_ldt(env, ptr, bytecount, 1);
4042 break;
4043 case 0x11:
4044 ret = write_ldt(env, ptr, bytecount, 0);
4045 break;
4046 default:
4047 ret = -TARGET_ENOSYS;
4048 break;
4050 return ret;
4053 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4054 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4056 uint64_t *gdt_table = g2h(env->gdt.base);
4057 struct target_modify_ldt_ldt_s ldt_info;
4058 struct target_modify_ldt_ldt_s *target_ldt_info;
4059 int seg_32bit, contents, read_exec_only, limit_in_pages;
4060 int seg_not_present, useable, lm;
4061 uint32_t *lp, entry_1, entry_2;
4062 int i;
4064 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4065 if (!target_ldt_info)
4066 return -TARGET_EFAULT;
4067 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4068 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4069 ldt_info.limit = tswap32(target_ldt_info->limit);
4070 ldt_info.flags = tswap32(target_ldt_info->flags);
4071 if (ldt_info.entry_number == -1) {
4072 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4073 if (gdt_table[i] == 0) {
4074 ldt_info.entry_number = i;
4075 target_ldt_info->entry_number = tswap32(i);
4076 break;
4080 unlock_user_struct(target_ldt_info, ptr, 1);
4082 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4083 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4084 return -TARGET_EINVAL;
4085 seg_32bit = ldt_info.flags & 1;
4086 contents = (ldt_info.flags >> 1) & 3;
4087 read_exec_only = (ldt_info.flags >> 3) & 1;
4088 limit_in_pages = (ldt_info.flags >> 4) & 1;
4089 seg_not_present = (ldt_info.flags >> 5) & 1;
4090 useable = (ldt_info.flags >> 6) & 1;
4091 #ifdef TARGET_ABI32
4092 lm = 0;
4093 #else
4094 lm = (ldt_info.flags >> 7) & 1;
4095 #endif
4097 if (contents == 3) {
4098 if (seg_not_present == 0)
4099 return -TARGET_EINVAL;
4102 /* NOTE: same code as Linux kernel */
4103 /* Allow LDTs to be cleared by the user. */
4104 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4105 if ((contents == 0 &&
4106 read_exec_only == 1 &&
4107 seg_32bit == 0 &&
4108 limit_in_pages == 0 &&
4109 seg_not_present == 1 &&
4110 useable == 0 )) {
4111 entry_1 = 0;
4112 entry_2 = 0;
4113 goto install;
4117 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4118 (ldt_info.limit & 0x0ffff);
4119 entry_2 = (ldt_info.base_addr & 0xff000000) |
4120 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4121 (ldt_info.limit & 0xf0000) |
4122 ((read_exec_only ^ 1) << 9) |
4123 (contents << 10) |
4124 ((seg_not_present ^ 1) << 15) |
4125 (seg_32bit << 22) |
4126 (limit_in_pages << 23) |
4127 (useable << 20) |
4128 (lm << 21) |
4129 0x7000;
4131 /* Install the new entry ... */
4132 install:
4133 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4134 lp[0] = tswap32(entry_1);
4135 lp[1] = tswap32(entry_2);
4136 return 0;
4139 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4141 struct target_modify_ldt_ldt_s *target_ldt_info;
4142 uint64_t *gdt_table = g2h(env->gdt.base);
4143 uint32_t base_addr, limit, flags;
4144 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4145 int seg_not_present, useable, lm;
4146 uint32_t *lp, entry_1, entry_2;
4148 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4149 if (!target_ldt_info)
4150 return -TARGET_EFAULT;
4151 idx = tswap32(target_ldt_info->entry_number);
4152 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4153 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4154 unlock_user_struct(target_ldt_info, ptr, 1);
4155 return -TARGET_EINVAL;
4157 lp = (uint32_t *)(gdt_table + idx);
4158 entry_1 = tswap32(lp[0]);
4159 entry_2 = tswap32(lp[1]);
4161 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4162 contents = (entry_2 >> 10) & 3;
4163 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4164 seg_32bit = (entry_2 >> 22) & 1;
4165 limit_in_pages = (entry_2 >> 23) & 1;
4166 useable = (entry_2 >> 20) & 1;
4167 #ifdef TARGET_ABI32
4168 lm = 0;
4169 #else
4170 lm = (entry_2 >> 21) & 1;
4171 #endif
4172 flags = (seg_32bit << 0) | (contents << 1) |
4173 (read_exec_only << 3) | (limit_in_pages << 4) |
4174 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4175 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4176 base_addr = (entry_1 >> 16) |
4177 (entry_2 & 0xff000000) |
4178 ((entry_2 & 0xff) << 16);
4179 target_ldt_info->base_addr = tswapal(base_addr);
4180 target_ldt_info->limit = tswap32(limit);
4181 target_ldt_info->flags = tswap32(flags);
4182 unlock_user_struct(target_ldt_info, ptr, 1);
4183 return 0;
4185 #endif /* TARGET_I386 && TARGET_ABI32 */
4187 #ifndef TARGET_ABI32
4188 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4190 abi_long ret = 0;
4191 abi_ulong val;
4192 int idx;
4194 switch(code) {
4195 case TARGET_ARCH_SET_GS:
4196 case TARGET_ARCH_SET_FS:
4197 if (code == TARGET_ARCH_SET_GS)
4198 idx = R_GS;
4199 else
4200 idx = R_FS;
4201 cpu_x86_load_seg(env, idx, 0);
4202 env->segs[idx].base = addr;
4203 break;
4204 case TARGET_ARCH_GET_GS:
4205 case TARGET_ARCH_GET_FS:
4206 if (code == TARGET_ARCH_GET_GS)
4207 idx = R_GS;
4208 else
4209 idx = R_FS;
4210 val = env->segs[idx].base;
4211 if (put_user(val, addr, abi_ulong))
4212 ret = -TARGET_EFAULT;
4213 break;
4214 default:
4215 ret = -TARGET_EINVAL;
4216 break;
4218 return ret;
4220 #endif
4222 #endif /* defined(TARGET_I386) */
4224 #define NEW_STACK_SIZE 0x40000
4227 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4228 typedef struct {
4229 CPUArchState *env;
4230 pthread_mutex_t mutex;
4231 pthread_cond_t cond;
4232 pthread_t thread;
4233 uint32_t tid;
4234 abi_ulong child_tidptr;
4235 abi_ulong parent_tidptr;
4236 sigset_t sigmask;
4237 } new_thread_info;
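/* Thread-creation handshake for CLONE_VM: the parent holds clone_lock and
   info.mutex while it sets up the new CPU state; the child fills in its TID,
   signals info.cond, and then blocks briefly on clone_lock so that it only
   enters cpu_loop() once the parent has finished the remaining setup. */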
4239 static void *clone_func(void *arg)
4241 new_thread_info *info = arg;
4242 CPUArchState *env;
4243 CPUState *cpu;
4244 TaskState *ts;
4246 env = info->env;
4247 cpu = ENV_GET_CPU(env);
4248 thread_cpu = cpu;
4249 ts = (TaskState *)env->opaque;
4250 info->tid = gettid();
4251 cpu->host_tid = info->tid;
4252 task_settid(ts);
4253 if (info->child_tidptr)
4254 put_user_u32(info->tid, info->child_tidptr);
4255 if (info->parent_tidptr)
4256 put_user_u32(info->tid, info->parent_tidptr);
4257 /* Enable signals. */
4258 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4259 /* Signal to the parent that we're ready. */
4260 pthread_mutex_lock(&info->mutex);
4261 pthread_cond_broadcast(&info->cond);
4262 pthread_mutex_unlock(&info->mutex);
4263 /* Wait until the parent has finished initializing the TLS state. */
4264 pthread_mutex_lock(&clone_lock);
4265 pthread_mutex_unlock(&clone_lock);
4266 cpu_loop(env);
4267 /* never exits */
4268 return NULL;
4271 /* do_fork() must return host values and target errnos (unlike most
4272 do_*() functions). */
4273 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4274 abi_ulong parent_tidptr, target_ulong newtls,
4275 abi_ulong child_tidptr)
4277 int ret;
4278 TaskState *ts;
4279 CPUArchState *new_env;
4280 unsigned int nptl_flags;
4281 sigset_t sigmask;
4283 /* Emulate vfork() with fork() */
4284 if (flags & CLONE_VFORK)
4285 flags &= ~(CLONE_VFORK | CLONE_VM);
4287 if (flags & CLONE_VM) {
4288 TaskState *parent_ts = (TaskState *)env->opaque;
4289 new_thread_info info;
4290 pthread_attr_t attr;
4292 ts = g_malloc0(sizeof(TaskState));
4293 init_task_state(ts);
4294 /* we create a new CPU instance. */
4295 new_env = cpu_copy(env);
4296 /* Init regs that differ from the parent. */
4297 cpu_clone_regs(new_env, newsp);
4298 new_env->opaque = ts;
4299 ts->bprm = parent_ts->bprm;
4300 ts->info = parent_ts->info;
4301 nptl_flags = flags;
4302 flags &= ~CLONE_NPTL_FLAGS2;
4304 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4305 ts->child_tidptr = child_tidptr;
4308 if (nptl_flags & CLONE_SETTLS)
4309 cpu_set_tls (new_env, newtls);
4311 /* Grab a mutex so that thread setup appears atomic. */
4312 pthread_mutex_lock(&clone_lock);
4314 memset(&info, 0, sizeof(info));
4315 pthread_mutex_init(&info.mutex, NULL);
4316 pthread_mutex_lock(&info.mutex);
4317 pthread_cond_init(&info.cond, NULL);
4318 info.env = new_env;
4319 if (nptl_flags & CLONE_CHILD_SETTID)
4320 info.child_tidptr = child_tidptr;
4321 if (nptl_flags & CLONE_PARENT_SETTID)
4322 info.parent_tidptr = parent_tidptr;
4324 ret = pthread_attr_init(&attr);
4325 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4326 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4327 /* It is not safe to deliver signals until the child has finished
4328 initializing, so temporarily block all signals. */
4329 sigfillset(&sigmask);
4330 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4332 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4333 /* TODO: Free new CPU state if thread creation failed. */
4335 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4336 pthread_attr_destroy(&attr);
4337 if (ret == 0) {
4338 /* Wait for the child to initialize. */
4339 pthread_cond_wait(&info.cond, &info.mutex);
4340 ret = info.tid;
4341 if (flags & CLONE_PARENT_SETTID)
4342 put_user_u32(ret, parent_tidptr);
4343 } else {
4344 ret = -1;
4346 pthread_mutex_unlock(&info.mutex);
4347 pthread_cond_destroy(&info.cond);
4348 pthread_mutex_destroy(&info.mutex);
4349 pthread_mutex_unlock(&clone_lock);
4350 } else {
4351 /* without CLONE_VM, we consider it a plain fork */
4352 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4353 return -EINVAL;
4354 fork_start();
4355 ret = fork();
4356 if (ret == 0) {
4357 /* Child Process. */
4358 cpu_clone_regs(env, newsp);
4359 fork_end(1);
4360 /* There is a race condition here. The parent process could
4361 theoretically read the TID in the child process before the child
4362 tid is set. This would require using either ptrace
4363 (not implemented) or having *_tidptr point at a shared memory
4364 mapping. We can't repeat the spinlock hack used above because
4365 the child process gets its own copy of the lock. */
4366 if (flags & CLONE_CHILD_SETTID)
4367 put_user_u32(gettid(), child_tidptr);
4368 if (flags & CLONE_PARENT_SETTID)
4369 put_user_u32(gettid(), parent_tidptr);
4370 ts = (TaskState *)env->opaque;
4371 if (flags & CLONE_SETTLS)
4372 cpu_set_tls (env, newtls);
4373 if (flags & CLONE_CHILD_CLEARTID)
4374 ts->child_tidptr = child_tidptr;
4375 } else {
4376 fork_end(0);
4379 return ret;
4382 /* warning: doesn't handle Linux-specific flags... */
4383 static int target_to_host_fcntl_cmd(int cmd)
4385 switch(cmd) {
4386 case TARGET_F_DUPFD:
4387 case TARGET_F_GETFD:
4388 case TARGET_F_SETFD:
4389 case TARGET_F_GETFL:
4390 case TARGET_F_SETFL:
4391 return cmd;
4392 case TARGET_F_GETLK:
4393 return F_GETLK;
4394 case TARGET_F_SETLK:
4395 return F_SETLK;
4396 case TARGET_F_SETLKW:
4397 return F_SETLKW;
4398 case TARGET_F_GETOWN:
4399 return F_GETOWN;
4400 case TARGET_F_SETOWN:
4401 return F_SETOWN;
4402 case TARGET_F_GETSIG:
4403 return F_GETSIG;
4404 case TARGET_F_SETSIG:
4405 return F_SETSIG;
4406 #if TARGET_ABI_BITS == 32
4407 case TARGET_F_GETLK64:
4408 return F_GETLK64;
4409 case TARGET_F_SETLK64:
4410 return F_SETLK64;
4411 case TARGET_F_SETLKW64:
4412 return F_SETLKW64;
4413 #endif
4414 case TARGET_F_SETLEASE:
4415 return F_SETLEASE;
4416 case TARGET_F_GETLEASE:
4417 return F_GETLEASE;
4418 #ifdef F_DUPFD_CLOEXEC
4419 case TARGET_F_DUPFD_CLOEXEC:
4420 return F_DUPFD_CLOEXEC;
4421 #endif
4422 case TARGET_F_NOTIFY:
4423 return F_NOTIFY;
4424 default:
4425 return -TARGET_EINVAL;
4427 return -TARGET_EINVAL;
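/* flock l_type values are small enums rather than bit flags, so the table
   below uses a mask of -1: an entry matches only when the whole value equals
   TARGET_F_xxx, and it is then rewritten to the host's F_xxx. */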
4430 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4431 static const bitmask_transtbl flock_tbl[] = {
4432 TRANSTBL_CONVERT(F_RDLCK),
4433 TRANSTBL_CONVERT(F_WRLCK),
4434 TRANSTBL_CONVERT(F_UNLCK),
4435 TRANSTBL_CONVERT(F_EXLCK),
4436 TRANSTBL_CONVERT(F_SHLCK),
4437 { 0, 0, 0, 0 }
4440 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4442 struct flock fl;
4443 struct target_flock *target_fl;
4444 struct flock64 fl64;
4445 struct target_flock64 *target_fl64;
4446 abi_long ret;
4447 int host_cmd = target_to_host_fcntl_cmd(cmd);
4449 if (host_cmd == -TARGET_EINVAL)
4450 return host_cmd;
4452 switch(cmd) {
4453 case TARGET_F_GETLK:
4454 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4455 return -TARGET_EFAULT;
4456 fl.l_type =
4457 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4458 fl.l_whence = tswap16(target_fl->l_whence);
4459 fl.l_start = tswapal(target_fl->l_start);
4460 fl.l_len = tswapal(target_fl->l_len);
4461 fl.l_pid = tswap32(target_fl->l_pid);
4462 unlock_user_struct(target_fl, arg, 0);
4463 ret = get_errno(fcntl(fd, host_cmd, &fl));
4464 if (ret == 0) {
4465 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4466 return -TARGET_EFAULT;
4467 target_fl->l_type =
4468 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4469 target_fl->l_whence = tswap16(fl.l_whence);
4470 target_fl->l_start = tswapal(fl.l_start);
4471 target_fl->l_len = tswapal(fl.l_len);
4472 target_fl->l_pid = tswap32(fl.l_pid);
4473 unlock_user_struct(target_fl, arg, 1);
4475 break;
4477 case TARGET_F_SETLK:
4478 case TARGET_F_SETLKW:
4479 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4480 return -TARGET_EFAULT;
4481 fl.l_type =
4482 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4483 fl.l_whence = tswap16(target_fl->l_whence);
4484 fl.l_start = tswapal(target_fl->l_start);
4485 fl.l_len = tswapal(target_fl->l_len);
4486 fl.l_pid = tswap32(target_fl->l_pid);
4487 unlock_user_struct(target_fl, arg, 0);
4488 ret = get_errno(fcntl(fd, host_cmd, &fl));
4489 break;
4491 case TARGET_F_GETLK64:
4492 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4493 return -TARGET_EFAULT;
4494 fl64.l_type =
4495 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4496 fl64.l_whence = tswap16(target_fl64->l_whence);
4497 fl64.l_start = tswap64(target_fl64->l_start);
4498 fl64.l_len = tswap64(target_fl64->l_len);
4499 fl64.l_pid = tswap32(target_fl64->l_pid);
4500 unlock_user_struct(target_fl64, arg, 0);
4501 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4502 if (ret == 0) {
4503 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4504 return -TARGET_EFAULT;
4505 target_fl64->l_type =
4506 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4507 target_fl64->l_whence = tswap16(fl64.l_whence);
4508 target_fl64->l_start = tswap64(fl64.l_start);
4509 target_fl64->l_len = tswap64(fl64.l_len);
4510 target_fl64->l_pid = tswap32(fl64.l_pid);
4511 unlock_user_struct(target_fl64, arg, 1);
4513 break;
4514 case TARGET_F_SETLK64:
4515 case TARGET_F_SETLKW64:
4516 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4517 return -TARGET_EFAULT;
4518 fl64.l_type =
4519 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4520 fl64.l_whence = tswap16(target_fl64->l_whence);
4521 fl64.l_start = tswap64(target_fl64->l_start);
4522 fl64.l_len = tswap64(target_fl64->l_len);
4523 fl64.l_pid = tswap32(target_fl64->l_pid);
4524 unlock_user_struct(target_fl64, arg, 0);
4525 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4526 break;
4528 case TARGET_F_GETFL:
4529 ret = get_errno(fcntl(fd, host_cmd, arg));
4530 if (ret >= 0) {
4531 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4533 break;
4535 case TARGET_F_SETFL:
4536 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4537 break;
4539 case TARGET_F_SETOWN:
4540 case TARGET_F_GETOWN:
4541 case TARGET_F_SETSIG:
4542 case TARGET_F_GETSIG:
4543 case TARGET_F_SETLEASE:
4544 case TARGET_F_GETLEASE:
4545 ret = get_errno(fcntl(fd, host_cmd, arg));
4546 break;
4548 default:
4549 ret = get_errno(fcntl(fd, cmd, arg));
4550 break;
4552 return ret;
4555 #ifdef USE_UID16
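/* Targets with 16-bit uid_t syscalls: IDs above 65535 are reported back as
   the overflow value 65534, and a 16-bit -1 must widen to -1 so the kernel's
   'leave unchanged' convention keeps working. */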
4557 static inline int high2lowuid(int uid)
4559 if (uid > 65535)
4560 return 65534;
4561 else
4562 return uid;
4565 static inline int high2lowgid(int gid)
4567 if (gid > 65535)
4568 return 65534;
4569 else
4570 return gid;
4573 static inline int low2highuid(int uid)
4575 if ((int16_t)uid == -1)
4576 return -1;
4577 else
4578 return uid;
4581 static inline int low2highgid(int gid)
4583 if ((int16_t)gid == -1)
4584 return -1;
4585 else
4586 return gid;
4588 static inline int tswapid(int id)
4590 return tswap16(id);
4592 #else /* !USE_UID16 */
4593 static inline int high2lowuid(int uid)
4595 return uid;
4597 static inline int high2lowgid(int gid)
4599 return gid;
4601 static inline int low2highuid(int uid)
4603 return uid;
4605 static inline int low2highgid(int gid)
4607 return gid;
4609 static inline int tswapid(int id)
4611 return tswap32(id);
4613 #endif /* USE_UID16 */
4615 void syscall_init(void)
4617 IOCTLEntry *ie;
4618 const argtype *arg_type;
4619 int size;
4620 int i;
4622 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4623 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4624 #include "syscall_types.h"
4625 #undef STRUCT
4626 #undef STRUCT_SPECIAL
4628 /* Build the target_to_host_errno_table[] from
4629 * host_to_target_errno_table[]. */
4630 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4631 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4634 /* we patch the ioctl size if necessary. We rely on the fact that
4635 no ioctl has all bits of its size field set to '1' */
4636 ie = ioctl_entries;
4637 while (ie->target_cmd != 0) {
4638 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4639 TARGET_IOC_SIZEMASK) {
4640 arg_type = ie->arg_type;
4641 if (arg_type[0] != TYPE_PTR) {
4642 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4643 ie->target_cmd);
4644 exit(1);
4646 arg_type++;
4647 size = thunk_type_size(arg_type, 0);
4648 ie->target_cmd = (ie->target_cmd &
4649 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4650 (size << TARGET_IOC_SIZESHIFT);
4653 /* automatic consistency check if same arch */
4654 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4655 (defined(__x86_64__) && defined(TARGET_X86_64))
4656 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4657 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4658 ie->name, ie->target_cmd, ie->host_cmd);
4660 #endif
4661 ie++;
4665 #if TARGET_ABI_BITS == 32
4666 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4668 #ifdef TARGET_WORDS_BIGENDIAN
4669 return ((uint64_t)word0 << 32) | word1;
4670 #else
4671 return ((uint64_t)word1 << 32) | word0;
4672 #endif
4674 #else /* TARGET_ABI_BITS == 32 */
4675 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4677 return word0;
4679 #endif /* TARGET_ABI_BITS != 32 */
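/* On 32-bit ABIs a 64-bit offset arrives as two registers, reassembled by
   target_offset64() in the target's endian order; regpairs_aligned() reports
   ABIs (e.g. ARM EABI) where such pairs must start in an even register, in
   which case a padding argument is skipped below. */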
4681 #ifdef TARGET_NR_truncate64
4682 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4683 abi_long arg2,
4684 abi_long arg3,
4685 abi_long arg4)
4687 if (regpairs_aligned(cpu_env)) {
4688 arg2 = arg3;
4689 arg3 = arg4;
4691 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4693 #endif
4695 #ifdef TARGET_NR_ftruncate64
4696 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4697 abi_long arg2,
4698 abi_long arg3,
4699 abi_long arg4)
4701 if (regpairs_aligned(cpu_env)) {
4702 arg2 = arg3;
4703 arg3 = arg4;
4705 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4707 #endif
4709 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4710 abi_ulong target_addr)
4712 struct target_timespec *target_ts;
4714 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4715 return -TARGET_EFAULT;
4716 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4717 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4718 unlock_user_struct(target_ts, target_addr, 0);
4719 return 0;
4722 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4723 struct timespec *host_ts)
4725 struct target_timespec *target_ts;
4727 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4728 return -TARGET_EFAULT;
4729 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4730 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4731 unlock_user_struct(target_ts, target_addr, 1);
4732 return 0;
4735 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4736 static inline abi_long host_to_target_stat64(void *cpu_env,
4737 abi_ulong target_addr,
4738 struct stat *host_st)
4740 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4741 if (((CPUARMState *)cpu_env)->eabi) {
4742 struct target_eabi_stat64 *target_st;
4744 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4745 return -TARGET_EFAULT;
4746 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4747 __put_user(host_st->st_dev, &target_st->st_dev);
4748 __put_user(host_st->st_ino, &target_st->st_ino);
4749 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4750 __put_user(host_st->st_ino, &target_st->__st_ino);
4751 #endif
4752 __put_user(host_st->st_mode, &target_st->st_mode);
4753 __put_user(host_st->st_nlink, &target_st->st_nlink);
4754 __put_user(host_st->st_uid, &target_st->st_uid);
4755 __put_user(host_st->st_gid, &target_st->st_gid);
4756 __put_user(host_st->st_rdev, &target_st->st_rdev);
4757 __put_user(host_st->st_size, &target_st->st_size);
4758 __put_user(host_st->st_blksize, &target_st->st_blksize);
4759 __put_user(host_st->st_blocks, &target_st->st_blocks);
4760 __put_user(host_st->st_atime, &target_st->target_st_atime);
4761 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4762 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4763 unlock_user_struct(target_st, target_addr, 1);
4764 } else
4765 #endif
4767 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4768 struct target_stat *target_st;
4769 #else
4770 struct target_stat64 *target_st;
4771 #endif
4773 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4774 return -TARGET_EFAULT;
4775 memset(target_st, 0, sizeof(*target_st));
4776 __put_user(host_st->st_dev, &target_st->st_dev);
4777 __put_user(host_st->st_ino, &target_st->st_ino);
4778 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4779 __put_user(host_st->st_ino, &target_st->__st_ino);
4780 #endif
4781 __put_user(host_st->st_mode, &target_st->st_mode);
4782 __put_user(host_st->st_nlink, &target_st->st_nlink);
4783 __put_user(host_st->st_uid, &target_st->st_uid);
4784 __put_user(host_st->st_gid, &target_st->st_gid);
4785 __put_user(host_st->st_rdev, &target_st->st_rdev);
4786 /* XXX: better use of kernel struct */
4787 __put_user(host_st->st_size, &target_st->st_size);
4788 __put_user(host_st->st_blksize, &target_st->st_blksize);
4789 __put_user(host_st->st_blocks, &target_st->st_blocks);
4790 __put_user(host_st->st_atime, &target_st->target_st_atime);
4791 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4792 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4793 unlock_user_struct(target_st, target_addr, 1);
4796 return 0;
4798 #endif
4800 /* ??? Using host futex calls even when target atomic operations
4801 are not really atomic probably breaks things. However, implementing
4802 futexes locally would make futexes shared between multiple processes
4803 tricky. In any case they're probably useless, because guest atomic
4804 operations won't work either. */
4805 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4806 target_ulong uaddr2, int val3)
4808 struct timespec ts, *pts;
4809 int base_op;
4811 /* ??? We assume FUTEX_* constants are the same on both host
4812 and target. */
4813 #ifdef FUTEX_CMD_MASK
4814 base_op = op & FUTEX_CMD_MASK;
4815 #else
4816 base_op = op;
4817 #endif
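/* For FUTEX_WAIT the comparison value is byte-swapped to target order,
   since the host kernel compares it directly against the guest's futex word,
   which lives in guest memory in target byte order. */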
4818 switch (base_op) {
4819 case FUTEX_WAIT:
4820 case FUTEX_WAIT_BITSET:
4821 if (timeout) {
4822 pts = &ts;
4823 target_to_host_timespec(pts, timeout);
4824 } else {
4825 pts = NULL;
4827 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4828 pts, NULL, val3));
4829 case FUTEX_WAKE:
4830 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4831 case FUTEX_FD:
4832 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4833 case FUTEX_REQUEUE:
4834 case FUTEX_CMP_REQUEUE:
4835 case FUTEX_WAKE_OP:
4836 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4837 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4838 But the prototype takes a `struct timespec *'; insert casts
4839 to satisfy the compiler. We do not need to tswap TIMEOUT
4840 since it's not compared to guest memory. */
4841 pts = (struct timespec *)(uintptr_t) timeout;
4842 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4843 g2h(uaddr2),
4844 (base_op == FUTEX_CMP_REQUEUE
4845 ? tswap32(val3)
4846 : val3)));
4847 default:
4848 return -TARGET_ENOSYS;
4852 /* Map host to target signal numbers for the wait family of syscalls.
4853 Assume all other status bits are the same. */
4854 int host_to_target_waitstatus(int status)
4856 if (WIFSIGNALED(status)) {
4857 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4859 if (WIFSTOPPED(status)) {
4860 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4861 | (status & 0xff);
4863 return status;
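/* Wait status layout (identical on host and target): bits 0-6 hold the
   terminating signal, bit 7 the core-dump flag, bits 8-15 the exit code or
   stop signal; only the signal numbers themselves need translation. */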
4866 static int relstr_to_int(const char *s)
4868 /* Convert a uname release string like "2.6.18" to an integer
4869 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.)
4871 int i, n, tmp;
4873 tmp = 0;
4874 for (i = 0; i < 3; i++) {
4875 n = 0;
4876 while (*s >= '0' && *s <= '9') {
4877 n *= 10;
4878 n += *s - '0';
4879 s++;
4881 tmp = (tmp << 8) + n;
4882 if (*s == '.') {
4883 s++;
4886 return tmp;
4889 int get_osversion(void)
4891 static int osversion;
4892 struct new_utsname buf;
4893 const char *s;
4895 if (osversion)
4896 return osversion;
4897 if (qemu_uname_release && *qemu_uname_release) {
4898 s = qemu_uname_release;
4899 } else {
4900 if (sys_uname(&buf))
4901 return 0;
4902 s = buf.release;
4904 osversion = relstr_to_int(s);
4905 return osversion;
4908 void init_qemu_uname_release(void)
4910 /* Initialize qemu_uname_release for later use.
4911 * If the host kernel is too old and the user hasn't asked for
4912 * a specific fake version number, we might want to fake a minimum
4913 * target kernel version.
4915 #ifdef UNAME_MINIMUM_RELEASE
4916 struct new_utsname buf;
4918 if (qemu_uname_release && *qemu_uname_release) {
4919 return;
4922 if (sys_uname(&buf)) {
4923 return;
4926 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) {
4927 qemu_uname_release = UNAME_MINIMUM_RELEASE;
4929 #endif
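/* Synthesize /proc/self/maps: walk the host's own maps, keep only ranges
   that translate back to guest addresses via h2g(), and print them with
   guest addresses; on targets where QEMU sets up the stack itself, a
   [stack] line is appended from the TaskState image info. */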
4932 static int open_self_maps(void *cpu_env, int fd)
4934 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4935 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4936 #endif
4937 FILE *fp;
4938 char *line = NULL;
4939 size_t len = 0;
4940 ssize_t read;
4942 fp = fopen("/proc/self/maps", "r");
4943 if (fp == NULL) {
4944 return -EACCES;
4947 while ((read = getline(&line, &len, fp)) != -1) {
4948 int fields, dev_maj, dev_min, inode;
4949 uint64_t min, max, offset;
4950 char flag_r, flag_w, flag_x, flag_p;
4951 char path[512] = "";
4952 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4953 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4954 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4956 if ((fields < 10) || (fields > 11)) {
4957 continue;
4959 if (!strncmp(path, "[stack]", 7)) {
4960 continue;
4962 if (h2g_valid(min) && h2g_valid(max)) {
4963 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
4964 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
4965 h2g(min), h2g(max), flag_r, flag_w,
4966 flag_x, flag_p, offset, dev_maj, dev_min, inode,
4967 path[0] ? " " : "", path);
4971 free(line);
4972 fclose(fp);
4974 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4975 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4976 (unsigned long long)ts->info->stack_limit,
4977 (unsigned long long)(ts->info->start_stack +
4978 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
4979 (unsigned long long)0);
4980 #endif
4982 return 0;
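/* Synthesize /proc/self/stat: emit the 44 whitespace-separated fields, but
   only pid (field 1), comm (field 2) and startstack (field 28) carry real
   values; everything else is reported as 0. */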
4985 static int open_self_stat(void *cpu_env, int fd)
4987 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4988 abi_ulong start_stack = ts->info->start_stack;
4989 int i;
4991 for (i = 0; i < 44; i++) {
4992 char buf[128];
4993 int len;
4994 uint64_t val = 0;
4996 if (i == 0) {
4997 /* pid */
4998 val = getpid();
4999 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5000 } else if (i == 1) {
5001 /* app name */
5002 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5003 } else if (i == 27) {
5004 /* stack bottom */
5005 val = start_stack;
5006 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5007 } else {
5008 /* the remaining fields are not emulated; report 0 */
5009 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5012 len = strlen(buf);
5013 if (write(fd, buf, len) != len) {
5014 return -1;
5018 return 0;
5021 static int open_self_auxv(void *cpu_env, int fd)
5023 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5024 abi_ulong auxv = ts->info->saved_auxv;
5025 abi_ulong len = ts->info->auxv_len;
5026 char *ptr;
5029 * The auxiliary vector is stored on the target process stack.
5030 * Read the whole auxv vector and copy it to the file.
5032 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5033 if (ptr != NULL) {
5034 while (len > 0) {
5035 ssize_t r;
5036 r = write(fd, ptr, len);
5037 if (r <= 0) {
5038 break;
5040 len -= r;
5041 ptr += r;
5043 lseek(fd, 0, SEEK_SET);
5044 unlock_user(ptr, auxv, len);
5047 return 0;
5050 static int is_proc_myself(const char *filename, const char *entry)
5052 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5053 filename += strlen("/proc/");
5054 if (!strncmp(filename, "self/", strlen("self/"))) {
5055 filename += strlen("self/");
5056 } else if (*filename >= '1' && *filename <= '9') {
5057 char myself[80];
5058 snprintf(myself, sizeof(myself), "%d/", getpid());
5059 if (!strncmp(filename, myself, strlen(myself))) {
5060 filename += strlen(myself);
5061 } else {
5062 return 0;
5064 } else {
5065 return 0;
5067 if (!strcmp(filename, entry)) {
5068 return 1;
5071 return 0;
5074 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5075 static int is_proc(const char *filename, const char *entry)
5077 return strcmp(filename, entry) == 0;
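/* /proc/net/route encodes the destination, gateway and mask as hex words in
   host byte order; when host and target endianness differ the guest would
   misparse them, so rewrite the table with those three fields byte-swapped
   and pass the remaining columns through unchanged. */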
5080 static int open_net_route(void *cpu_env, int fd)
5082 FILE *fp;
5083 char *line = NULL;
5084 size_t len = 0;
5085 ssize_t read;
5087 fp = fopen("/proc/net/route", "r");
5088 if (fp == NULL) {
5089 return -EACCES;
5092 /* read header */
5094 read = getline(&line, &len, fp);
5095 dprintf(fd, "%s", line);
5097 /* read routes */
5099 while ((read = getline(&line, &len, fp)) != -1) {
5100 char iface[16];
5101 uint32_t dest, gw, mask;
5102 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5103 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5104 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5105 &mask, &mtu, &window, &irtt);
5106 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5107 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5108 metric, tswap32(mask), mtu, window, irtt);
5111 free(line);
5112 fclose(fp);
5114 return 0;
5116 #endif
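/* Intercept opens of selected /proc files: the matching entry's fill()
   callback writes guest-format contents into an unlinked temporary file,
   whose descriptor is returned instead of the real file's. */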
5118 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5120 struct fake_open {
5121 const char *filename;
5122 int (*fill)(void *cpu_env, int fd);
5123 int (*cmp)(const char *s1, const char *s2);
5125 const struct fake_open *fake_open;
5126 static const struct fake_open fakes[] = {
5127 { "maps", open_self_maps, is_proc_myself },
5128 { "stat", open_self_stat, is_proc_myself },
5129 { "auxv", open_self_auxv, is_proc_myself },
5130 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5131 { "/proc/net/route", open_net_route, is_proc },
5132 #endif
5133 { NULL, NULL, NULL }
5136 for (fake_open = fakes; fake_open->filename; fake_open++) {
5137 if (fake_open->cmp(pathname, fake_open->filename)) {
5138 break;
5142 if (fake_open->filename) {
5143 const char *tmpdir;
5144 char filename[PATH_MAX];
5145 int fd, r;
5147 /* create a temporary file to hold the synthesized contents */
5148 tmpdir = getenv("TMPDIR");
5149 if (!tmpdir)
5150 tmpdir = "/tmp";
5151 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5152 fd = mkstemp(filename);
5153 if (fd < 0) {
5154 return fd;
5156 unlink(filename);
5158 if ((r = fake_open->fill(cpu_env, fd))) {
5159 close(fd);
5160 return r;
5162 lseek(fd, 0, SEEK_SET);
5164 return fd;
5167 return get_errno(open(path(pathname), flags, mode));
5170 /* do_syscall() should always have a single exit point at the end so
5171 that actions, such as logging of syscall results, can be performed.
5172 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5173 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5174 abi_long arg2, abi_long arg3, abi_long arg4,
5175 abi_long arg5, abi_long arg6, abi_long arg7,
5176 abi_long arg8)
5178 CPUState *cpu = ENV_GET_CPU(cpu_env);
5179 abi_long ret;
5180 struct stat st;
5181 struct statfs stfs;
5182 void *p;
5184 #ifdef DEBUG
5185 gemu_log("syscall %d", num);
5186 #endif
5187 if(do_strace)
5188 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5190 switch(num) {
5191 case TARGET_NR_exit:
5192 /* In old applications this may be used to implement _exit(2).
5193 However, in threaded applications it is used for thread termination,
5194 and _exit_group is used for application termination.
5195 Do thread termination if we have more than one thread. */
5196 /* FIXME: This probably breaks if a signal arrives. We should probably
5197 be disabling signals. */
5198 if (CPU_NEXT(first_cpu)) {
5199 TaskState *ts;
5201 cpu_list_lock();
5202 /* Remove the CPU from the list. */
5203 QTAILQ_REMOVE(&cpus, cpu, node);
5204 cpu_list_unlock();
5205 ts = ((CPUArchState *)cpu_env)->opaque;
5206 if (ts->child_tidptr) {
5207 put_user_u32(0, ts->child_tidptr);
5208 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5209 NULL, NULL, 0);
5211 thread_cpu = NULL;
5212 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5213 g_free(ts);
5214 pthread_exit(NULL);
5216 #ifdef TARGET_GPROF
5217 _mcleanup();
5218 #endif
5219 gdb_exit(cpu_env, arg1);
5220 _exit(arg1);
5221 ret = 0; /* avoid warning */
5222 break;
5223 case TARGET_NR_read:
5224 if (arg3 == 0)
5225 ret = 0;
5226 else {
5227 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5228 goto efault;
5229 ret = get_errno(read(arg1, p, arg3));
5230 unlock_user(p, arg2, ret);
5232 break;
5233 case TARGET_NR_write:
5234 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5235 goto efault;
5236 ret = get_errno(write(arg1, p, arg3));
5237 unlock_user(p, arg2, 0);
5238 break;
5239 case TARGET_NR_open:
5240 if (!(p = lock_user_string(arg1)))
5241 goto efault;
5242 ret = get_errno(do_open(cpu_env, p,
5243 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5244 arg3));
5245 unlock_user(p, arg1, 0);
5246 break;
5247 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5248 case TARGET_NR_openat:
5249 if (!(p = lock_user_string(arg2)))
5250 goto efault;
5251 ret = get_errno(sys_openat(arg1,
5252 path(p),
5253 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5254 arg4));
5255 unlock_user(p, arg2, 0);
5256 break;
5257 #endif
5258 case TARGET_NR_close:
5259 ret = get_errno(close(arg1));
5260 break;
5261 case TARGET_NR_brk:
5262 ret = do_brk(arg1);
5263 break;
5264 case TARGET_NR_fork:
5265 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5266 break;
5267 #ifdef TARGET_NR_waitpid
5268 case TARGET_NR_waitpid:
5270 int status;
5271 ret = get_errno(waitpid(arg1, &status, arg3));
5272 if (!is_error(ret) && arg2 && ret
5273 && put_user_s32(host_to_target_waitstatus(status), arg2))
5274 goto efault;
5276 break;
5277 #endif
5278 #ifdef TARGET_NR_waitid
5279 case TARGET_NR_waitid:
5281 siginfo_t info;
5282 info.si_pid = 0;
5283 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5284 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5285 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5286 goto efault;
5287 host_to_target_siginfo(p, &info);
5288 unlock_user(p, arg3, sizeof(target_siginfo_t));
5291 break;
5292 #endif
5293 #ifdef TARGET_NR_creat /* not on alpha */
5294 case TARGET_NR_creat:
5295 if (!(p = lock_user_string(arg1)))
5296 goto efault;
5297 ret = get_errno(creat(p, arg2));
5298 unlock_user(p, arg1, 0);
5299 break;
5300 #endif
5301 case TARGET_NR_link:
5303 void * p2;
5304 p = lock_user_string(arg1);
5305 p2 = lock_user_string(arg2);
5306 if (!p || !p2)
5307 ret = -TARGET_EFAULT;
5308 else
5309 ret = get_errno(link(p, p2));
5310 unlock_user(p2, arg2, 0);
5311 unlock_user(p, arg1, 0);
5313 break;
5314 #if defined(TARGET_NR_linkat)
5315 case TARGET_NR_linkat:
5317 void * p2 = NULL;
5318 if (!arg2 || !arg4)
5319 goto efault;
5320 p = lock_user_string(arg2);
5321 p2 = lock_user_string(arg4);
5322 if (!p || !p2)
5323 ret = -TARGET_EFAULT;
5324 else
5325 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5326 unlock_user(p, arg2, 0);
5327 unlock_user(p2, arg4, 0);
5329 break;
5330 #endif
5331 case TARGET_NR_unlink:
5332 if (!(p = lock_user_string(arg1)))
5333 goto efault;
5334 ret = get_errno(unlink(p));
5335 unlock_user(p, arg1, 0);
5336 break;
5337 #if defined(TARGET_NR_unlinkat)
5338 case TARGET_NR_unlinkat:
5339 if (!(p = lock_user_string(arg2)))
5340 goto efault;
5341 ret = get_errno(unlinkat(arg1, p, arg3));
5342 unlock_user(p, arg2, 0);
5343 break;
5344 #endif
5345 case TARGET_NR_execve:
5347 char **argp, **envp;
5348 int argc, envc;
5349 abi_ulong gp;
5350 abi_ulong guest_argp;
5351 abi_ulong guest_envp;
5352 abi_ulong addr;
5353 char **q;
5354 int total_size = 0;
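/* Two passes over the guest argv/envp: first count the entries so the host
   arrays can be sized, then lock each guest string into host memory while
   summing their lengths, so over-long argument lists can be rejected with
   E2BIG before calling the host execve(). */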
5356 argc = 0;
5357 guest_argp = arg2;
5358 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5359 if (get_user_ual(addr, gp))
5360 goto efault;
5361 if (!addr)
5362 break;
5363 argc++;
5365 envc = 0;
5366 guest_envp = arg3;
5367 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5368 if (get_user_ual(addr, gp))
5369 goto efault;
5370 if (!addr)
5371 break;
5372 envc++;
5375 argp = alloca((argc + 1) * sizeof(void *));
5376 envp = alloca((envc + 1) * sizeof(void *));
5378 for (gp = guest_argp, q = argp; gp;
5379 gp += sizeof(abi_ulong), q++) {
5380 if (get_user_ual(addr, gp))
5381 goto execve_efault;
5382 if (!addr)
5383 break;
5384 if (!(*q = lock_user_string(addr)))
5385 goto execve_efault;
5386 total_size += strlen(*q) + 1;
5388 *q = NULL;
5390 for (gp = guest_envp, q = envp; gp;
5391 gp += sizeof(abi_ulong), q++) {
5392 if (get_user_ual(addr, gp))
5393 goto execve_efault;
5394 if (!addr)
5395 break;
5396 if (!(*q = lock_user_string(addr)))
5397 goto execve_efault;
5398 total_size += strlen(*q) + 1;
5400 *q = NULL;
5402 /* This case will not be caught by the host's execve() if its
5403 page size is bigger than the target's. */
5404 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5405 ret = -TARGET_E2BIG;
5406 goto execve_end;
5408 if (!(p = lock_user_string(arg1)))
5409 goto execve_efault;
5410 ret = get_errno(execve(p, argp, envp));
5411 unlock_user(p, arg1, 0);
5413 goto execve_end;
5415 execve_efault:
5416 ret = -TARGET_EFAULT;
5418 execve_end:
5419 for (gp = guest_argp, q = argp; *q;
5420 gp += sizeof(abi_ulong), q++) {
5421 if (get_user_ual(addr, gp)
5422 || !addr)
5423 break;
5424 unlock_user(*q, addr, 0);
5426 for (gp = guest_envp, q = envp; *q;
5427 gp += sizeof(abi_ulong), q++) {
5428 if (get_user_ual(addr, gp)
5429 || !addr)
5430 break;
5431 unlock_user(*q, addr, 0);
5434 break;
5435 case TARGET_NR_chdir:
5436 if (!(p = lock_user_string(arg1)))
5437 goto efault;
5438 ret = get_errno(chdir(p));
5439 unlock_user(p, arg1, 0);
5440 break;
5441 #ifdef TARGET_NR_time
5442 case TARGET_NR_time:
5444 time_t host_time;
5445 ret = get_errno(time(&host_time));
5446 if (!is_error(ret)
5447 && arg1
5448 && put_user_sal(host_time, arg1))
5449 goto efault;
5451 break;
5452 #endif
5453 case TARGET_NR_mknod:
5454 if (!(p = lock_user_string(arg1)))
5455 goto efault;
5456 ret = get_errno(mknod(p, arg2, arg3));
5457 unlock_user(p, arg1, 0);
5458 break;
5459 #if defined(TARGET_NR_mknodat)
5460 case TARGET_NR_mknodat:
5461 if (!(p = lock_user_string(arg2)))
5462 goto efault;
5463 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5464 unlock_user(p, arg2, 0);
5465 break;
5466 #endif
5467 case TARGET_NR_chmod:
5468 if (!(p = lock_user_string(arg1)))
5469 goto efault;
5470 ret = get_errno(chmod(p, arg2));
5471 unlock_user(p, arg1, 0);
5472 break;
5473 #ifdef TARGET_NR_break
5474 case TARGET_NR_break:
5475 goto unimplemented;
5476 #endif
5477 #ifdef TARGET_NR_oldstat
5478 case TARGET_NR_oldstat:
5479 goto unimplemented;
5480 #endif
5481 case TARGET_NR_lseek:
5482 ret = get_errno(lseek(arg1, arg2, arg3));
5483 break;
5484 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5485 /* Alpha specific */
5486 case TARGET_NR_getxpid:
5487 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5488 ret = get_errno(getpid());
5489 break;
5490 #endif
5491 #ifdef TARGET_NR_getpid
5492 case TARGET_NR_getpid:
5493 ret = get_errno(getpid());
5494 break;
5495 #endif
5496 case TARGET_NR_mount:
5498 /* need to look at the data field */
5499 void *p2, *p3;
5500 p = lock_user_string(arg1);
5501 p2 = lock_user_string(arg2);
5502 p3 = lock_user_string(arg3);
5503 if (!p || !p2 || !p3)
5504 ret = -TARGET_EFAULT;
5505 else {
5506 /* FIXME - arg5 should be locked, but it isn't clear how to
5507 * do that since it's not guaranteed to be a NULL-terminated
5508 * string.
5510 if ( ! arg5 )
5511 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5512 else
5513 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5515 unlock_user(p, arg1, 0);
5516 unlock_user(p2, arg2, 0);
5517 unlock_user(p3, arg3, 0);
5518 break;
5520 #ifdef TARGET_NR_umount
5521 case TARGET_NR_umount:
5522 if (!(p = lock_user_string(arg1)))
5523 goto efault;
5524 ret = get_errno(umount(p));
5525 unlock_user(p, arg1, 0);
5526 break;
5527 #endif
5528 #ifdef TARGET_NR_stime /* not on alpha */
5529 case TARGET_NR_stime:
5531 time_t host_time;
5532 if (get_user_sal(host_time, arg1))
5533 goto efault;
5534 ret = get_errno(stime(&host_time));
5536 break;
5537 #endif
5538 case TARGET_NR_ptrace:
5539 goto unimplemented;
5540 #ifdef TARGET_NR_alarm /* not on alpha */
5541 case TARGET_NR_alarm:
5542 ret = alarm(arg1);
5543 break;
5544 #endif
5545 #ifdef TARGET_NR_oldfstat
5546 case TARGET_NR_oldfstat:
5547 goto unimplemented;
5548 #endif
5549 #ifdef TARGET_NR_pause /* not on alpha */
5550 case TARGET_NR_pause:
5551 ret = get_errno(pause());
5552 break;
5553 #endif
5554 #ifdef TARGET_NR_utime
5555 case TARGET_NR_utime:
5557 struct utimbuf tbuf, *host_tbuf;
5558 struct target_utimbuf *target_tbuf;
5559 if (arg2) {
5560 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5561 goto efault;
5562 tbuf.actime = tswapal(target_tbuf->actime);
5563 tbuf.modtime = tswapal(target_tbuf->modtime);
5564 unlock_user_struct(target_tbuf, arg2, 0);
5565 host_tbuf = &tbuf;
5566 } else {
5567 host_tbuf = NULL;
5569 if (!(p = lock_user_string(arg1)))
5570 goto efault;
5571 ret = get_errno(utime(p, host_tbuf));
5572 unlock_user(p, arg1, 0);
5574 break;
5575 #endif
5576 case TARGET_NR_utimes:
5578 struct timeval *tvp, tv[2];
5579 if (arg2) {
5580 if (copy_from_user_timeval(&tv[0], arg2)
5581 || copy_from_user_timeval(&tv[1],
5582 arg2 + sizeof(struct target_timeval)))
5583 goto efault;
5584 tvp = tv;
5585 } else {
5586 tvp = NULL;
5588 if (!(p = lock_user_string(arg1)))
5589 goto efault;
5590 ret = get_errno(utimes(p, tvp));
5591 unlock_user(p, arg1, 0);
5593 break;
5594 #if defined(TARGET_NR_futimesat)
5595 case TARGET_NR_futimesat:
5597 struct timeval *tvp, tv[2];
5598 if (arg3) {
5599 if (copy_from_user_timeval(&tv[0], arg3)
5600 || copy_from_user_timeval(&tv[1],
5601 arg3 + sizeof(struct target_timeval)))
5602 goto efault;
5603 tvp = tv;
5604 } else {
5605 tvp = NULL;
5607 if (!(p = lock_user_string(arg2)))
5608 goto efault;
5609 ret = get_errno(futimesat(arg1, path(p), tvp));
5610 unlock_user(p, arg2, 0);
5612 break;
5613 #endif
5614 #ifdef TARGET_NR_stty
5615 case TARGET_NR_stty:
5616 goto unimplemented;
5617 #endif
5618 #ifdef TARGET_NR_gtty
5619 case TARGET_NR_gtty:
5620 goto unimplemented;
5621 #endif
5622 case TARGET_NR_access:
5623 if (!(p = lock_user_string(arg1)))
5624 goto efault;
5625 ret = get_errno(access(path(p), arg2));
5626 unlock_user(p, arg1, 0);
5627 break;
5628 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5629 case TARGET_NR_faccessat:
5630 if (!(p = lock_user_string(arg2)))
5631 goto efault;
5632 ret = get_errno(faccessat(arg1, p, arg3, 0));
5633 unlock_user(p, arg2, 0);
5634 break;
5635 #endif
5636 #ifdef TARGET_NR_nice /* not on alpha */
5637 case TARGET_NR_nice:
5638 ret = get_errno(nice(arg1));
5639 break;
5640 #endif
5641 #ifdef TARGET_NR_ftime
5642 case TARGET_NR_ftime:
5643 goto unimplemented;
5644 #endif
5645 case TARGET_NR_sync:
5646 sync();
5647 ret = 0;
5648 break;
5649 case TARGET_NR_kill:
5650 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5651 break;
5652 case TARGET_NR_rename:
5654 void *p2;
5655 p = lock_user_string(arg1);
5656 p2 = lock_user_string(arg2);
5657 if (!p || !p2)
5658 ret = -TARGET_EFAULT;
5659 else
5660 ret = get_errno(rename(p, p2));
5661 unlock_user(p2, arg2, 0);
5662 unlock_user(p, arg1, 0);
5664 break;
5665 #if defined(TARGET_NR_renameat)
5666 case TARGET_NR_renameat:
5668 void *p2;
5669 p = lock_user_string(arg2);
5670 p2 = lock_user_string(arg4);
5671 if (!p || !p2)
5672 ret = -TARGET_EFAULT;
5673 else
5674 ret = get_errno(renameat(arg1, p, arg3, p2));
5675 unlock_user(p2, arg4, 0);
5676 unlock_user(p, arg2, 0);
5678 break;
5679 #endif
5680 case TARGET_NR_mkdir:
5681 if (!(p = lock_user_string(arg1)))
5682 goto efault;
5683 ret = get_errno(mkdir(p, arg2));
5684 unlock_user(p, arg1, 0);
5685 break;
5686 #if defined(TARGET_NR_mkdirat)
5687 case TARGET_NR_mkdirat:
5688 if (!(p = lock_user_string(arg2)))
5689 goto efault;
5690 ret = get_errno(mkdirat(arg1, p, arg3));
5691 unlock_user(p, arg2, 0);
5692 break;
5693 #endif
5694 case TARGET_NR_rmdir:
5695 if (!(p = lock_user_string(arg1)))
5696 goto efault;
5697 ret = get_errno(rmdir(p));
5698 unlock_user(p, arg1, 0);
5699 break;
5700 case TARGET_NR_dup:
5701 ret = get_errno(dup(arg1));
5702 break;
5703 case TARGET_NR_pipe:
5704 ret = do_pipe(cpu_env, arg1, 0, 0);
5705 break;
5706 #ifdef TARGET_NR_pipe2
5707 case TARGET_NR_pipe2:
5708 ret = do_pipe(cpu_env, arg1,
5709 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5710 break;
5711 #endif
5712 case TARGET_NR_times:
5714 struct target_tms *tmsp;
5715 struct tms tms;
5716 ret = get_errno(times(&tms));
5717 if (arg1) {
5718 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5719 if (!tmsp)
5720 goto efault;
5721 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5722 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5723 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5724 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5726 if (!is_error(ret))
5727 ret = host_to_target_clock_t(ret);
5729 break;
5730 #ifdef TARGET_NR_prof
5731 case TARGET_NR_prof:
5732 goto unimplemented;
5733 #endif
5734 #ifdef TARGET_NR_signal
5735 case TARGET_NR_signal:
5736 goto unimplemented;
5737 #endif
5738 case TARGET_NR_acct:
5739 if (arg1 == 0) {
5740 ret = get_errno(acct(NULL));
5741 } else {
5742 if (!(p = lock_user_string(arg1)))
5743 goto efault;
5744 ret = get_errno(acct(path(p)));
5745 unlock_user(p, arg1, 0);
5747 break;
5748 #ifdef TARGET_NR_umount2
5749 case TARGET_NR_umount2:
5750 if (!(p = lock_user_string(arg1)))
5751 goto efault;
5752 ret = get_errno(umount2(p, arg2));
5753 unlock_user(p, arg1, 0);
5754 break;
5755 #endif
5756 #ifdef TARGET_NR_lock
5757 case TARGET_NR_lock:
5758 goto unimplemented;
5759 #endif
5760 case TARGET_NR_ioctl:
5761 ret = do_ioctl(arg1, arg2, arg3);
5762 break;
5763 case TARGET_NR_fcntl:
5764 ret = do_fcntl(arg1, arg2, arg3);
5765 break;
5766 #ifdef TARGET_NR_mpx
5767 case TARGET_NR_mpx:
5768 goto unimplemented;
5769 #endif
5770 case TARGET_NR_setpgid:
5771 ret = get_errno(setpgid(arg1, arg2));
5772 break;
5773 #ifdef TARGET_NR_ulimit
5774 case TARGET_NR_ulimit:
5775 goto unimplemented;
5776 #endif
5777 #ifdef TARGET_NR_oldolduname
5778 case TARGET_NR_oldolduname:
5779 goto unimplemented;
5780 #endif
5781 case TARGET_NR_umask:
5782 ret = get_errno(umask(arg1));
5783 break;
5784 case TARGET_NR_chroot:
5785 if (!(p = lock_user_string(arg1)))
5786 goto efault;
5787 ret = get_errno(chroot(p));
5788 unlock_user(p, arg1, 0);
5789 break;
5790 case TARGET_NR_ustat:
5791 goto unimplemented;
5792 case TARGET_NR_dup2:
5793 ret = get_errno(dup2(arg1, arg2));
5794 break;
5795 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5796 case TARGET_NR_dup3:
5797 ret = get_errno(dup3(arg1, arg2, arg3));
5798 break;
5799 #endif
5800 #ifdef TARGET_NR_getppid /* not on alpha */
5801 case TARGET_NR_getppid:
5802 ret = get_errno(getppid());
5803 break;
5804 #endif
5805 case TARGET_NR_getpgrp:
5806 ret = get_errno(getpgrp());
5807 break;
5808 case TARGET_NR_setsid:
5809 ret = get_errno(setsid());
5810 break;
5811 #ifdef TARGET_NR_sigaction
5812 case TARGET_NR_sigaction:
5814 #if defined(TARGET_ALPHA)
5815 struct target_sigaction act, oact, *pact = 0;
5816 struct target_old_sigaction *old_act;
5817 if (arg2) {
5818 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5819 goto efault;
5820 act._sa_handler = old_act->_sa_handler;
5821 target_siginitset(&act.sa_mask, old_act->sa_mask);
5822 act.sa_flags = old_act->sa_flags;
5823 act.sa_restorer = 0;
5824 unlock_user_struct(old_act, arg2, 0);
5825 pact = &act;
5827 ret = get_errno(do_sigaction(arg1, pact, &oact));
5828 if (!is_error(ret) && arg3) {
5829 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5830 goto efault;
5831 old_act->_sa_handler = oact._sa_handler;
5832 old_act->sa_mask = oact.sa_mask.sig[0];
5833 old_act->sa_flags = oact.sa_flags;
5834 unlock_user_struct(old_act, arg3, 1);
5836 #elif defined(TARGET_MIPS)
5837 struct target_sigaction act, oact, *pact, *old_act;
5839 if (arg2) {
5840 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5841 goto efault;
5842 act._sa_handler = old_act->_sa_handler;
5843 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5844 act.sa_flags = old_act->sa_flags;
5845 unlock_user_struct(old_act, arg2, 0);
5846 pact = &act;
5847 } else {
5848 pact = NULL;
5851 ret = get_errno(do_sigaction(arg1, pact, &oact));
5853 if (!is_error(ret) && arg3) {
5854 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5855 goto efault;
5856 old_act->_sa_handler = oact._sa_handler;
5857 old_act->sa_flags = oact.sa_flags;
5858 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5859 old_act->sa_mask.sig[1] = 0;
5860 old_act->sa_mask.sig[2] = 0;
5861 old_act->sa_mask.sig[3] = 0;
5862 unlock_user_struct(old_act, arg3, 1);
5864 #else
5865 struct target_old_sigaction *old_act;
5866 struct target_sigaction act, oact, *pact;
5867 if (arg2) {
5868 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5869 goto efault;
5870 act._sa_handler = old_act->_sa_handler;
5871 target_siginitset(&act.sa_mask, old_act->sa_mask);
5872 act.sa_flags = old_act->sa_flags;
5873 act.sa_restorer = old_act->sa_restorer;
5874 unlock_user_struct(old_act, arg2, 0);
5875 pact = &act;
5876 } else {
5877 pact = NULL;
5879 ret = get_errno(do_sigaction(arg1, pact, &oact));
5880 if (!is_error(ret) && arg3) {
5881 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5882 goto efault;
5883 old_act->_sa_handler = oact._sa_handler;
5884 old_act->sa_mask = oact.sa_mask.sig[0];
5885 old_act->sa_flags = oact.sa_flags;
5886 old_act->sa_restorer = oact.sa_restorer;
5887 unlock_user_struct(old_act, arg3, 1);
5889 #endif
5891 break;
5892 #endif
5893 case TARGET_NR_rt_sigaction:
5895 #if defined(TARGET_ALPHA)
5896 struct target_sigaction act, oact, *pact = 0;
5897 struct target_rt_sigaction *rt_act;
5898 /* ??? arg4 == sizeof(sigset_t). */
5899 if (arg2) {
5900 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5901 goto efault;
5902 act._sa_handler = rt_act->_sa_handler;
5903 act.sa_mask = rt_act->sa_mask;
5904 act.sa_flags = rt_act->sa_flags;
5905 act.sa_restorer = arg5;
5906 unlock_user_struct(rt_act, arg2, 0);
5907 pact = &act;
5909 ret = get_errno(do_sigaction(arg1, pact, &oact));
5910 if (!is_error(ret) && arg3) {
5911 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5912 goto efault;
5913 rt_act->_sa_handler = oact._sa_handler;
5914 rt_act->sa_mask = oact.sa_mask;
5915 rt_act->sa_flags = oact.sa_flags;
5916 unlock_user_struct(rt_act, arg3, 1);
5918 #else
5919 struct target_sigaction *act;
5920 struct target_sigaction *oact;
5922 if (arg2) {
5923 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5924 goto efault;
5925 } else
5926 act = NULL;
5927 if (arg3) {
5928 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5929 ret = -TARGET_EFAULT;
5930 goto rt_sigaction_fail;
5932 } else
5933 oact = NULL;
5934 ret = get_errno(do_sigaction(arg1, act, oact));
5935 rt_sigaction_fail:
5936 if (act)
5937 unlock_user_struct(act, arg2, 0);
5938 if (oact)
5939 unlock_user_struct(oact, arg3, 1);
5940 #endif
5942 break;
5943 #ifdef TARGET_NR_sgetmask /* not on alpha */
5944 case TARGET_NR_sgetmask:
5946 sigset_t cur_set;
5947 abi_ulong target_set;
5948 sigprocmask(0, NULL, &cur_set);
5949 host_to_target_old_sigset(&target_set, &cur_set);
5950 ret = target_set;
5952 break;
5953 #endif
5954 #ifdef TARGET_NR_ssetmask /* not on alpha */
5955 case TARGET_NR_ssetmask:
5957 sigset_t set, oset, cur_set;
5958 abi_ulong target_set = arg1;
5959 sigprocmask(0, NULL, &cur_set);
5960 target_to_host_old_sigset(&set, &target_set);
5961 sigorset(&set, &set, &cur_set);
5962 sigprocmask(SIG_SETMASK, &set, &oset);
5963 host_to_target_old_sigset(&target_set, &oset);
5964 ret = target_set;
5966 break;
5967 #endif
5968 #ifdef TARGET_NR_sigprocmask
5969 case TARGET_NR_sigprocmask:
5971 #if defined(TARGET_ALPHA)
5972 sigset_t set, oldset;
5973 abi_ulong mask;
5974 int how;
5976 switch (arg1) {
5977 case TARGET_SIG_BLOCK:
5978 how = SIG_BLOCK;
5979 break;
5980 case TARGET_SIG_UNBLOCK:
5981 how = SIG_UNBLOCK;
5982 break;
5983 case TARGET_SIG_SETMASK:
5984 how = SIG_SETMASK;
5985 break;
5986 default:
5987 ret = -TARGET_EINVAL;
5988 goto fail;
5990 mask = arg2;
5991 target_to_host_old_sigset(&set, &mask);
5993 ret = get_errno(sigprocmask(how, &set, &oldset));
5994 if (!is_error(ret)) {
5995 host_to_target_old_sigset(&mask, &oldset);
5996 ret = mask;
5997 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5999 #else
6000 sigset_t set, oldset, *set_ptr;
6001 int how;
6003 if (arg2) {
6004 switch (arg1) {
6005 case TARGET_SIG_BLOCK:
6006 how = SIG_BLOCK;
6007 break;
6008 case TARGET_SIG_UNBLOCK:
6009 how = SIG_UNBLOCK;
6010 break;
6011 case TARGET_SIG_SETMASK:
6012 how = SIG_SETMASK;
6013 break;
6014 default:
6015 ret = -TARGET_EINVAL;
6016 goto fail;
6018 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6019 goto efault;
6020 target_to_host_old_sigset(&set, p);
6021 unlock_user(p, arg2, 0);
6022 set_ptr = &set;
6023 } else {
6024 how = 0;
6025 set_ptr = NULL;
6027 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6028 if (!is_error(ret) && arg3) {
6029 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6030 goto efault;
6031 host_to_target_old_sigset(p, &oldset);
6032 unlock_user(p, arg3, sizeof(target_sigset_t));
6034 #endif
6036 break;
6037 #endif
6038 case TARGET_NR_rt_sigprocmask:
6040 int how = arg1;
6041 sigset_t set, oldset, *set_ptr;
6043 if (arg2) {
6044 switch(how) {
6045 case TARGET_SIG_BLOCK:
6046 how = SIG_BLOCK;
6047 break;
6048 case TARGET_SIG_UNBLOCK:
6049 how = SIG_UNBLOCK;
6050 break;
6051 case TARGET_SIG_SETMASK:
6052 how = SIG_SETMASK;
6053 break;
6054 default:
6055 ret = -TARGET_EINVAL;
6056 goto fail;
6058 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6059 goto efault;
6060 target_to_host_sigset(&set, p);
6061 unlock_user(p, arg2, 0);
6062 set_ptr = &set;
6063 } else {
6064 how = 0;
6065 set_ptr = NULL;
6067 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6068 if (!is_error(ret) && arg3) {
6069 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6070 goto efault;
6071 host_to_target_sigset(p, &oldset);
6072 unlock_user(p, arg3, sizeof(target_sigset_t));
6075 break;
6076 #ifdef TARGET_NR_sigpending
6077 case TARGET_NR_sigpending:
6079 sigset_t set;
6080 ret = get_errno(sigpending(&set));
6081 if (!is_error(ret)) {
6082 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6083 goto efault;
6084 host_to_target_old_sigset(p, &set);
6085 unlock_user(p, arg1, sizeof(target_sigset_t));
6088 break;
6089 #endif
6090 case TARGET_NR_rt_sigpending:
6092 sigset_t set;
6093 ret = get_errno(sigpending(&set));
6094 if (!is_error(ret)) {
6095 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6096 goto efault;
6097 host_to_target_sigset(p, &set);
6098 unlock_user(p, arg1, sizeof(target_sigset_t));
6101 break;
6102 #ifdef TARGET_NR_sigsuspend
6103 case TARGET_NR_sigsuspend:
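/* Alpha passes the old-style mask value directly in arg1; the other
   targets pass a pointer to a target sigset that is copied in first. */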
6105 sigset_t set;
6106 #if defined(TARGET_ALPHA)
6107 abi_ulong mask = arg1;
6108 target_to_host_old_sigset(&set, &mask);
6109 #else
6110 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6111 goto efault;
6112 target_to_host_old_sigset(&set, p);
6113 unlock_user(p, arg1, 0);
6114 #endif
6115 ret = get_errno(sigsuspend(&set));
6117 break;
6118 #endif
6119 case TARGET_NR_rt_sigsuspend:
6121 sigset_t set;
6122 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6123 goto efault;
6124 target_to_host_sigset(&set, p);
6125 unlock_user(p, arg1, 0);
6126 ret = get_errno(sigsuspend(&set));
6128 break;
6129 case TARGET_NR_rt_sigtimedwait:
6131 sigset_t set;
6132 struct timespec uts, *puts;
6133 siginfo_t uinfo;
6135 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6136 goto efault;
6137 target_to_host_sigset(&set, p);
6138 unlock_user(p, arg1, 0);
6139 if (arg3) {
6140 puts = &uts;
6141 target_to_host_timespec(puts, arg3);
6142 } else {
6143 puts = NULL;
6145 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6146 if (!is_error(ret) && arg2) {
6147 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6148 goto efault;
6149 host_to_target_siginfo(p, &uinfo);
6150 unlock_user(p, arg2, sizeof(target_siginfo_t));
6153 break;
6154 case TARGET_NR_rt_sigqueueinfo:
6156 siginfo_t uinfo;
6157 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6158 goto efault;
6159 target_to_host_siginfo(&uinfo, p);
6160 unlock_user(p, arg1, 0);
6161 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6163 break;
6164 #ifdef TARGET_NR_sigreturn
6165 case TARGET_NR_sigreturn:
6166 /* NOTE: ret is eax, so no transcoding must be done */
6167 ret = do_sigreturn(cpu_env);
6168 break;
6169 #endif
6170 case TARGET_NR_rt_sigreturn:
6171 /* NOTE: ret is eax, so no transcoding must be done */
6172 ret = do_rt_sigreturn(cpu_env);
6173 break;
6174 case TARGET_NR_sethostname:
6175 if (!(p = lock_user_string(arg1)))
6176 goto efault;
6177 ret = get_errno(sethostname(p, arg2));
6178 unlock_user(p, arg1, 0);
6179 break;
6180 case TARGET_NR_setrlimit:
6182 int resource = target_to_host_resource(arg1);
6183 struct target_rlimit *target_rlim;
6184 struct rlimit rlim;
6185 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6186 goto efault;
6187 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6188 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6189 unlock_user_struct(target_rlim, arg2, 0);
6190 ret = get_errno(setrlimit(resource, &rlim));
6192 break;
6193 case TARGET_NR_getrlimit:
6195 int resource = target_to_host_resource(arg1);
6196 struct target_rlimit *target_rlim;
6197 struct rlimit rlim;
6199 ret = get_errno(getrlimit(resource, &rlim));
6200 if (!is_error(ret)) {
6201 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6202 goto efault;
6203 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6204 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6205 unlock_user_struct(target_rlim, arg2, 1);
6208 break;
6209 case TARGET_NR_getrusage:
6211 struct rusage rusage;
6212 ret = get_errno(getrusage(arg1, &rusage));
6213 if (!is_error(ret)) {
6214 host_to_target_rusage(arg2, &rusage);
6217 break;
6218 case TARGET_NR_gettimeofday:
6220 struct timeval tv;
6221 ret = get_errno(gettimeofday(&tv, NULL));
6222 if (!is_error(ret)) {
6223 if (copy_to_user_timeval(arg1, &tv))
6224 goto efault;
6227 break;
6228 case TARGET_NR_settimeofday:
6230 struct timeval tv;
6231 if (copy_from_user_timeval(&tv, arg1))
6232 goto efault;
6233 ret = get_errno(settimeofday(&tv, NULL));
6235 break;
6236 #if defined(TARGET_NR_select)
6237 case TARGET_NR_select:
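/* Alpha and s390x pass the select arguments directly; the other targets
   supply a pointer to a sel_arg_struct holding n, the fd sets and the
   timeout, which is unpacked below. */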
6238 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6239 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6240 #else
6242 struct target_sel_arg_struct *sel;
6243 abi_ulong inp, outp, exp, tvp;
6244 long nsel;
6246 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6247 goto efault;
6248 nsel = tswapal(sel->n);
6249 inp = tswapal(sel->inp);
6250 outp = tswapal(sel->outp);
6251 exp = tswapal(sel->exp);
6252 tvp = tswapal(sel->tvp);
6253 unlock_user_struct(sel, arg1, 0);
6254 ret = do_select(nsel, inp, outp, exp, tvp);
6256 #endif
6257 break;
6258 #endif
6259 #ifdef TARGET_NR_pselect6
6260 case TARGET_NR_pselect6:
6262 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6263 fd_set rfds, wfds, efds;
6264 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6265 struct timespec ts, *ts_ptr;
6268 /* The 6th arg is actually two args smashed together,
6269 * so we cannot use the C library. */
6271 sigset_t set;
6272 struct {
6273 sigset_t *set;
6274 size_t size;
6275 } sig, *sig_ptr;
6277 abi_ulong arg_sigset, arg_sigsize, *arg7;
6278 target_sigset_t *target_sigset;
6280 n = arg1;
6281 rfd_addr = arg2;
6282 wfd_addr = arg3;
6283 efd_addr = arg4;
6284 ts_addr = arg5;
6286 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6287 if (ret) {
6288 goto fail;
6290 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6291 if (ret) {
6292 goto fail;
6294 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6295 if (ret) {
6296 goto fail;
6300 /* This takes a timespec, and not a timeval, so we cannot
6301 * use the do_select() helper ... */
6303 if (ts_addr) {
6304 if (target_to_host_timespec(&ts, ts_addr)) {
6305 goto efault;
6307 ts_ptr = &ts;
6308 } else {
6309 ts_ptr = NULL;
6312 /* Extract the two packed args for the sigset */
6313 if (arg6) {
6314 sig_ptr = &sig;
6315 sig.size = _NSIG / 8;
6317 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6318 if (!arg7) {
6319 goto efault;
6321 arg_sigset = tswapal(arg7[0]);
6322 arg_sigsize = tswapal(arg7[1]);
6323 unlock_user(arg7, arg6, 0);
6325 if (arg_sigset) {
6326 sig.set = &set;
6327 if (arg_sigsize != sizeof(*target_sigset)) {
6328 /* Like the kernel, we enforce correct size sigsets */
6329 ret = -TARGET_EINVAL;
6330 goto fail;
6332 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6333 sizeof(*target_sigset), 1);
6334 if (!target_sigset) {
6335 goto efault;
6337 target_to_host_sigset(&set, target_sigset);
6338 unlock_user(target_sigset, arg_sigset, 0);
6339 } else {
6340 sig.set = NULL;
6342 } else {
6343 sig_ptr = NULL;
6346 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6347 ts_ptr, sig_ptr));
6349 if (!is_error(ret)) {
6350 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6351 goto efault;
6352 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6353 goto efault;
6354 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6355 goto efault;
6357 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6358 goto efault;
6361 break;
6362 #endif
6363 case TARGET_NR_symlink:
6365 void *p2;
6366 p = lock_user_string(arg1);
6367 p2 = lock_user_string(arg2);
6368 if (!p || !p2)
6369 ret = -TARGET_EFAULT;
6370 else
6371 ret = get_errno(symlink(p, p2));
6372 unlock_user(p2, arg2, 0);
6373 unlock_user(p, arg1, 0);
6375 break;
6376 #if defined(TARGET_NR_symlinkat)
6377 case TARGET_NR_symlinkat:
6379 void *p2;
6380 p = lock_user_string(arg1);
6381 p2 = lock_user_string(arg3);
6382 if (!p || !p2)
6383 ret = -TARGET_EFAULT;
6384 else
6385 ret = get_errno(symlinkat(p, arg2, p2));
6386 unlock_user(p2, arg3, 0);
6387 unlock_user(p, arg1, 0);
6389 break;
6390 #endif
6391 #ifdef TARGET_NR_oldlstat
6392 case TARGET_NR_oldlstat:
6393 goto unimplemented;
6394 #endif
6395 case TARGET_NR_readlink:
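/* A readlink of the guest's own /proc/.../exe must report the emulated
   binary, so resolve exec_path here rather than letting the host answer. */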
6397 void *p2;
6398 p = lock_user_string(arg1);
6399 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6400 if (!p || !p2) {
6401 ret = -TARGET_EFAULT;
6402 } else if (is_proc_myself((const char *)p, "exe")) {
6403 char real[PATH_MAX], *temp;
6404 temp = realpath(exec_path, real);
6405 ret = temp == NULL ? get_errno(-1) : strlen(real);
6406 snprintf((char *)p2, arg3, "%s", real);
6407 } else {
6408 ret = get_errno(readlink(path(p), p2, arg3));
6410 unlock_user(p2, arg2, ret);
6411 unlock_user(p, arg1, 0);
6413 break;
6414 #if defined(TARGET_NR_readlinkat)
6415 case TARGET_NR_readlinkat:
6417 void *p2;
6418 p = lock_user_string(arg2);
6419 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6420 if (!p || !p2) {
6421 ret = -TARGET_EFAULT;
6422 } else if (is_proc_myself((const char *)p, "exe")) {
6423 char real[PATH_MAX], *temp;
6424 temp = realpath(exec_path, real);
6425 ret = temp == NULL ? get_errno(-1) : strlen(real);
6426 snprintf((char *)p2, arg4, "%s", real);
6427 } else {
6428 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6430 unlock_user(p2, arg3, ret);
6431 unlock_user(p, arg2, 0);
6433 break;
6434 #endif
6435 #ifdef TARGET_NR_uselib
6436 case TARGET_NR_uselib:
6437 goto unimplemented;
6438 #endif
6439 #ifdef TARGET_NR_swapon
6440 case TARGET_NR_swapon:
6441 if (!(p = lock_user_string(arg1)))
6442 goto efault;
6443 ret = get_errno(swapon(p, arg2));
6444 unlock_user(p, arg1, 0);
6445 break;
6446 #endif
6447 case TARGET_NR_reboot:
6448 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6449 /* arg4 must be ignored in all other cases */
6450 p = lock_user_string(arg4);
6451 if (!p) {
6452 goto efault;
6454 ret = get_errno(reboot(arg1, arg2, arg3, p));
6455 unlock_user(p, arg4, 0);
6456 } else {
6457 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6459 break;
6460 #ifdef TARGET_NR_readdir
6461 case TARGET_NR_readdir:
6462 goto unimplemented;
6463 #endif
6464 #ifdef TARGET_NR_mmap
6465 case TARGET_NR_mmap:
6466 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6467 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6468 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6469 || defined(TARGET_S390X)
6471 abi_ulong *v;
6472 abi_ulong v1, v2, v3, v4, v5, v6;
6473 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6474 goto efault;
6475 v1 = tswapal(v[0]);
6476 v2 = tswapal(v[1]);
6477 v3 = tswapal(v[2]);
6478 v4 = tswapal(v[3]);
6479 v5 = tswapal(v[4]);
6480 v6 = tswapal(v[5]);
6481 unlock_user(v, arg1, 0);
6482 ret = get_errno(target_mmap(v1, v2, v3,
6483 target_to_host_bitmask(v4, mmap_flags_tbl),
6484 v5, v6));
6486 #else
6487 ret = get_errno(target_mmap(arg1, arg2, arg3,
6488 target_to_host_bitmask(arg4, mmap_flags_tbl),
6489 arg5,
6490 arg6));
6491 #endif
6492 break;
6493 #endif
6494 #ifdef TARGET_NR_mmap2
6495 case TARGET_NR_mmap2:
6496 #ifndef MMAP_SHIFT
6497 #define MMAP_SHIFT 12
6498 #endif
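/* mmap2 passes the file offset in 4096-byte units; shift it back into
   a byte offset before handing it to target_mmap. */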
6499 ret = get_errno(target_mmap(arg1, arg2, arg3,
6500 target_to_host_bitmask(arg4, mmap_flags_tbl),
6501 arg5,
6502 arg6 << MMAP_SHIFT));
6503 break;
6504 #endif
6505 case TARGET_NR_munmap:
6506 ret = get_errno(target_munmap(arg1, arg2));
6507 break;
6508 case TARGET_NR_mprotect:
6510 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6511 /* Special hack to detect libc making the stack executable. */
6512 if ((arg3 & PROT_GROWSDOWN)
6513 && arg1 >= ts->info->stack_limit
6514 && arg1 <= ts->info->start_stack) {
6515 arg3 &= ~PROT_GROWSDOWN;
6516 arg2 = arg2 + arg1 - ts->info->stack_limit;
6517 arg1 = ts->info->stack_limit;
6520 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6521 break;
6522 #ifdef TARGET_NR_mremap
6523 case TARGET_NR_mremap:
6524 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6525 break;
6526 #endif
6527 /* ??? msync/mlock/munlock are broken for softmmu. */
6528 #ifdef TARGET_NR_msync
6529 case TARGET_NR_msync:
6530 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6531 break;
6532 #endif
6533 #ifdef TARGET_NR_mlock
6534 case TARGET_NR_mlock:
6535 ret = get_errno(mlock(g2h(arg1), arg2));
6536 break;
6537 #endif
6538 #ifdef TARGET_NR_munlock
6539 case TARGET_NR_munlock:
6540 ret = get_errno(munlock(g2h(arg1), arg2));
6541 break;
6542 #endif
6543 #ifdef TARGET_NR_mlockall
6544 case TARGET_NR_mlockall:
6545 ret = get_errno(mlockall(arg1));
6546 break;
6547 #endif
6548 #ifdef TARGET_NR_munlockall
6549 case TARGET_NR_munlockall:
6550 ret = get_errno(munlockall());
6551 break;
6552 #endif
6553 case TARGET_NR_truncate:
6554 if (!(p = lock_user_string(arg1)))
6555 goto efault;
6556 ret = get_errno(truncate(p, arg2));
6557 unlock_user(p, arg1, 0);
6558 break;
6559 case TARGET_NR_ftruncate:
6560 ret = get_errno(ftruncate(arg1, arg2));
6561 break;
6562 case TARGET_NR_fchmod:
6563 ret = get_errno(fchmod(arg1, arg2));
6564 break;
6565 #if defined(TARGET_NR_fchmodat)
6566 case TARGET_NR_fchmodat:
6567 if (!(p = lock_user_string(arg2)))
6568 goto efault;
6569 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6570 unlock_user(p, arg2, 0);
6571 break;
6572 #endif
6573 case TARGET_NR_getpriority:
6574 /* Note that negative values are valid for getpriority, so we must
6575 differentiate based on errno settings. */
6576 errno = 0;
6577 ret = getpriority(arg1, arg2);
6578 if (ret == -1 && errno != 0) {
6579 ret = -host_to_target_errno(errno);
6580 break;
6582 #ifdef TARGET_ALPHA
6583 /* Return value is the unbiased priority. Signal no error. */
6584 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6585 #else
6586 /* Return value is a biased priority to avoid negative numbers. */
6587 ret = 20 - ret;
6588 #endif
6589 break;
6590 case TARGET_NR_setpriority:
6591 ret = get_errno(setpriority(arg1, arg2, arg3));
6592 break;
6593 #ifdef TARGET_NR_profil
6594 case TARGET_NR_profil:
6595 goto unimplemented;
6596 #endif
6597 case TARGET_NR_statfs:
6598 if (!(p = lock_user_string(arg1)))
6599 goto efault;
6600 ret = get_errno(statfs(path(p), &stfs));
6601 unlock_user(p, arg1, 0);
6602 convert_statfs:
6603 if (!is_error(ret)) {
6604 struct target_statfs *target_stfs;
6606 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6607 goto efault;
6608 __put_user(stfs.f_type, &target_stfs->f_type);
6609 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6610 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6611 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6612 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6613 __put_user(stfs.f_files, &target_stfs->f_files);
6614 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6615 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6616 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6617 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6618 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6619 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6620 unlock_user_struct(target_stfs, arg2, 1);
6622 break;
6623 case TARGET_NR_fstatfs:
6624 ret = get_errno(fstatfs(arg1, &stfs));
6625 goto convert_statfs;
6626 #ifdef TARGET_NR_statfs64
6627 case TARGET_NR_statfs64:
6628 if (!(p = lock_user_string(arg1)))
6629 goto efault;
6630 ret = get_errno(statfs(path(p), &stfs));
6631 unlock_user(p, arg1, 0);
6632 convert_statfs64:
6633 if (!is_error(ret)) {
6634 struct target_statfs64 *target_stfs;
6636 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6637 goto efault;
6638 __put_user(stfs.f_type, &target_stfs->f_type);
6639 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6640 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6641 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6642 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6643 __put_user(stfs.f_files, &target_stfs->f_files);
6644 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6645 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6646 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6647 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6648 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6649 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6650 unlock_user_struct(target_stfs, arg3, 1);
6652 break;
6653 case TARGET_NR_fstatfs64:
6654 ret = get_errno(fstatfs(arg1, &stfs));
6655 goto convert_statfs64;
6656 #endif
6657 #ifdef TARGET_NR_ioperm
6658 case TARGET_NR_ioperm:
6659 goto unimplemented;
6660 #endif
6661 #ifdef TARGET_NR_socketcall
6662 case TARGET_NR_socketcall:
6663 ret = do_socketcall(arg1, arg2);
6664 break;
6665 #endif
6666 #ifdef TARGET_NR_accept
6667 case TARGET_NR_accept:
6668 ret = do_accept4(arg1, arg2, arg3, 0);
6669 break;
6670 #endif
6671 #ifdef TARGET_NR_accept4
6672 case TARGET_NR_accept4:
6673 #ifdef CONFIG_ACCEPT4
6674 ret = do_accept4(arg1, arg2, arg3, arg4);
6675 #else
6676 goto unimplemented;
6677 #endif
6678 break;
6679 #endif
6680 #ifdef TARGET_NR_bind
6681 case TARGET_NR_bind:
6682 ret = do_bind(arg1, arg2, arg3);
6683 break;
6684 #endif
6685 #ifdef TARGET_NR_connect
6686 case TARGET_NR_connect:
6687 ret = do_connect(arg1, arg2, arg3);
6688 break;
6689 #endif
6690 #ifdef TARGET_NR_getpeername
6691 case TARGET_NR_getpeername:
6692 ret = do_getpeername(arg1, arg2, arg3);
6693 break;
6694 #endif
6695 #ifdef TARGET_NR_getsockname
6696 case TARGET_NR_getsockname:
6697 ret = do_getsockname(arg1, arg2, arg3);
6698 break;
6699 #endif
6700 #ifdef TARGET_NR_getsockopt
6701 case TARGET_NR_getsockopt:
6702 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6703 break;
6704 #endif
6705 #ifdef TARGET_NR_listen
6706 case TARGET_NR_listen:
6707 ret = get_errno(listen(arg1, arg2));
6708 break;
6709 #endif
6710 #ifdef TARGET_NR_recv
6711 case TARGET_NR_recv:
6712 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6713 break;
6714 #endif
6715 #ifdef TARGET_NR_recvfrom
6716 case TARGET_NR_recvfrom:
6717 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6718 break;
6719 #endif
6720 #ifdef TARGET_NR_recvmsg
6721 case TARGET_NR_recvmsg:
6722 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6723 break;
6724 #endif
6725 #ifdef TARGET_NR_send
6726 case TARGET_NR_send:
6727 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6728 break;
6729 #endif
6730 #ifdef TARGET_NR_sendmsg
6731 case TARGET_NR_sendmsg:
6732 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6733 break;
6734 #endif
6735 #ifdef TARGET_NR_sendto
6736 case TARGET_NR_sendto:
6737 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6738 break;
6739 #endif
6740 #ifdef TARGET_NR_shutdown
6741 case TARGET_NR_shutdown:
6742 ret = get_errno(shutdown(arg1, arg2));
6743 break;
6744 #endif
6745 #ifdef TARGET_NR_socket
6746 case TARGET_NR_socket:
6747 ret = do_socket(arg1, arg2, arg3);
6748 break;
6749 #endif
6750 #ifdef TARGET_NR_socketpair
6751 case TARGET_NR_socketpair:
6752 ret = do_socketpair(arg1, arg2, arg3, arg4);
6753 break;
6754 #endif
6755 #ifdef TARGET_NR_setsockopt
6756 case TARGET_NR_setsockopt:
6757 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6758 break;
6759 #endif
6761 case TARGET_NR_syslog:
6762 if (!(p = lock_user_string(arg2)))
6763 goto efault;
6764 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6765 unlock_user(p, arg2, 0);
6766 break;
6768 case TARGET_NR_setitimer:
6770 struct itimerval value, ovalue, *pvalue;
6772 if (arg2) {
6773 pvalue = &value;
6774 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6775 || copy_from_user_timeval(&pvalue->it_value,
6776 arg2 + sizeof(struct target_timeval)))
6777 goto efault;
6778 } else {
6779 pvalue = NULL;
6781 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6782 if (!is_error(ret) && arg3) {
6783 if (copy_to_user_timeval(arg3,
6784 &ovalue.it_interval)
6785 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6786 &ovalue.it_value))
6787 goto efault;
6790 break;
6791 case TARGET_NR_getitimer:
6793 struct itimerval value;
6795 ret = get_errno(getitimer(arg1, &value));
6796 if (!is_error(ret) && arg2) {
6797 if (copy_to_user_timeval(arg2,
6798 &value.it_interval)
6799 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6800 &value.it_value))
6801 goto efault;
6804 break;
6805 case TARGET_NR_stat:
6806 if (!(p = lock_user_string(arg1)))
6807 goto efault;
6808 ret = get_errno(stat(path(p), &st));
6809 unlock_user(p, arg1, 0);
6810 goto do_stat;
6811 case TARGET_NR_lstat:
6812 if (!(p = lock_user_string(arg1)))
6813 goto efault;
6814 ret = get_errno(lstat(path(p), &st));
6815 unlock_user(p, arg1, 0);
6816 goto do_stat;
6817 case TARGET_NR_fstat:
6819 ret = get_errno(fstat(arg1, &st));
6820 do_stat:
6821 if (!is_error(ret)) {
6822 struct target_stat *target_st;
6824 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6825 goto efault;
6826 memset(target_st, 0, sizeof(*target_st));
6827 __put_user(st.st_dev, &target_st->st_dev);
6828 __put_user(st.st_ino, &target_st->st_ino);
6829 __put_user(st.st_mode, &target_st->st_mode);
6830 __put_user(st.st_uid, &target_st->st_uid);
6831 __put_user(st.st_gid, &target_st->st_gid);
6832 __put_user(st.st_nlink, &target_st->st_nlink);
6833 __put_user(st.st_rdev, &target_st->st_rdev);
6834 __put_user(st.st_size, &target_st->st_size);
6835 __put_user(st.st_blksize, &target_st->st_blksize);
6836 __put_user(st.st_blocks, &target_st->st_blocks);
6837 __put_user(st.st_atime, &target_st->target_st_atime);
6838 __put_user(st.st_mtime, &target_st->target_st_mtime);
6839 __put_user(st.st_ctime, &target_st->target_st_ctime);
6840 unlock_user_struct(target_st, arg2, 1);
6843 break;
6844 #ifdef TARGET_NR_olduname
6845 case TARGET_NR_olduname:
6846 goto unimplemented;
6847 #endif
6848 #ifdef TARGET_NR_iopl
6849 case TARGET_NR_iopl:
6850 goto unimplemented;
6851 #endif
6852 case TARGET_NR_vhangup:
6853 ret = get_errno(vhangup());
6854 break;
6855 #ifdef TARGET_NR_idle
6856 case TARGET_NR_idle:
6857 goto unimplemented;
6858 #endif
6859 #ifdef TARGET_NR_syscall
6860 case TARGET_NR_syscall:
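/* Indirect syscall: arg1 carries the real target syscall number and the
   remaining arguments shift up by one slot. */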
6861 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6862 arg6, arg7, arg8, 0);
6863 break;
6864 #endif
6865 case TARGET_NR_wait4:
6867 int status;
6868 abi_long status_ptr = arg2;
6869 struct rusage rusage, *rusage_ptr;
6870 abi_ulong target_rusage = arg4;
6871 if (target_rusage)
6872 rusage_ptr = &rusage;
6873 else
6874 rusage_ptr = NULL;
6875 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6876 if (!is_error(ret)) {
6877 if (status_ptr && ret) {
6878 status = host_to_target_waitstatus(status);
6879 if (put_user_s32(status, status_ptr))
6880 goto efault;
6882 if (target_rusage)
6883 host_to_target_rusage(target_rusage, &rusage);
6886 break;
6887 #ifdef TARGET_NR_swapoff
6888 case TARGET_NR_swapoff:
6889 if (!(p = lock_user_string(arg1)))
6890 goto efault;
6891 ret = get_errno(swapoff(p));
6892 unlock_user(p, arg1, 0);
6893 break;
6894 #endif
6895 case TARGET_NR_sysinfo:
6897 struct target_sysinfo *target_value;
6898 struct sysinfo value;
6899 ret = get_errno(sysinfo(&value));
6900 if (!is_error(ret) && arg1)
6902 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6903 goto efault;
6904 __put_user(value.uptime, &target_value->uptime);
6905 __put_user(value.loads[0], &target_value->loads[0]);
6906 __put_user(value.loads[1], &target_value->loads[1]);
6907 __put_user(value.loads[2], &target_value->loads[2]);
6908 __put_user(value.totalram, &target_value->totalram);
6909 __put_user(value.freeram, &target_value->freeram);
6910 __put_user(value.sharedram, &target_value->sharedram);
6911 __put_user(value.bufferram, &target_value->bufferram);
6912 __put_user(value.totalswap, &target_value->totalswap);
6913 __put_user(value.freeswap, &target_value->freeswap);
6914 __put_user(value.procs, &target_value->procs);
6915 __put_user(value.totalhigh, &target_value->totalhigh);
6916 __put_user(value.freehigh, &target_value->freehigh);
6917 __put_user(value.mem_unit, &target_value->mem_unit);
6918 unlock_user_struct(target_value, arg1, 1);
6921 break;
6922 #ifdef TARGET_NR_ipc
6923 case TARGET_NR_ipc:
6924 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6925 break;
6926 #endif
6927 #ifdef TARGET_NR_semget
6928 case TARGET_NR_semget:
6929 ret = get_errno(semget(arg1, arg2, arg3));
6930 break;
6931 #endif
6932 #ifdef TARGET_NR_semop
6933 case TARGET_NR_semop:
6934 ret = do_semop(arg1, arg2, arg3);
6935 break;
6936 #endif
6937 #ifdef TARGET_NR_semctl
6938 case TARGET_NR_semctl:
6939 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6940 break;
6941 #endif
6942 #ifdef TARGET_NR_msgctl
6943 case TARGET_NR_msgctl:
6944 ret = do_msgctl(arg1, arg2, arg3);
6945 break;
6946 #endif
6947 #ifdef TARGET_NR_msgget
6948 case TARGET_NR_msgget:
6949 ret = get_errno(msgget(arg1, arg2));
6950 break;
6951 #endif
6952 #ifdef TARGET_NR_msgrcv
6953 case TARGET_NR_msgrcv:
6954 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6955 break;
6956 #endif
6957 #ifdef TARGET_NR_msgsnd
6958 case TARGET_NR_msgsnd:
6959 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6960 break;
6961 #endif
6962 #ifdef TARGET_NR_shmget
6963 case TARGET_NR_shmget:
6964 ret = get_errno(shmget(arg1, arg2, arg3));
6965 break;
6966 #endif
6967 #ifdef TARGET_NR_shmctl
6968 case TARGET_NR_shmctl:
6969 ret = do_shmctl(arg1, arg2, arg3);
6970 break;
6971 #endif
6972 #ifdef TARGET_NR_shmat
6973 case TARGET_NR_shmat:
6974 ret = do_shmat(arg1, arg2, arg3);
6975 break;
6976 #endif
6977 #ifdef TARGET_NR_shmdt
6978 case TARGET_NR_shmdt:
6979 ret = do_shmdt(arg1);
6980 break;
6981 #endif
6982 case TARGET_NR_fsync:
6983 ret = get_errno(fsync(arg1));
6984 break;
6985 case TARGET_NR_clone:
6986 /* Linux manages to have three different orderings for its
6987 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
6988 * match the kernel's CONFIG_CLONE_* settings.
6989 * Microblaze is further special in that it uses a sixth
6990 * implicit argument to clone for the TLS pointer. */
6992 #if defined(TARGET_MICROBLAZE)
6993 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6994 #elif defined(TARGET_CLONE_BACKWARDS)
6995 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6996 #elif defined(TARGET_CLONE_BACKWARDS2)
6997 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6998 #else
6999 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7000 #endif
7001 break;
7002 #ifdef __NR_exit_group
7003 /* new thread calls */
7004 case TARGET_NR_exit_group:
7005 #ifdef TARGET_GPROF
7006 _mcleanup();
7007 #endif
7008 gdb_exit(cpu_env, arg1);
7009 ret = get_errno(exit_group(arg1));
7010 break;
7011 #endif
7012 case TARGET_NR_setdomainname:
7013 if (!(p = lock_user_string(arg1)))
7014 goto efault;
7015 ret = get_errno(setdomainname(p, arg2));
7016 unlock_user(p, arg1, 0);
7017 break;
7018 case TARGET_NR_uname:
7019 /* no need to transcode because we use the linux syscall */
7021 struct new_utsname * buf;
7023 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7024 goto efault;
7025 ret = get_errno(sys_uname(buf));
7026 if (!is_error(ret)) {
7027 /* Overwrite the native machine name with whatever is being
7028 emulated. */
7029 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7030 /* Allow the user to override the reported release. */
7031 if (qemu_uname_release && *qemu_uname_release)
7032 strcpy (buf->release, qemu_uname_release);
7034 unlock_user_struct(buf, arg1, 1);
7036 break;
7037 #ifdef TARGET_I386
7038 case TARGET_NR_modify_ldt:
7039 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7040 break;
7041 #if !defined(TARGET_X86_64)
7042 case TARGET_NR_vm86old:
7043 goto unimplemented;
7044 case TARGET_NR_vm86:
7045 ret = do_vm86(cpu_env, arg1, arg2);
7046 break;
7047 #endif
7048 #endif
7049 case TARGET_NR_adjtimex:
7050 goto unimplemented;
7051 #ifdef TARGET_NR_create_module
7052 case TARGET_NR_create_module:
7053 #endif
7054 case TARGET_NR_init_module:
7055 case TARGET_NR_delete_module:
7056 #ifdef TARGET_NR_get_kernel_syms
7057 case TARGET_NR_get_kernel_syms:
7058 #endif
7059 goto unimplemented;
7060 case TARGET_NR_quotactl:
7061 goto unimplemented;
7062 case TARGET_NR_getpgid:
7063 ret = get_errno(getpgid(arg1));
7064 break;
7065 case TARGET_NR_fchdir:
7066 ret = get_errno(fchdir(arg1));
7067 break;
7068 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7069 case TARGET_NR_bdflush:
7070 goto unimplemented;
7071 #endif
7072 #ifdef TARGET_NR_sysfs
7073 case TARGET_NR_sysfs:
7074 goto unimplemented;
7075 #endif
7076 case TARGET_NR_personality:
7077 ret = get_errno(personality(arg1));
7078 break;
7079 #ifdef TARGET_NR_afs_syscall
7080 case TARGET_NR_afs_syscall:
7081 goto unimplemented;
7082 #endif
7083 #ifdef TARGET_NR__llseek /* Not on alpha */
7084 case TARGET_NR__llseek:
7086 int64_t res;
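/* arg2 and arg3 are the high and low 32-bit halves of the offset; the
   64-bit result is written back through the pointer in arg4. */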
7087 #if !defined(__NR_llseek)
7088 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7089 if (res == -1) {
7090 ret = get_errno(res);
7091 } else {
7092 ret = 0;
7094 #else
7095 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7096 #endif
7097 if ((ret == 0) && put_user_s64(res, arg4)) {
7098 goto efault;
7101 break;
7102 #endif
7103 case TARGET_NR_getdents:
7104 #ifdef __NR_getdents
7105 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
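/* 32-bit guest on a 64-bit host: the host dirents are wider than the
   target layout, so read them into a scratch buffer and repack them
   into the guest's buffer. */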
7107 struct target_dirent *target_dirp;
7108 struct linux_dirent *dirp;
7109 abi_long count = arg3;
7111 dirp = malloc(count);
7112 if (!dirp) {
7113 ret = -TARGET_ENOMEM;
7114 goto fail;
7117 ret = get_errno(sys_getdents(arg1, dirp, count));
7118 if (!is_error(ret)) {
7119 struct linux_dirent *de;
7120 struct target_dirent *tde;
7121 int len = ret;
7122 int reclen, treclen;
7123 int count1, tnamelen;
7125 count1 = 0;
7126 de = dirp;
7127 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7128 goto efault;
7129 tde = target_dirp;
7130 while (len > 0) {
7131 reclen = de->d_reclen;
7132 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7133 assert(tnamelen >= 0);
7134 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7135 assert(count1 + treclen <= count);
7136 tde->d_reclen = tswap16(treclen);
7137 tde->d_ino = tswapal(de->d_ino);
7138 tde->d_off = tswapal(de->d_off);
7139 memcpy(tde->d_name, de->d_name, tnamelen);
7140 de = (struct linux_dirent *)((char *)de + reclen);
7141 len -= reclen;
7142 tde = (struct target_dirent *)((char *)tde + treclen);
7143 count1 += treclen;
7145 ret = count1;
7146 unlock_user(target_dirp, arg2, ret);
7148 free(dirp);
7150 #else
7152 struct linux_dirent *dirp;
7153 abi_long count = arg3;
7155 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7156 goto efault;
7157 ret = get_errno(sys_getdents(arg1, dirp, count));
7158 if (!is_error(ret)) {
7159 struct linux_dirent *de;
7160 int len = ret;
7161 int reclen;
7162 de = dirp;
7163 while (len > 0) {
7164 reclen = de->d_reclen;
7165 if (reclen > len)
7166 break;
7167 de->d_reclen = tswap16(reclen);
7168 tswapls(&de->d_ino);
7169 tswapls(&de->d_off);
7170 de = (struct linux_dirent *)((char *)de + reclen);
7171 len -= reclen;
7174 unlock_user(dirp, arg2, ret);
7176 #endif
7177 #else
7178 /* Implement getdents in terms of getdents64 */
7180 struct linux_dirent64 *dirp;
7181 abi_long count = arg3;
7183 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7184 if (!dirp) {
7185 goto efault;
7187 ret = get_errno(sys_getdents64(arg1, dirp, count));
7188 if (!is_error(ret)) {
7189 /* Convert the dirent64 structs to target dirent. We do this
7190 * in-place, since we can guarantee that a target_dirent is no
7191 * larger than a dirent64; however this means we have to be
7192 * careful to read everything before writing in the new format. */
7194 struct linux_dirent64 *de;
7195 struct target_dirent *tde;
7196 int len = ret;
7197 int tlen = 0;
7199 de = dirp;
7200 tde = (struct target_dirent *)dirp;
7201 while (len > 0) {
7202 int namelen, treclen;
7203 int reclen = de->d_reclen;
7204 uint64_t ino = de->d_ino;
7205 int64_t off = de->d_off;
7206 uint8_t type = de->d_type;
7208 namelen = strlen(de->d_name);
7209 treclen = offsetof(struct target_dirent, d_name)
7210 + namelen + 2;
7211 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7213 memmove(tde->d_name, de->d_name, namelen + 1);
7214 tde->d_ino = tswapal(ino);
7215 tde->d_off = tswapal(off);
7216 tde->d_reclen = tswap16(treclen);
7217 /* The target_dirent type is in what was formerly a padding
7218 * byte at the end of the structure: */
7220 *(((char *)tde) + treclen - 1) = type;
7222 de = (struct linux_dirent64 *)((char *)de + reclen);
7223 tde = (struct target_dirent *)((char *)tde + treclen);
7224 len -= reclen;
7225 tlen += treclen;
7227 ret = tlen;
7229 unlock_user(dirp, arg2, ret);
7231 #endif
7232 break;
7233 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7234 case TARGET_NR_getdents64:
7236 struct linux_dirent64 *dirp;
7237 abi_long count = arg3;
7238 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7239 goto efault;
7240 ret = get_errno(sys_getdents64(arg1, dirp, count));
7241 if (!is_error(ret)) {
7242 struct linux_dirent64 *de;
7243 int len = ret;
7244 int reclen;
7245 de = dirp;
7246 while (len > 0) {
7247 reclen = de->d_reclen;
7248 if (reclen > len)
7249 break;
7250 de->d_reclen = tswap16(reclen);
7251 tswap64s((uint64_t *)&de->d_ino);
7252 tswap64s((uint64_t *)&de->d_off);
7253 de = (struct linux_dirent64 *)((char *)de + reclen);
7254 len -= reclen;
7257 unlock_user(dirp, arg2, ret);
7259 break;
7260 #endif /* TARGET_NR_getdents64 */
7261 #if defined(TARGET_NR__newselect)
7262 case TARGET_NR__newselect:
7263 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7264 break;
7265 #endif
7266 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7267 # ifdef TARGET_NR_poll
7268 case TARGET_NR_poll:
7269 # endif
7270 # ifdef TARGET_NR_ppoll
7271 case TARGET_NR_ppoll:
7272 # endif
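/* poll and ppoll share this code path; 'num' distinguishes them where
   the timeout and signal mask are handled below. */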
7274 struct target_pollfd *target_pfd;
7275 unsigned int nfds = arg2;
7276 int timeout = arg3;
7277 struct pollfd *pfd;
7278 unsigned int i;
7280 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7281 if (!target_pfd)
7282 goto efault;
7284 pfd = alloca(sizeof(struct pollfd) * nfds);
7285 for(i = 0; i < nfds; i++) {
7286 pfd[i].fd = tswap32(target_pfd[i].fd);
7287 pfd[i].events = tswap16(target_pfd[i].events);
7290 # ifdef TARGET_NR_ppoll
7291 if (num == TARGET_NR_ppoll) {
7292 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7293 target_sigset_t *target_set;
7294 sigset_t _set, *set = &_set;
7296 if (arg3) {
7297 if (target_to_host_timespec(timeout_ts, arg3)) {
7298 unlock_user(target_pfd, arg1, 0);
7299 goto efault;
7301 } else {
7302 timeout_ts = NULL;
7305 if (arg4) {
7306 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7307 if (!target_set) {
7308 unlock_user(target_pfd, arg1, 0);
7309 goto efault;
7311 target_to_host_sigset(set, target_set);
7312 } else {
7313 set = NULL;
7316 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7318 if (!is_error(ret) && arg3) {
7319 host_to_target_timespec(arg3, timeout_ts);
7321 if (arg4) {
7322 unlock_user(target_set, arg4, 0);
7324 } else
7325 # endif
7326 ret = get_errno(poll(pfd, nfds, timeout));
7328 if (!is_error(ret)) {
7329 for(i = 0; i < nfds; i++) {
7330 target_pfd[i].revents = tswap16(pfd[i].revents);
7333 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7335 break;
7336 #endif
7337 case TARGET_NR_flock:
7338 /* NOTE: the flock constant seems to be the same for every
7339 Linux platform */
7340 ret = get_errno(flock(arg1, arg2));
7341 break;
7342 case TARGET_NR_readv:
7344 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7345 if (vec != NULL) {
7346 ret = get_errno(readv(arg1, vec, arg3));
7347 unlock_iovec(vec, arg2, arg3, 1);
7348 } else {
7349 ret = -host_to_target_errno(errno);
7352 break;
7353 case TARGET_NR_writev:
7355 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7356 if (vec != NULL) {
7357 ret = get_errno(writev(arg1, vec, arg3));
7358 unlock_iovec(vec, arg2, arg3, 0);
7359 } else {
7360 ret = -host_to_target_errno(errno);
7363 break;
7364 case TARGET_NR_getsid:
7365 ret = get_errno(getsid(arg1));
7366 break;
7367 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7368 case TARGET_NR_fdatasync:
7369 ret = get_errno(fdatasync(arg1));
7370 break;
7371 #endif
7372 case TARGET_NR__sysctl:
7373 /* We don't implement this, but ENOTDIR is always a safe
7374 return value. */
7375 ret = -TARGET_ENOTDIR;
7376 break;
7377 case TARGET_NR_sched_getaffinity:
7379 unsigned int mask_size;
7380 unsigned long *mask;
7383 /* sched_getaffinity needs multiples of ulong, so we need to take
7384 * care of mismatches between target ulong and host ulong sizes. */
7386 if (arg2 & (sizeof(abi_ulong) - 1)) {
7387 ret = -TARGET_EINVAL;
7388 break;
7390 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7392 mask = alloca(mask_size);
7393 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7395 if (!is_error(ret)) {
7396 if (copy_to_user(arg3, mask, ret)) {
7397 goto efault;
7401 break;
7402 case TARGET_NR_sched_setaffinity:
7404 unsigned int mask_size;
7405 unsigned long *mask;
7408 /* sched_setaffinity needs multiples of ulong, so we need to take
7409 * care of mismatches between target ulong and host ulong sizes. */
7411 if (arg2 & (sizeof(abi_ulong) - 1)) {
7412 ret = -TARGET_EINVAL;
7413 break;
7415 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7417 mask = alloca(mask_size);
7418 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7419 goto efault;
7421 memcpy(mask, p, arg2);
7422 unlock_user_struct(p, arg2, 0);
7424 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7426 break;
7427 case TARGET_NR_sched_setparam:
7429 struct sched_param *target_schp;
7430 struct sched_param schp;
7432 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7433 goto efault;
7434 schp.sched_priority = tswap32(target_schp->sched_priority);
7435 unlock_user_struct(target_schp, arg2, 0);
7436 ret = get_errno(sched_setparam(arg1, &schp));
7438 break;
7439 case TARGET_NR_sched_getparam:
7441 struct sched_param *target_schp;
7442 struct sched_param schp;
7443 ret = get_errno(sched_getparam(arg1, &schp));
7444 if (!is_error(ret)) {
7445 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7446 goto efault;
7447 target_schp->sched_priority = tswap32(schp.sched_priority);
7448 unlock_user_struct(target_schp, arg2, 1);
7451 break;
7452 case TARGET_NR_sched_setscheduler:
7454 struct sched_param *target_schp;
7455 struct sched_param schp;
7456 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7457 goto efault;
7458 schp.sched_priority = tswap32(target_schp->sched_priority);
7459 unlock_user_struct(target_schp, arg3, 0);
7460 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7462 break;
7463 case TARGET_NR_sched_getscheduler:
7464 ret = get_errno(sched_getscheduler(arg1));
7465 break;
7466 case TARGET_NR_sched_yield:
7467 ret = get_errno(sched_yield());
7468 break;
7469 case TARGET_NR_sched_get_priority_max:
7470 ret = get_errno(sched_get_priority_max(arg1));
7471 break;
7472 case TARGET_NR_sched_get_priority_min:
7473 ret = get_errno(sched_get_priority_min(arg1));
7474 break;
7475 case TARGET_NR_sched_rr_get_interval:
7477 struct timespec ts;
7478 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7479 if (!is_error(ret)) {
7480 host_to_target_timespec(arg2, &ts);
7483 break;
7484 case TARGET_NR_nanosleep:
7486 struct timespec req, rem;
7487 target_to_host_timespec(&req, arg1);
7488 ret = get_errno(nanosleep(&req, &rem));
7489 if (is_error(ret) && arg2) {
7490 host_to_target_timespec(arg2, &rem);
7493 break;
7494 #ifdef TARGET_NR_query_module
7495 case TARGET_NR_query_module:
7496 goto unimplemented;
7497 #endif
7498 #ifdef TARGET_NR_nfsservctl
7499 case TARGET_NR_nfsservctl:
7500 goto unimplemented;
7501 #endif
7502 case TARGET_NR_prctl:
7503 switch (arg1) {
7504 case PR_GET_PDEATHSIG:
7506 int deathsig;
7507 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7508 if (!is_error(ret) && arg2
7509 && put_user_ual(deathsig, arg2)) {
7510 goto efault;
7512 break;
7514 #ifdef PR_GET_NAME
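/* PR_GET_NAME and PR_SET_NAME operate on the kernel's 16-byte task comm
   field (15 characters plus a trailing NUL). */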
7515 case PR_GET_NAME:
7517 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7518 if (!name) {
7519 goto efault;
7521 ret = get_errno(prctl(arg1, (unsigned long)name,
7522 arg3, arg4, arg5));
7523 unlock_user(name, arg2, 16);
7524 break;
7526 case PR_SET_NAME:
7528 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7529 if (!name) {
7530 goto efault;
7532 ret = get_errno(prctl(arg1, (unsigned long)name,
7533 arg3, arg4, arg5));
7534 unlock_user(name, arg2, 0);
7535 break;
7537 #endif
7538 default:
7539 /* Most prctl options have no pointer arguments */
7540 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7541 break;
7543 break;
7544 #ifdef TARGET_NR_arch_prctl
7545 case TARGET_NR_arch_prctl:
7546 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7547 ret = do_arch_prctl(cpu_env, arg1, arg2);
7548 break;
7549 #else
7550 goto unimplemented;
7551 #endif
7552 #endif
7553 #ifdef TARGET_NR_pread64
7554 case TARGET_NR_pread64:
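/* On ABIs that align 64-bit values to even register pairs, a pad slot
   precedes the offset, so its low/high words arrive one argument later. */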
7555 if (regpairs_aligned(cpu_env)) {
7556 arg4 = arg5;
7557 arg5 = arg6;
7559 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7560 goto efault;
7561 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7562 unlock_user(p, arg2, ret);
7563 break;
7564 case TARGET_NR_pwrite64:
7565 if (regpairs_aligned(cpu_env)) {
7566 arg4 = arg5;
7567 arg5 = arg6;
7569 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7570 goto efault;
7571 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7572 unlock_user(p, arg2, 0);
7573 break;
7574 #endif
7575 case TARGET_NR_getcwd:
7576 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7577 goto efault;
7578 ret = get_errno(sys_getcwd1(p, arg2));
7579 unlock_user(p, arg1, ret);
7580 break;
7581 case TARGET_NR_capget:
7582 goto unimplemented;
7583 case TARGET_NR_capset:
7584 goto unimplemented;
7585 case TARGET_NR_sigaltstack:
7586 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7587 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7588 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7589 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7590 break;
7591 #else
7592 goto unimplemented;
7593 #endif
7595 #ifdef CONFIG_SENDFILE
7596 case TARGET_NR_sendfile:
7598 off_t *offp = NULL;
7599 off_t off;
7600 if (arg3) {
7601 ret = get_user_sal(off, arg3);
7602 if (is_error(ret)) {
7603 break;
7605 offp = &off;
7607 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7608 if (!is_error(ret) && arg3) {
7609 abi_long ret2 = put_user_sal(off, arg3);
7610 if (is_error(ret2)) {
7611 ret = ret2;
7614 break;
7616 #ifdef TARGET_NR_sendfile64
7617 case TARGET_NR_sendfile64:
7619 off_t *offp = NULL;
7620 off_t off;
7621 if (arg3) {
7622 ret = get_user_s64(off, arg3);
7623 if (is_error(ret)) {
7624 break;
7626 offp = &off;
7628 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7629 if (!is_error(ret) && arg3) {
7630 abi_long ret2 = put_user_s64(off, arg3);
7631 if (is_error(ret2)) {
7632 ret = ret2;
7635 break;
7637 #endif
7638 #else
7639 case TARGET_NR_sendfile:
7640 #ifdef TARGET_NR_sendfile64
7641 case TARGET_NR_sendfile64:
7642 #endif
7643 goto unimplemented;
7644 #endif
7646 #ifdef TARGET_NR_getpmsg
7647 case TARGET_NR_getpmsg:
7648 goto unimplemented;
7649 #endif
7650 #ifdef TARGET_NR_putpmsg
7651 case TARGET_NR_putpmsg:
7652 goto unimplemented;
7653 #endif
7654 #ifdef TARGET_NR_vfork
7655 case TARGET_NR_vfork:
7656 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7657 0, 0, 0, 0));
7658 break;
7659 #endif
7660 #ifdef TARGET_NR_ugetrlimit
7661 case TARGET_NR_ugetrlimit:
7663 struct rlimit rlim;
7664 int resource = target_to_host_resource(arg1);
7665 ret = get_errno(getrlimit(resource, &rlim));
7666 if (!is_error(ret)) {
7667 struct target_rlimit *target_rlim;
7668 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7669 goto efault;
7670 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7671 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7672 unlock_user_struct(target_rlim, arg2, 1);
7674 break;
7676 #endif
7677 #ifdef TARGET_NR_truncate64
7678 case TARGET_NR_truncate64:
7679 if (!(p = lock_user_string(arg1)))
7680 goto efault;
7681 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7682 unlock_user(p, arg1, 0);
7683 break;
7684 #endif
7685 #ifdef TARGET_NR_ftruncate64
7686 case TARGET_NR_ftruncate64:
7687 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7688 break;
7689 #endif
7690 #ifdef TARGET_NR_stat64
7691 case TARGET_NR_stat64:
7692 if (!(p = lock_user_string(arg1)))
7693 goto efault;
7694 ret = get_errno(stat(path(p), &st));
7695 unlock_user(p, arg1, 0);
7696 if (!is_error(ret))
7697 ret = host_to_target_stat64(cpu_env, arg2, &st);
7698 break;
7699 #endif
7700 #ifdef TARGET_NR_lstat64
7701 case TARGET_NR_lstat64:
7702 if (!(p = lock_user_string(arg1)))
7703 goto efault;
7704 ret = get_errno(lstat(path(p), &st));
7705 unlock_user(p, arg1, 0);
7706 if (!is_error(ret))
7707 ret = host_to_target_stat64(cpu_env, arg2, &st);
7708 break;
7709 #endif
7710 #ifdef TARGET_NR_fstat64
7711 case TARGET_NR_fstat64:
7712 ret = get_errno(fstat(arg1, &st));
7713 if (!is_error(ret))
7714 ret = host_to_target_stat64(cpu_env, arg2, &st);
7715 break;
7716 #endif
7717 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7718 #ifdef TARGET_NR_fstatat64
7719 case TARGET_NR_fstatat64:
7720 #endif
7721 #ifdef TARGET_NR_newfstatat
7722 case TARGET_NR_newfstatat:
7723 #endif
7724 if (!(p = lock_user_string(arg2)))
7725 goto efault;
7726 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7727 if (!is_error(ret))
7728 ret = host_to_target_stat64(cpu_env, arg3, &st);
7729 break;
7730 #endif
7731 case TARGET_NR_lchown:
7732 if (!(p = lock_user_string(arg1)))
7733 goto efault;
7734 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7735 unlock_user(p, arg1, 0);
7736 break;
7737 #ifdef TARGET_NR_getuid
7738 case TARGET_NR_getuid:
7739 ret = get_errno(high2lowuid(getuid()));
7740 break;
7741 #endif
7742 #ifdef TARGET_NR_getgid
7743 case TARGET_NR_getgid:
7744 ret = get_errno(high2lowgid(getgid()));
7745 break;
7746 #endif
7747 #ifdef TARGET_NR_geteuid
7748 case TARGET_NR_geteuid:
7749 ret = get_errno(high2lowuid(geteuid()));
7750 break;
7751 #endif
7752 #ifdef TARGET_NR_getegid
7753 case TARGET_NR_getegid:
7754 ret = get_errno(high2lowgid(getegid()));
7755 break;
7756 #endif
7757 case TARGET_NR_setreuid:
7758 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7759 break;
7760 case TARGET_NR_setregid:
7761 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7762 break;
7763 case TARGET_NR_getgroups:
7765 int gidsetsize = arg1;
7766 target_id *target_grouplist;
7767 gid_t *grouplist;
7768 int i;
7770 grouplist = alloca(gidsetsize * sizeof(gid_t));
7771 ret = get_errno(getgroups(gidsetsize, grouplist));
7772 if (gidsetsize == 0)
7773 break;
7774 if (!is_error(ret)) {
7775 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7776 if (!target_grouplist)
7777 goto efault;
7778 for(i = 0;i < ret; i++)
7779 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7780 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7783 break;
7784 case TARGET_NR_setgroups:
7786 int gidsetsize = arg1;
7787 target_id *target_grouplist;
7788 gid_t *grouplist = NULL;
7789 int i;
7790 if (gidsetsize) {
7791 grouplist = alloca(gidsetsize * sizeof(gid_t));
7792 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7793 if (!target_grouplist) {
7794 ret = -TARGET_EFAULT;
7795 goto fail;
7797 for (i = 0; i < gidsetsize; i++) {
7798 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7800 unlock_user(target_grouplist, arg2, 0);
7802 ret = get_errno(setgroups(gidsetsize, grouplist));
7804 break;
7805 case TARGET_NR_fchown:
7806 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7807 break;
7808 #if defined(TARGET_NR_fchownat)
7809 case TARGET_NR_fchownat:
7810 if (!(p = lock_user_string(arg2)))
7811 goto efault;
7812 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7813 low2highgid(arg4), arg5));
7814 unlock_user(p, arg2, 0);
7815 break;
7816 #endif
7817 #ifdef TARGET_NR_setresuid
7818 case TARGET_NR_setresuid:
7819 ret = get_errno(setresuid(low2highuid(arg1),
7820 low2highuid(arg2),
7821 low2highuid(arg3)));
7822 break;
7823 #endif
7824 #ifdef TARGET_NR_getresuid
7825 case TARGET_NR_getresuid:
7827 uid_t ruid, euid, suid;
7828 ret = get_errno(getresuid(&ruid, &euid, &suid));
7829 if (!is_error(ret)) {
7830 if (put_user_u16(high2lowuid(ruid), arg1)
7831 || put_user_u16(high2lowuid(euid), arg2)
7832 || put_user_u16(high2lowuid(suid), arg3))
7833 goto efault;
7836 break;
7837 #endif
7838 #ifdef TARGET_NR_getresgid
7839 case TARGET_NR_setresgid:
7840 ret = get_errno(setresgid(low2highgid(arg1),
7841 low2highgid(arg2),
7842 low2highgid(arg3)));
7843 break;
7844 #endif
7845 #ifdef TARGET_NR_getresgid
7846 case TARGET_NR_getresgid:
7848 gid_t rgid, egid, sgid;
7849 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7850 if (!is_error(ret)) {
7851 if (put_user_u16(high2lowgid(rgid), arg1)
7852 || put_user_u16(high2lowgid(egid), arg2)
7853 || put_user_u16(high2lowgid(sgid), arg3))
7854 goto efault;
7857 break;
7858 #endif
7859 case TARGET_NR_chown:
7860 if (!(p = lock_user_string(arg1)))
7861 goto efault;
7862 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7863 unlock_user(p, arg1, 0);
7864 break;
7865 case TARGET_NR_setuid:
7866 ret = get_errno(setuid(low2highuid(arg1)));
7867 break;
7868 case TARGET_NR_setgid:
7869 ret = get_errno(setgid(low2highgid(arg1)));
7870 break;
7871 case TARGET_NR_setfsuid:
7872 ret = get_errno(setfsuid(arg1));
7873 break;
7874 case TARGET_NR_setfsgid:
7875 ret = get_errno(setfsgid(arg1));
7876 break;
7878 #ifdef TARGET_NR_lchown32
7879 case TARGET_NR_lchown32:
7880 if (!(p = lock_user_string(arg1)))
7881 goto efault;
7882 ret = get_errno(lchown(p, arg2, arg3));
7883 unlock_user(p, arg1, 0);
7884 break;
7885 #endif
7886 #ifdef TARGET_NR_getuid32
7887 case TARGET_NR_getuid32:
7888 ret = get_errno(getuid());
7889 break;
7890 #endif
7892 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7893 /* Alpha specific */
7894 case TARGET_NR_getxuid:
7896 uid_t euid;
7897 euid=geteuid();
7898 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7900 ret = get_errno(getuid());
7901 break;
7902 #endif
7903 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7904 /* Alpha specific */
7905 case TARGET_NR_getxgid:
7907 uid_t egid;
7908 egid=getegid();
7909 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7911 ret = get_errno(getgid());
7912 break;
7913 #endif
7914 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7915 /* Alpha specific */
7916 case TARGET_NR_osf_getsysinfo:
7917 ret = -TARGET_EOPNOTSUPP;
7918 switch (arg1) {
7919 case TARGET_GSI_IEEE_FP_CONTROL:
7921 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7923 /* Copied from linux ieee_fpcr_to_swcr. */
7924 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7925 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7926 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7927 | SWCR_TRAP_ENABLE_DZE
7928 | SWCR_TRAP_ENABLE_OVF);
7929 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7930 | SWCR_TRAP_ENABLE_INE);
7931 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7932 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7934 if (put_user_u64 (swcr, arg2))
7935 goto efault;
7936 ret = 0;
7938 break;
7940 /* case GSI_IEEE_STATE_AT_SIGNAL:
7941 -- Not implemented in linux kernel.
7942 case GSI_UACPROC:
7943 -- Retrieves current unaligned access state; not much used.
7944 case GSI_PROC_TYPE:
7945 -- Retrieves implver information; surely not used.
7946 case GSI_GET_HWRPB:
7947 -- Grabs a copy of the HWRPB; surely not used. */
7950 break;
7951 #endif
7952 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7953 /* Alpha specific */
7954 case TARGET_NR_osf_setsysinfo:
7955 ret = -TARGET_EOPNOTSUPP;
7956 switch (arg1) {
7957 case TARGET_SSI_IEEE_FP_CONTROL:
7959 uint64_t swcr, fpcr, orig_fpcr;
7961 if (get_user_u64 (swcr, arg2)) {
7962 goto efault;
7964 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7965 fpcr = orig_fpcr & FPCR_DYN_MASK;
7967 /* Copied from linux ieee_swcr_to_fpcr. */
7968 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7969 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7970 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7971 | SWCR_TRAP_ENABLE_DZE
7972 | SWCR_TRAP_ENABLE_OVF)) << 48;
7973 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7974 | SWCR_TRAP_ENABLE_INE)) << 57;
7975 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7976 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7978 cpu_alpha_store_fpcr(cpu_env, fpcr);
7979 ret = 0;
7981 break;
7983 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7985 uint64_t exc, fpcr, orig_fpcr;
7986 int si_code;
7988 if (get_user_u64(exc, arg2)) {
7989 goto efault;
7992 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7994 /* We only add to the exception status here. */
7995 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7997 cpu_alpha_store_fpcr(cpu_env, fpcr);
7998 ret = 0;
8000 /* Old exceptions are not signaled. */
8001 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8003 /* If any exceptions were set by this call
8004 and are unmasked, send a signal. */
8005 si_code = 0;
8006 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8007 si_code = TARGET_FPE_FLTRES;
8009 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8010 si_code = TARGET_FPE_FLTUND;
8012 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8013 si_code = TARGET_FPE_FLTOVF;
8015 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8016 si_code = TARGET_FPE_FLTDIV;
8018 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8019 si_code = TARGET_FPE_FLTINV;
8021 if (si_code != 0) {
8022 target_siginfo_t info;
8023 info.si_signo = SIGFPE;
8024 info.si_errno = 0;
8025 info.si_code = si_code;
8026 info._sifields._sigfault._addr
8027 = ((CPUArchState *)cpu_env)->pc;
8028 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8031 break;
8033 /* case SSI_NVPAIRS:
8034 -- Used with SSIN_UACPROC to enable unaligned accesses.
8035 case SSI_IEEE_STATE_AT_SIGNAL:
8036 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8037 -- Not implemented in linux kernel. */
8040 break;
8041 #endif
8042 #ifdef TARGET_NR_osf_sigprocmask
8043 /* Alpha specific. */
8044 case TARGET_NR_osf_sigprocmask:
8046 abi_ulong mask;
8047 int how;
8048 sigset_t set, oldset;
8050 switch(arg1) {
8051 case TARGET_SIG_BLOCK:
8052 how = SIG_BLOCK;
8053 break;
8054 case TARGET_SIG_UNBLOCK:
8055 how = SIG_UNBLOCK;
8056 break;
8057 case TARGET_SIG_SETMASK:
8058 how = SIG_SETMASK;
8059 break;
8060 default:
8061 ret = -TARGET_EINVAL;
8062 goto fail;
8064 mask = arg2;
8065 target_to_host_old_sigset(&set, &mask);
8066 sigprocmask(how, &set, &oldset);
8067 host_to_target_old_sigset(&mask, &oldset);
8068 ret = mask;
8070 break;
8071 #endif
8073 #ifdef TARGET_NR_getgid32
8074 case TARGET_NR_getgid32:
8075 ret = get_errno(getgid());
8076 break;
8077 #endif
8078 #ifdef TARGET_NR_geteuid32
8079 case TARGET_NR_geteuid32:
8080 ret = get_errno(geteuid());
8081 break;
8082 #endif
8083 #ifdef TARGET_NR_getegid32
8084 case TARGET_NR_getegid32:
8085 ret = get_errno(getegid());
8086 break;
8087 #endif
8088 #ifdef TARGET_NR_setreuid32
8089 case TARGET_NR_setreuid32:
8090 ret = get_errno(setreuid(arg1, arg2));
8091 break;
8092 #endif
8093 #ifdef TARGET_NR_setregid32
8094 case TARGET_NR_setregid32:
8095 ret = get_errno(setregid(arg1, arg2));
8096 break;
8097 #endif
8098 #ifdef TARGET_NR_getgroups32
8099 case TARGET_NR_getgroups32:
8101 int gidsetsize = arg1;
8102 uint32_t *target_grouplist;
8103 gid_t *grouplist;
8104 int i;
8106 grouplist = alloca(gidsetsize * sizeof(gid_t));
8107 ret = get_errno(getgroups(gidsetsize, grouplist));
8108 if (gidsetsize == 0)
8109 break;
8110 if (!is_error(ret)) {
8111 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8112 if (!target_grouplist) {
8113 ret = -TARGET_EFAULT;
8114 goto fail;
8116 for(i = 0;i < ret; i++)
8117 target_grouplist[i] = tswap32(grouplist[i]);
8118 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8121 break;
8122 #endif
8123 #ifdef TARGET_NR_setgroups32
8124 case TARGET_NR_setgroups32:
8126 int gidsetsize = arg1;
8127 uint32_t *target_grouplist;
8128 gid_t *grouplist;
8129 int i;
8131 grouplist = alloca(gidsetsize * sizeof(gid_t));
8132 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8133 if (!target_grouplist) {
8134 ret = -TARGET_EFAULT;
8135 goto fail;
8137 for(i = 0;i < gidsetsize; i++)
8138 grouplist[i] = tswap32(target_grouplist[i]);
8139 unlock_user(target_grouplist, arg2, 0);
8140 ret = get_errno(setgroups(gidsetsize, grouplist));
8142 break;
8143 #endif
8144 #ifdef TARGET_NR_fchown32
8145 case TARGET_NR_fchown32:
8146 ret = get_errno(fchown(arg1, arg2, arg3));
8147 break;
8148 #endif
8149 #ifdef TARGET_NR_setresuid32
8150 case TARGET_NR_setresuid32:
8151 ret = get_errno(setresuid(arg1, arg2, arg3));
8152 break;
8153 #endif
8154 #ifdef TARGET_NR_getresuid32
8155 case TARGET_NR_getresuid32:
8157 uid_t ruid, euid, suid;
8158 ret = get_errno(getresuid(&ruid, &euid, &suid));
8159 if (!is_error(ret)) {
8160 if (put_user_u32(ruid, arg1)
8161 || put_user_u32(euid, arg2)
8162 || put_user_u32(suid, arg3))
8163 goto efault;
8166 break;
8167 #endif
8168 #ifdef TARGET_NR_setresgid32
8169 case TARGET_NR_setresgid32:
8170 ret = get_errno(setresgid(arg1, arg2, arg3));
8171 break;
8172 #endif
8173 #ifdef TARGET_NR_getresgid32
8174 case TARGET_NR_getresgid32:
8176 gid_t rgid, egid, sgid;
8177 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8178 if (!is_error(ret)) {
8179 if (put_user_u32(rgid, arg1)
8180 || put_user_u32(egid, arg2)
8181 || put_user_u32(sgid, arg3))
8182 goto efault;
8185 break;
8186 #endif
8187 #ifdef TARGET_NR_chown32
8188 case TARGET_NR_chown32:
8189 if (!(p = lock_user_string(arg1)))
8190 goto efault;
8191 ret = get_errno(chown(p, arg2, arg3));
8192 unlock_user(p, arg1, 0);
8193 break;
8194 #endif
8195 #ifdef TARGET_NR_setuid32
8196 case TARGET_NR_setuid32:
8197 ret = get_errno(setuid(arg1));
8198 break;
8199 #endif
8200 #ifdef TARGET_NR_setgid32
8201 case TARGET_NR_setgid32:
8202 ret = get_errno(setgid(arg1));
8203 break;
8204 #endif
8205 #ifdef TARGET_NR_setfsuid32
8206 case TARGET_NR_setfsuid32:
8207 ret = get_errno(setfsuid(arg1));
8208 break;
8209 #endif
8210 #ifdef TARGET_NR_setfsgid32
8211 case TARGET_NR_setfsgid32:
8212 ret = get_errno(setfsgid(arg1));
8213 break;
8214 #endif
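/* The *32 variants above exist because several 32-bit ABIs originally used
 * 16-bit uid_t/gid_t; the "32" syscalls take and return full 32-bit IDs.
 * Host IDs are already at least 32 bits wide, so most cases call straight
 * through, and only the group lists need per-entry tswap32() conversion when
 * guest and host endianness differ. */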
8216 case TARGET_NR_pivot_root:
8217 goto unimplemented;
8218 #ifdef TARGET_NR_mincore
8219 case TARGET_NR_mincore:
8221 void *a;
8222 ret = -TARGET_EFAULT;
8223 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8224 goto efault;
8225 if (!(p = lock_user_string(arg3)))
8226 goto mincore_fail;
8227 ret = get_errno(mincore(a, arg2, p));
8228 unlock_user(p, arg3, ret);
8229 mincore_fail:
8230 unlock_user(a, arg1, 0);
8232 break;
8233 #endif
8234 #ifdef TARGET_NR_arm_fadvise64_64
8235 case TARGET_NR_arm_fadvise64_64:
8237 /*
8238 * arm_fadvise64_64 looks like fadvise64_64 but
8239 * with different argument order
8240 */
8241 abi_long temp;
8242 temp = arg3;
8243 arg3 = arg4;
8244 arg4 = temp;
8246 #endif
8247 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8248 #ifdef TARGET_NR_fadvise64_64
8249 case TARGET_NR_fadvise64_64:
8250 #endif
8251 #ifdef TARGET_NR_fadvise64
8252 case TARGET_NR_fadvise64:
8253 #endif
8254 #ifdef TARGET_S390X
8255 switch (arg4) {
8256 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8257 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8258 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8259 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8260 default: break;
8262 #endif
8263 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8264 break;
8265 #endif
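/* Notes on the fadvise cases above: the advice constants are not uniform
 * across ABIs.  An s390x guest uses 6/7 for POSIX_FADV_DONTNEED/NOREUSE, so
 * those are remapped to the host constants while the generic 4/5 are turned
 * into deliberately invalid values.  arm_fadvise64_64 is the same operation
 * with two arguments in a different order (see the comment in that case),
 * which is why arg3 and arg4 are swapped before falling through to the
 * common posix_fadvise() call. */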
8266 #ifdef TARGET_NR_madvise
8267 case TARGET_NR_madvise:
8268 /* A straight passthrough may not be safe because qemu sometimes
8269 turns private file-backed mappings into anonymous mappings.
8270 This will break MADV_DONTNEED.
8271 This is a hint, so ignoring and returning success is ok. */
8272 ret = get_errno(0);
8273 break;
8274 #endif
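/* Why madvise is a no-op here: QEMU may back a guest's private file mapping
 * with an anonymous host mapping, and MADV_DONTNEED on an anonymous mapping
 * discards the pages to zero-fill instead of re-reading them from the file,
 * which would corrupt guest state.  Since madvise() is only advisory,
 * reporting success without acting on the hint is a safe fallback. */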
8275 #if TARGET_ABI_BITS == 32
8276 case TARGET_NR_fcntl64:
8278 int cmd;
8279 struct flock64 fl;
8280 struct target_flock64 *target_fl;
8281 #ifdef TARGET_ARM
8282 struct target_eabi_flock64 *target_efl;
8283 #endif
8285 cmd = target_to_host_fcntl_cmd(arg2);
8286 if (cmd == -TARGET_EINVAL) {
8287 ret = cmd;
8288 break;
8291 switch(arg2) {
8292 case TARGET_F_GETLK64:
8293 #ifdef TARGET_ARM
8294 if (((CPUARMState *)cpu_env)->eabi) {
8295 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8296 goto efault;
8297 fl.l_type = tswap16(target_efl->l_type);
8298 fl.l_whence = tswap16(target_efl->l_whence);
8299 fl.l_start = tswap64(target_efl->l_start);
8300 fl.l_len = tswap64(target_efl->l_len);
8301 fl.l_pid = tswap32(target_efl->l_pid);
8302 unlock_user_struct(target_efl, arg3, 0);
8303 } else
8304 #endif
8306 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8307 goto efault;
8308 fl.l_type = tswap16(target_fl->l_type);
8309 fl.l_whence = tswap16(target_fl->l_whence);
8310 fl.l_start = tswap64(target_fl->l_start);
8311 fl.l_len = tswap64(target_fl->l_len);
8312 fl.l_pid = tswap32(target_fl->l_pid);
8313 unlock_user_struct(target_fl, arg3, 0);
8315 ret = get_errno(fcntl(arg1, cmd, &fl));
8316 if (ret == 0) {
8317 #ifdef TARGET_ARM
8318 if (((CPUARMState *)cpu_env)->eabi) {
8319 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8320 goto efault;
8321 target_efl->l_type = tswap16(fl.l_type);
8322 target_efl->l_whence = tswap16(fl.l_whence);
8323 target_efl->l_start = tswap64(fl.l_start);
8324 target_efl->l_len = tswap64(fl.l_len);
8325 target_efl->l_pid = tswap32(fl.l_pid);
8326 unlock_user_struct(target_efl, arg3, 1);
8327 } else
8328 #endif
8330 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8331 goto efault;
8332 target_fl->l_type = tswap16(fl.l_type);
8333 target_fl->l_whence = tswap16(fl.l_whence);
8334 target_fl->l_start = tswap64(fl.l_start);
8335 target_fl->l_len = tswap64(fl.l_len);
8336 target_fl->l_pid = tswap32(fl.l_pid);
8337 unlock_user_struct(target_fl, arg3, 1);
8340 break;
8342 case TARGET_F_SETLK64:
8343 case TARGET_F_SETLKW64:
8344 #ifdef TARGET_ARM
8345 if (((CPUARMState *)cpu_env)->eabi) {
8346 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8347 goto efault;
8348 fl.l_type = tswap16(target_efl->l_type);
8349 fl.l_whence = tswap16(target_efl->l_whence);
8350 fl.l_start = tswap64(target_efl->l_start);
8351 fl.l_len = tswap64(target_efl->l_len);
8352 fl.l_pid = tswap32(target_efl->l_pid);
8353 unlock_user_struct(target_efl, arg3, 0);
8354 } else
8355 #endif
8357 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8358 goto efault;
8359 fl.l_type = tswap16(target_fl->l_type);
8360 fl.l_whence = tswap16(target_fl->l_whence);
8361 fl.l_start = tswap64(target_fl->l_start);
8362 fl.l_len = tswap64(target_fl->l_len);
8363 fl.l_pid = tswap32(target_fl->l_pid);
8364 unlock_user_struct(target_fl, arg3, 0);
8366 ret = get_errno(fcntl(arg1, cmd, &fl));
8367 break;
8368 default:
8369 ret = do_fcntl(arg1, arg2, arg3);
8370 break;
8372 break;
8374 #endif
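/* The fcntl64 case above exists only for 32-bit ABIs: F_GETLK64/F_SETLK64
 * operate on a 64-bit struct flock64 whose field layout and endianness differ
 * between guest and host, so each member is converted individually with
 * tswap16/32/64.  ARM needs an extra variant because the EABI aligns the
 * 64-bit l_start/l_len members differently from the old ABI, hence the
 * separate target_eabi_flock64 path. */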
8375 #ifdef TARGET_NR_cacheflush
8376 case TARGET_NR_cacheflush:
8377 /* self-modifying code is handled automatically, so nothing needed */
8378 ret = 0;
8379 break;
8380 #endif
8381 #ifdef TARGET_NR_security
8382 case TARGET_NR_security:
8383 goto unimplemented;
8384 #endif
8385 #ifdef TARGET_NR_getpagesize
8386 case TARGET_NR_getpagesize:
8387 ret = TARGET_PAGE_SIZE;
8388 break;
8389 #endif
8390 case TARGET_NR_gettid:
8391 ret = get_errno(gettid());
8392 break;
8393 #ifdef TARGET_NR_readahead
8394 case TARGET_NR_readahead:
8395 #if TARGET_ABI_BITS == 32
8396 if (regpairs_aligned(cpu_env)) {
8397 arg2 = arg3;
8398 arg3 = arg4;
8399 arg4 = arg5;
8401 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8402 #else
8403 ret = get_errno(readahead(arg1, arg2, arg3));
8404 #endif
8405 break;
8406 #endif
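/* readahead takes a 64-bit file offset.  On 32-bit ABIs it arrives split
 * across two registers (low word in arg2, high word in arg3 here), and ABIs
 * that require 64-bit arguments to start in an even register pair insert a
 * padding register first, which is what regpairs_aligned() detects; in that
 * case every argument shifts up by one.  A rough sketch of the reassembly:
 *     off64_t offset = ((off64_t)hi << 32) | lo;
 */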
8407 #ifdef CONFIG_ATTR
8408 #ifdef TARGET_NR_setxattr
8409 case TARGET_NR_listxattr:
8410 case TARGET_NR_llistxattr:
8412 void *p, *b = 0;
8413 if (arg2) {
8414 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8415 if (!b) {
8416 ret = -TARGET_EFAULT;
8417 break;
8420 p = lock_user_string(arg1);
8421 if (p) {
8422 if (num == TARGET_NR_listxattr) {
8423 ret = get_errno(listxattr(p, b, arg3));
8424 } else {
8425 ret = get_errno(llistxattr(p, b, arg3));
8427 } else {
8428 ret = -TARGET_EFAULT;
8430 unlock_user(p, arg1, 0);
8431 unlock_user(b, arg2, arg3);
8432 break;
8434 case TARGET_NR_flistxattr:
8436 void *b = 0;
8437 if (arg2) {
8438 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8439 if (!b) {
8440 ret = -TARGET_EFAULT;
8441 break;
8444 ret = get_errno(flistxattr(arg1, b, arg3));
8445 unlock_user(b, arg2, arg3);
8446 break;
8448 case TARGET_NR_setxattr:
8449 case TARGET_NR_lsetxattr:
8451 void *p, *n, *v = 0;
8452 if (arg3) {
8453 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8454 if (!v) {
8455 ret = -TARGET_EFAULT;
8456 break;
8459 p = lock_user_string(arg1);
8460 n = lock_user_string(arg2);
8461 if (p && n) {
8462 if (num == TARGET_NR_setxattr) {
8463 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8464 } else {
8465 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8467 } else {
8468 ret = -TARGET_EFAULT;
8470 unlock_user(p, arg1, 0);
8471 unlock_user(n, arg2, 0);
8472 unlock_user(v, arg3, 0);
8474 break;
8475 case TARGET_NR_fsetxattr:
8477 void *n, *v = 0;
8478 if (arg3) {
8479 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8480 if (!v) {
8481 ret = -TARGET_EFAULT;
8482 break;
8485 n = lock_user_string(arg2);
8486 if (n) {
8487 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8488 } else {
8489 ret = -TARGET_EFAULT;
8491 unlock_user(n, arg2, 0);
8492 unlock_user(v, arg3, 0);
8494 break;
8495 case TARGET_NR_getxattr:
8496 case TARGET_NR_lgetxattr:
8498 void *p, *n, *v = 0;
8499 if (arg3) {
8500 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8501 if (!v) {
8502 ret = -TARGET_EFAULT;
8503 break;
8506 p = lock_user_string(arg1);
8507 n = lock_user_string(arg2);
8508 if (p && n) {
8509 if (num == TARGET_NR_getxattr) {
8510 ret = get_errno(getxattr(p, n, v, arg4));
8511 } else {
8512 ret = get_errno(lgetxattr(p, n, v, arg4));
8514 } else {
8515 ret = -TARGET_EFAULT;
8517 unlock_user(p, arg1, 0);
8518 unlock_user(n, arg2, 0);
8519 unlock_user(v, arg3, arg4);
8521 break;
8522 case TARGET_NR_fgetxattr:
8524 void *n, *v = 0;
8525 if (arg3) {
8526 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8527 if (!v) {
8528 ret = -TARGET_EFAULT;
8529 break;
8532 n = lock_user_string(arg2);
8533 if (n) {
8534 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8535 } else {
8536 ret = -TARGET_EFAULT;
8538 unlock_user(n, arg2, 0);
8539 unlock_user(v, arg3, arg4);
8541 break;
8542 case TARGET_NR_removexattr:
8543 case TARGET_NR_lremovexattr:
8545 void *p, *n;
8546 p = lock_user_string(arg1);
8547 n = lock_user_string(arg2);
8548 if (p && n) {
8549 if (num == TARGET_NR_removexattr) {
8550 ret = get_errno(removexattr(p, n));
8551 } else {
8552 ret = get_errno(lremovexattr(p, n));
8554 } else {
8555 ret = -TARGET_EFAULT;
8557 unlock_user(p, arg1, 0);
8558 unlock_user(n, arg2, 0);
8560 break;
8561 case TARGET_NR_fremovexattr:
8563 void *n;
8564 n = lock_user_string(arg2);
8565 if (n) {
8566 ret = get_errno(fremovexattr(arg1, n));
8567 } else {
8568 ret = -TARGET_EFAULT;
8570 unlock_user(n, arg2, 0);
8572 break;
8573 #endif
8574 #endif /* CONFIG_ATTR */
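/* Common pattern in the xattr cases above: attribute names are NUL-terminated
 * guest strings (lock_user_string), value buffers are plain byte ranges
 * (lock_user with the caller-supplied length), and both may legitimately be
 * absent, since a NULL/zero-length buffer is how list/get report the required
 * size.  Each case therefore only fails with -TARGET_EFAULT when a non-zero
 * guest pointer cannot be mapped. */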
8575 #ifdef TARGET_NR_set_thread_area
8576 case TARGET_NR_set_thread_area:
8577 #if defined(TARGET_MIPS)
8578 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8579 ret = 0;
8580 break;
8581 #elif defined(TARGET_CRIS)
8582 if (arg1 & 0xff)
8583 ret = -TARGET_EINVAL;
8584 else {
8585 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8586 ret = 0;
8588 break;
8589 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8590 ret = do_set_thread_area(cpu_env, arg1);
8591 break;
8592 #elif defined(TARGET_M68K)
8594 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8595 ts->tp_value = arg1;
8596 ret = 0;
8597 break;
8599 #else
8600 goto unimplemented_nowarn;
8601 #endif
8602 #endif
8603 #ifdef TARGET_NR_get_thread_area
8604 case TARGET_NR_get_thread_area:
8605 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8606 ret = do_get_thread_area(cpu_env, arg1);
8607 break;
8608 #elif defined(TARGET_M68K)
8610 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8611 ret = ts->tp_value;
8612 break;
8614 #else
8615 goto unimplemented_nowarn;
8616 #endif
8617 #endif
8618 #ifdef TARGET_NR_getdomainname
8619 case TARGET_NR_getdomainname:
8620 goto unimplemented_nowarn;
8621 #endif
8623 #ifdef TARGET_NR_clock_gettime
8624 case TARGET_NR_clock_gettime:
8626 struct timespec ts;
8627 ret = get_errno(clock_gettime(arg1, &ts));
8628 if (!is_error(ret)) {
8629 host_to_target_timespec(arg2, &ts);
8631 break;
8633 #endif
8634 #ifdef TARGET_NR_clock_getres
8635 case TARGET_NR_clock_getres:
8637 struct timespec ts;
8638 ret = get_errno(clock_getres(arg1, &ts));
8639 if (!is_error(ret)) {
8640 host_to_target_timespec(arg2, &ts);
8642 break;
8644 #endif
8645 #ifdef TARGET_NR_clock_nanosleep
8646 case TARGET_NR_clock_nanosleep:
8648 struct timespec ts;
8649 target_to_host_timespec(&ts, arg3);
8650 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8651 if (arg4)
8652 host_to_target_timespec(arg4, &ts);
8653 break;
8655 #endif
8657 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8658 case TARGET_NR_set_tid_address:
8659 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8660 break;
8661 #endif
8663 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8664 case TARGET_NR_tkill:
8665 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8666 break;
8667 #endif
8669 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8670 case TARGET_NR_tgkill:
8671 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8672 target_to_host_signal(arg3)));
8673 break;
8674 #endif
8676 #ifdef TARGET_NR_set_robust_list
8677 case TARGET_NR_set_robust_list:
8678 case TARGET_NR_get_robust_list:
8679 /* The ABI for supporting robust futexes has userspace pass
8680 * the kernel a pointer to a linked list which is updated by
8681 * userspace after the syscall; the list is walked by the kernel
8682 * when the thread exits. Since the linked list in QEMU guest
8683 * memory isn't a valid linked list for the host and we have
8684 * no way to reliably intercept the thread-death event, we can't
8685 * support these. Silently return ENOSYS so that guest userspace
8686 * falls back to a non-robust futex implementation (which should
8687 * be OK except in the corner case of the guest crashing while
8688 * holding a mutex that is shared with another process via
8689 * shared memory).
8690 */
8691 goto unimplemented_nowarn;
8692 #endif
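/* For illustration only (not QEMU code): guest libc registers its robust
 * futex list roughly as
 *     struct robust_list_head head;
 *     syscall(__NR_set_robust_list, &head, sizeof(head));
 * and the kernel walks the list anchored at head when the thread dies.  The
 * nodes are guest-side structures containing guest pointers, so the host
 * kernel could not traverse them for an emulated thread, which is why the
 * case above returns ENOSYS instead of forwarding the call. */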
8694 #if defined(TARGET_NR_utimensat)
8695 case TARGET_NR_utimensat:
8697 struct timespec *tsp, ts[2];
8698 if (!arg3) {
8699 tsp = NULL;
8700 } else {
8701 target_to_host_timespec(ts, arg3);
8702 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8703 tsp = ts;
8705 if (!arg2)
8706 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8707 else {
8708 if (!(p = lock_user_string(arg2))) {
8709 ret = -TARGET_EFAULT;
8710 goto fail;
8712 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8713 unlock_user(p, arg2, 0);
8716 break;
8717 #endif
8718 case TARGET_NR_futex:
8719 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8720 break;
8721 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8722 case TARGET_NR_inotify_init:
8723 ret = get_errno(sys_inotify_init());
8724 break;
8725 #endif
8726 #ifdef CONFIG_INOTIFY1
8727 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8728 case TARGET_NR_inotify_init1:
8729 ret = get_errno(sys_inotify_init1(arg1));
8730 break;
8731 #endif
8732 #endif
8733 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8734 case TARGET_NR_inotify_add_watch:
8735 p = lock_user_string(arg2);
8736 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8737 unlock_user(p, arg2, 0);
8738 break;
8739 #endif
8740 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8741 case TARGET_NR_inotify_rm_watch:
8742 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8743 break;
8744 #endif
8746 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8747 case TARGET_NR_mq_open:
8749 struct mq_attr posix_mq_attr;
8751 p = lock_user_string(arg1 - 1);
8752 if (arg4 != 0)
8753 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8754 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8755 unlock_user (p, arg1, 0);
8757 break;
8759 case TARGET_NR_mq_unlink:
8760 p = lock_user_string(arg1 - 1);
8761 ret = get_errno(mq_unlink(p));
8762 unlock_user (p, arg1, 0);
8763 break;
8765 case TARGET_NR_mq_timedsend:
8767 struct timespec ts;
8769 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8770 if (arg5 != 0) {
8771 target_to_host_timespec(&ts, arg5);
8772 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8773 host_to_target_timespec(arg5, &ts);
8775 else
8776 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8777 unlock_user (p, arg2, arg3);
8779 break;
8781 case TARGET_NR_mq_timedreceive:
8783 struct timespec ts;
8784 unsigned int prio;
8786 p = lock_user (VERIFY_WRITE, arg2, arg3, 0);
8787 if (arg5 != 0) {
8788 target_to_host_timespec(&ts, arg5);
8789 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8790 host_to_target_timespec(arg5, &ts);
8792 else
8793 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8794 unlock_user (p, arg2, arg3);
8795 if (arg4 != 0)
8796 put_user_u32(prio, arg4);
8798 break;
8800 /* Not implemented for now... */
8801 /* case TARGET_NR_mq_notify: */
8802 /* break; */
8804 case TARGET_NR_mq_getsetattr:
8806 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8807 ret = 0;
8808 if (arg3 != 0) {
8809 ret = mq_getattr(arg1, &posix_mq_attr_out);
8810 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8812 if (arg2 != 0) {
8813 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8814 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8818 break;
8819 #endif
8821 #ifdef CONFIG_SPLICE
8822 #ifdef TARGET_NR_tee
8823 case TARGET_NR_tee:
8825 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8827 break;
8828 #endif
8829 #ifdef TARGET_NR_splice
8830 case TARGET_NR_splice:
8832 loff_t loff_in, loff_out;
8833 loff_t *ploff_in = NULL, *ploff_out = NULL;
8834 if(arg2) {
8835 get_user_u64(loff_in, arg2);
8836 ploff_in = &loff_in;
8838 if(arg4) {
8839 get_user_u64(loff_out, arg4);
8840 ploff_out = &loff_out;
8842 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8844 break;
8845 #endif
8846 #ifdef TARGET_NR_vmsplice
8847 case TARGET_NR_vmsplice:
8849 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8850 if (vec != NULL) {
8851 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8852 unlock_iovec(vec, arg2, arg3, 0);
8853 } else {
8854 ret = -host_to_target_errno(errno);
8857 break;
8858 #endif
8859 #endif /* CONFIG_SPLICE */
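/* splice() notes: arg2 and arg4 are optional guest pointers to 64-bit in/out
 * file offsets.  When non-NULL they are read into host loff_t variables and
 * passed by pointer; note that the offsets updated by the host kernel are not
 * copied back out to guest memory here. */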
8860 #ifdef CONFIG_EVENTFD
8861 #if defined(TARGET_NR_eventfd)
8862 case TARGET_NR_eventfd:
8863 ret = get_errno(eventfd(arg1, 0));
8864 break;
8865 #endif
8866 #if defined(TARGET_NR_eventfd2)
8867 case TARGET_NR_eventfd2:
8869 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8870 if (arg2 & TARGET_O_NONBLOCK) {
8871 host_flags |= O_NONBLOCK;
8873 if (arg2 & TARGET_O_CLOEXEC) {
8874 host_flags |= O_CLOEXEC;
8876 ret = get_errno(eventfd(arg1, host_flags));
8877 break;
8879 #endif
8880 #endif /* CONFIG_EVENTFD */
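/* eventfd2 flag handling: TARGET_O_NONBLOCK and TARGET_O_CLOEXEC are
 * target-ABI constants and need not have the same numeric values as the
 * host's O_NONBLOCK/O_CLOEXEC (which EFD_NONBLOCK/EFD_CLOEXEC share), so the
 * recognised bits are translated explicitly and any remaining bits are passed
 * through unchanged for the host kernel to accept or reject. */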
8881 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8882 case TARGET_NR_fallocate:
8883 #if TARGET_ABI_BITS == 32
8884 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8885 target_offset64(arg5, arg6)));
8886 #else
8887 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8888 #endif
8889 break;
8890 #endif
8891 #if defined(CONFIG_SYNC_FILE_RANGE)
8892 #if defined(TARGET_NR_sync_file_range)
8893 case TARGET_NR_sync_file_range:
8894 #if TARGET_ABI_BITS == 32
8895 #if defined(TARGET_MIPS)
8896 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8897 target_offset64(arg5, arg6), arg7));
8898 #else
8899 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8900 target_offset64(arg4, arg5), arg6));
8901 #endif /* !TARGET_MIPS */
8902 #else
8903 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8904 #endif
8905 break;
8906 #endif
8907 #if defined(TARGET_NR_sync_file_range2)
8908 case TARGET_NR_sync_file_range2:
8909 /* This is like sync_file_range but the arguments are reordered */
8910 #if TARGET_ABI_BITS == 32
8911 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8912 target_offset64(arg5, arg6), arg2));
8913 #else
8914 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8915 #endif
8916 break;
8917 #endif
8918 #endif
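/* sync_file_range also takes a 64-bit offset and length.  On 32-bit ABIs they
 * are rebuilt with target_offset64(); MIPS passes them in argument slots 3-6
 * with the flags in a seventh slot, while other 32-bit targets use slots 2-5
 * with the flags in slot 6.  sync_file_range2 is the same operation with the
 * flags moved up to the second argument, which is why that case reorders them
 * before calling the host. */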
8919 #if defined(CONFIG_EPOLL)
8920 #if defined(TARGET_NR_epoll_create)
8921 case TARGET_NR_epoll_create:
8922 ret = get_errno(epoll_create(arg1));
8923 break;
8924 #endif
8925 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8926 case TARGET_NR_epoll_create1:
8927 ret = get_errno(epoll_create1(arg1));
8928 break;
8929 #endif
8930 #if defined(TARGET_NR_epoll_ctl)
8931 case TARGET_NR_epoll_ctl:
8933 struct epoll_event ep;
8934 struct epoll_event *epp = 0;
8935 if (arg4) {
8936 struct target_epoll_event *target_ep;
8937 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8938 goto efault;
8940 ep.events = tswap32(target_ep->events);
8941 /* The epoll_data_t union is just opaque data to the kernel,
8942 * so we transfer all 64 bits across and need not worry what
8943 * actual data type it is.
8944 */
8945 ep.data.u64 = tswap64(target_ep->data.u64);
8946 unlock_user_struct(target_ep, arg4, 0);
8947 epp = &ep;
8949 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8950 break;
8952 #endif
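/* epoll data handling: only the events mask has meaning to the kernel, so it
 * gets a 32-bit byte swap, while epoll_data_t is treated as an opaque 64-bit
 * blob (tswap64) and handed back to the guest untouched by epoll_wait below;
 * whether the guest stored a pointer, an fd or a u64 there is irrelevant. */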
8954 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8955 #define IMPLEMENT_EPOLL_PWAIT
8956 #endif
8957 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8958 #if defined(TARGET_NR_epoll_wait)
8959 case TARGET_NR_epoll_wait:
8960 #endif
8961 #if defined(IMPLEMENT_EPOLL_PWAIT)
8962 case TARGET_NR_epoll_pwait:
8963 #endif
8965 struct target_epoll_event *target_ep;
8966 struct epoll_event *ep;
8967 int epfd = arg1;
8968 int maxevents = arg3;
8969 int timeout = arg4;
8971 target_ep = lock_user(VERIFY_WRITE, arg2,
8972 maxevents * sizeof(struct target_epoll_event), 1);
8973 if (!target_ep) {
8974 goto efault;
8977 ep = alloca(maxevents * sizeof(struct epoll_event));
8979 switch (num) {
8980 #if defined(IMPLEMENT_EPOLL_PWAIT)
8981 case TARGET_NR_epoll_pwait:
8983 target_sigset_t *target_set;
8984 sigset_t _set, *set = &_set;
8986 if (arg5) {
8987 target_set = lock_user(VERIFY_READ, arg5,
8988 sizeof(target_sigset_t), 1);
8989 if (!target_set) {
8990 unlock_user(target_ep, arg2, 0);
8991 goto efault;
8993 target_to_host_sigset(set, target_set);
8994 unlock_user(target_set, arg5, 0);
8995 } else {
8996 set = NULL;
8999 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9000 break;
9002 #endif
9003 #if defined(TARGET_NR_epoll_wait)
9004 case TARGET_NR_epoll_wait:
9005 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9006 break;
9007 #endif
9008 default:
9009 ret = -TARGET_ENOSYS;
9011 if (!is_error(ret)) {
9012 int i;
9013 for (i = 0; i < ret; i++) {
9014 target_ep[i].events = tswap32(ep[i].events);
9015 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9018 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9019 break;
9021 #endif
9022 #endif
9023 #ifdef TARGET_NR_prlimit64
9024 case TARGET_NR_prlimit64:
9026 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9027 struct target_rlimit64 *target_rnew, *target_rold;
9028 struct host_rlimit64 rnew, rold, *rnewp = 0;
9029 if (arg3) {
9030 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9031 goto efault;
9033 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9034 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9035 unlock_user_struct(target_rnew, arg3, 0);
9036 rnewp = &rnew;
9039 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9040 if (!is_error(ret) && arg4) {
9041 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9042 goto efault;
9044 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9045 target_rold->rlim_max = tswap64(rold.rlim_max);
9046 unlock_user_struct(target_rold, arg4, 1);
9048 break;
9050 #endif
9051 #ifdef TARGET_NR_gethostname
9052 case TARGET_NR_gethostname:
9054 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9055 if (name) {
9056 ret = get_errno(gethostname(name, arg2));
9057 unlock_user(name, arg1, arg2);
9058 } else {
9059 ret = -TARGET_EFAULT;
9061 break;
9063 #endif
9064 default:
9065 unimplemented:
9066 gemu_log("qemu: Unsupported syscall: %d\n", num);
9067 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9068 unimplemented_nowarn:
9069 #endif
9070 ret = -TARGET_ENOSYS;
9071 break;
9073 fail:
9074 #ifdef DEBUG
9075 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9076 #endif
9077 if(do_strace)
9078 print_syscall_ret(num, ret);
9079 return ret;
9080 efault:
9081 ret = -TARGET_EFAULT;
9082 goto fail;