qdev-monitor-test: Simplify using g_assert_cmpstr()
[qemu/cris-port.git] / linux-user / syscall.c
blobe2c10cc0bd92e7c404b7cc7b44a45d21d635e444
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include <linux/filter.h>
110 #include <linux/blkpg.h>
111 #include "linux_loop.h"
112 #include "cpu-uname.h"
114 #include "qemu.h"
116 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
117 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 //#define DEBUG
121 //#include <linux/msdos_fs.h>
122 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
123 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
126 #undef _syscall0
127 #undef _syscall1
128 #undef _syscall2
129 #undef _syscall3
130 #undef _syscall4
131 #undef _syscall5
132 #undef _syscall6
134 #define _syscall0(type,name) \
135 static type name (void) \
137 return syscall(__NR_##name); \
140 #define _syscall1(type,name,type1,arg1) \
141 static type name (type1 arg1) \
143 return syscall(__NR_##name, arg1); \
146 #define _syscall2(type,name,type1,arg1,type2,arg2) \
147 static type name (type1 arg1,type2 arg2) \
149 return syscall(__NR_##name, arg1, arg2); \
152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
153 static type name (type1 arg1,type2 arg2,type3 arg3) \
155 return syscall(__NR_##name, arg1, arg2, arg3); \
158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 type5,arg5) \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
173 type5,arg5,type6,arg6) \
174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
175 type6 arg6) \
177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_tgkill __NR_tgkill
189 #define __NR_sys_tkill __NR_tkill
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 defined(__s390x__)
197 #define __NR__llseek __NR_lseek
198 #endif
200 #ifdef __NR_gettid
201 _syscall0(int, gettid)
202 #else
203 /* This is a replacement for the host gettid() and must return a host
204 errno. */
205 static int gettid(void) {
206 return -ENOSYS;
208 #endif
209 #ifdef __NR_getdents
210 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
211 #endif
212 #if !defined(__NR_getdents) || \
213 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
214 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
215 #endif
216 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
217 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
218 loff_t *, res, uint, wh);
219 #endif
220 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
221 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
222 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
223 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
224 #endif
225 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
226 _syscall2(int,sys_tkill,int,tid,int,sig)
227 #endif
228 #ifdef __NR_exit_group
229 _syscall1(int,exit_group,int,error_code)
230 #endif
231 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
232 _syscall1(int,set_tid_address,int *,tidptr)
233 #endif
234 #if defined(TARGET_NR_futex) && defined(__NR_futex)
235 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
236 const struct timespec *,timeout,int *,uaddr2,int,val3)
237 #endif
238 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
239 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
240 unsigned long *, user_mask_ptr);
241 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
242 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
243 unsigned long *, user_mask_ptr);
244 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
245 void *, arg);
247 static bitmask_transtbl fcntl_flags_tbl[] = {
248 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
249 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
250 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
251 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
252 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
253 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
254 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
255 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
256 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
257 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
258 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
259 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
260 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
261 #if defined(O_DIRECT)
262 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
263 #endif
264 #if defined(O_NOATIME)
265 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
266 #endif
267 #if defined(O_CLOEXEC)
268 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
269 #endif
270 #if defined(O_PATH)
271 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
272 #endif
273 /* Don't terminate the list prematurely on 64-bit host+guest. */
274 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
275 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
276 #endif
277 { 0, 0, 0, 0 }
280 #define COPY_UTSNAME_FIELD(dest, src) \
281 do { \
282 /* __NEW_UTS_LEN doesn't include terminating null */ \
283 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
284 (dest)[__NEW_UTS_LEN] = '\0'; \
285 } while (0)
287 static int sys_uname(struct new_utsname *buf)
289 struct utsname uts_buf;
291 if (uname(&uts_buf) < 0)
292 return (-1);
295 * Just in case these have some differences, we
296 * translate utsname to new_utsname (which is the
297 * struct linux kernel uses).
300 memset(buf, 0, sizeof(*buf));
301 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
302 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
303 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
304 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
305 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
306 #ifdef _GNU_SOURCE
307 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
308 #endif
309 return (0);
311 #undef COPY_UTSNAME_FIELD
314 static int sys_getcwd1(char *buf, size_t size)
316 if (getcwd(buf, size) == NULL) {
317 /* getcwd() sets errno */
318 return (-1);
320 return strlen(buf)+1;
323 #ifdef TARGET_NR_openat
324 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
327 * open(2) has extra parameter 'mode' when called with
328 * flag O_CREAT.
330 if ((flags & O_CREAT) != 0) {
331 return (openat(dirfd, pathname, flags, mode));
333 return (openat(dirfd, pathname, flags));
335 #endif
337 #ifdef TARGET_NR_utimensat
338 #ifdef CONFIG_UTIMENSAT
339 static int sys_utimensat(int dirfd, const char *pathname,
340 const struct timespec times[2], int flags)
342 if (pathname == NULL)
343 return futimens(dirfd, times);
344 else
345 return utimensat(dirfd, pathname, times, flags);
347 #elif defined(__NR_utimensat)
348 #define __NR_sys_utimensat __NR_utimensat
349 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
350 const struct timespec *,tsp,int,flags)
351 #else
352 static int sys_utimensat(int dirfd, const char *pathname,
353 const struct timespec times[2], int flags)
355 errno = ENOSYS;
356 return -1;
358 #endif
359 #endif /* TARGET_NR_utimensat */
361 #ifdef CONFIG_INOTIFY
362 #include <sys/inotify.h>
364 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
365 static int sys_inotify_init(void)
367 return (inotify_init());
369 #endif
370 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
371 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
373 return (inotify_add_watch(fd, pathname, mask));
375 #endif
376 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
377 static int sys_inotify_rm_watch(int fd, int32_t wd)
379 return (inotify_rm_watch(fd, wd));
381 #endif
382 #ifdef CONFIG_INOTIFY1
383 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
384 static int sys_inotify_init1(int flags)
386 return (inotify_init1(flags));
388 #endif
389 #endif
390 #else
391 /* Userspace can usually survive runtime without inotify */
392 #undef TARGET_NR_inotify_init
393 #undef TARGET_NR_inotify_init1
394 #undef TARGET_NR_inotify_add_watch
395 #undef TARGET_NR_inotify_rm_watch
396 #endif /* CONFIG_INOTIFY */
398 #if defined(TARGET_NR_ppoll)
399 #ifndef __NR_ppoll
400 # define __NR_ppoll -1
401 #endif
402 #define __NR_sys_ppoll __NR_ppoll
403 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
404 struct timespec *, timeout, const __sigset_t *, sigmask,
405 size_t, sigsetsize)
406 #endif
408 #if defined(TARGET_NR_pselect6)
409 #ifndef __NR_pselect6
410 # define __NR_pselect6 -1
411 #endif
412 #define __NR_sys_pselect6 __NR_pselect6
413 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
414 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
415 #endif
417 #if defined(TARGET_NR_prlimit64)
418 #ifndef __NR_prlimit64
419 # define __NR_prlimit64 -1
420 #endif
421 #define __NR_sys_prlimit64 __NR_prlimit64
422 /* The glibc rlimit structure may not be that used by the underlying syscall */
423 struct host_rlimit64 {
424 uint64_t rlim_cur;
425 uint64_t rlim_max;
427 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
428 const struct host_rlimit64 *, new_limit,
429 struct host_rlimit64 *, old_limit)
430 #endif
433 #if defined(TARGET_NR_timer_create)
434 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
435 static timer_t g_posix_timers[32] = { 0, } ;
437 static inline int next_free_host_timer(void)
439 int k ;
440 /* FIXME: Does finding the next free slot require a lock? */
441 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
442 if (g_posix_timers[k] == 0) {
443 g_posix_timers[k] = (timer_t) 1;
444 return k;
447 return -1;
449 #endif
451 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
452 #ifdef TARGET_ARM
453 static inline int regpairs_aligned(void *cpu_env) {
454 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
456 #elif defined(TARGET_MIPS)
457 static inline int regpairs_aligned(void *cpu_env) { return 1; }
458 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
459 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
460 * of registers which translates to the same as ARM/MIPS, because we start with
461 * r3 as arg1 */
462 static inline int regpairs_aligned(void *cpu_env) { return 1; }
463 #else
464 static inline int regpairs_aligned(void *cpu_env) { return 0; }
465 #endif
467 #define ERRNO_TABLE_SIZE 1200
469 /* target_to_host_errno_table[] is initialized from
470 * host_to_target_errno_table[] in syscall_init(). */
471 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
475 * This list is the union of errno values overridden in asm-<arch>/errno.h
476 * minus the errnos that are not actually generic to all archs.
478 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
479 [EIDRM] = TARGET_EIDRM,
480 [ECHRNG] = TARGET_ECHRNG,
481 [EL2NSYNC] = TARGET_EL2NSYNC,
482 [EL3HLT] = TARGET_EL3HLT,
483 [EL3RST] = TARGET_EL3RST,
484 [ELNRNG] = TARGET_ELNRNG,
485 [EUNATCH] = TARGET_EUNATCH,
486 [ENOCSI] = TARGET_ENOCSI,
487 [EL2HLT] = TARGET_EL2HLT,
488 [EDEADLK] = TARGET_EDEADLK,
489 [ENOLCK] = TARGET_ENOLCK,
490 [EBADE] = TARGET_EBADE,
491 [EBADR] = TARGET_EBADR,
492 [EXFULL] = TARGET_EXFULL,
493 [ENOANO] = TARGET_ENOANO,
494 [EBADRQC] = TARGET_EBADRQC,
495 [EBADSLT] = TARGET_EBADSLT,
496 [EBFONT] = TARGET_EBFONT,
497 [ENOSTR] = TARGET_ENOSTR,
498 [ENODATA] = TARGET_ENODATA,
499 [ETIME] = TARGET_ETIME,
500 [ENOSR] = TARGET_ENOSR,
501 [ENONET] = TARGET_ENONET,
502 [ENOPKG] = TARGET_ENOPKG,
503 [EREMOTE] = TARGET_EREMOTE,
504 [ENOLINK] = TARGET_ENOLINK,
505 [EADV] = TARGET_EADV,
506 [ESRMNT] = TARGET_ESRMNT,
507 [ECOMM] = TARGET_ECOMM,
508 [EPROTO] = TARGET_EPROTO,
509 [EDOTDOT] = TARGET_EDOTDOT,
510 [EMULTIHOP] = TARGET_EMULTIHOP,
511 [EBADMSG] = TARGET_EBADMSG,
512 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
513 [EOVERFLOW] = TARGET_EOVERFLOW,
514 [ENOTUNIQ] = TARGET_ENOTUNIQ,
515 [EBADFD] = TARGET_EBADFD,
516 [EREMCHG] = TARGET_EREMCHG,
517 [ELIBACC] = TARGET_ELIBACC,
518 [ELIBBAD] = TARGET_ELIBBAD,
519 [ELIBSCN] = TARGET_ELIBSCN,
520 [ELIBMAX] = TARGET_ELIBMAX,
521 [ELIBEXEC] = TARGET_ELIBEXEC,
522 [EILSEQ] = TARGET_EILSEQ,
523 [ENOSYS] = TARGET_ENOSYS,
524 [ELOOP] = TARGET_ELOOP,
525 [ERESTART] = TARGET_ERESTART,
526 [ESTRPIPE] = TARGET_ESTRPIPE,
527 [ENOTEMPTY] = TARGET_ENOTEMPTY,
528 [EUSERS] = TARGET_EUSERS,
529 [ENOTSOCK] = TARGET_ENOTSOCK,
530 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
531 [EMSGSIZE] = TARGET_EMSGSIZE,
532 [EPROTOTYPE] = TARGET_EPROTOTYPE,
533 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
534 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
535 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
536 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
537 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
538 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
539 [EADDRINUSE] = TARGET_EADDRINUSE,
540 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
541 [ENETDOWN] = TARGET_ENETDOWN,
542 [ENETUNREACH] = TARGET_ENETUNREACH,
543 [ENETRESET] = TARGET_ENETRESET,
544 [ECONNABORTED] = TARGET_ECONNABORTED,
545 [ECONNRESET] = TARGET_ECONNRESET,
546 [ENOBUFS] = TARGET_ENOBUFS,
547 [EISCONN] = TARGET_EISCONN,
548 [ENOTCONN] = TARGET_ENOTCONN,
549 [EUCLEAN] = TARGET_EUCLEAN,
550 [ENOTNAM] = TARGET_ENOTNAM,
551 [ENAVAIL] = TARGET_ENAVAIL,
552 [EISNAM] = TARGET_EISNAM,
553 [EREMOTEIO] = TARGET_EREMOTEIO,
554 [ESHUTDOWN] = TARGET_ESHUTDOWN,
555 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
556 [ETIMEDOUT] = TARGET_ETIMEDOUT,
557 [ECONNREFUSED] = TARGET_ECONNREFUSED,
558 [EHOSTDOWN] = TARGET_EHOSTDOWN,
559 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
560 [EALREADY] = TARGET_EALREADY,
561 [EINPROGRESS] = TARGET_EINPROGRESS,
562 [ESTALE] = TARGET_ESTALE,
563 [ECANCELED] = TARGET_ECANCELED,
564 [ENOMEDIUM] = TARGET_ENOMEDIUM,
565 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
566 #ifdef ENOKEY
567 [ENOKEY] = TARGET_ENOKEY,
568 #endif
569 #ifdef EKEYEXPIRED
570 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
571 #endif
572 #ifdef EKEYREVOKED
573 [EKEYREVOKED] = TARGET_EKEYREVOKED,
574 #endif
575 #ifdef EKEYREJECTED
576 [EKEYREJECTED] = TARGET_EKEYREJECTED,
577 #endif
578 #ifdef EOWNERDEAD
579 [EOWNERDEAD] = TARGET_EOWNERDEAD,
580 #endif
581 #ifdef ENOTRECOVERABLE
582 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
583 #endif
586 static inline int host_to_target_errno(int err)
588 if(host_to_target_errno_table[err])
589 return host_to_target_errno_table[err];
590 return err;
593 static inline int target_to_host_errno(int err)
595 if (target_to_host_errno_table[err])
596 return target_to_host_errno_table[err];
597 return err;
600 static inline abi_long get_errno(abi_long ret)
602 if (ret == -1)
603 return -host_to_target_errno(errno);
604 else
605 return ret;
608 static inline int is_error(abi_long ret)
610 return (abi_ulong)ret >= (abi_ulong)(-4096);
613 char *target_strerror(int err)
615 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
616 return NULL;
618 return strerror(target_to_host_errno(err));
621 static abi_ulong target_brk;
622 static abi_ulong target_original_brk;
623 static abi_ulong brk_page;
625 void target_set_brk(abi_ulong new_brk)
627 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
628 brk_page = HOST_PAGE_ALIGN(target_brk);
631 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
632 #define DEBUGF_BRK(message, args...)
634 /* do_brk() must return target values and target errnos. */
635 abi_long do_brk(abi_ulong new_brk)
637 abi_long mapped_addr;
638 int new_alloc_size;
640 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
642 if (!new_brk) {
643 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
644 return target_brk;
646 if (new_brk < target_original_brk) {
647 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
648 target_brk);
649 return target_brk;
652 /* If the new brk is less than the highest page reserved to the
653 * target heap allocation, set it and we're almost done... */
654 if (new_brk <= brk_page) {
655 /* Heap contents are initialized to zero, as for anonymous
656 * mapped pages. */
657 if (new_brk > target_brk) {
658 memset(g2h(target_brk), 0, new_brk - target_brk);
660 target_brk = new_brk;
661 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
662 return target_brk;
665 /* We need to allocate more memory after the brk... Note that
666 * we don't use MAP_FIXED because that will map over the top of
667 * any existing mapping (like the one with the host libc or qemu
668 * itself); instead we treat "mapped but at wrong address" as
669 * a failure and unmap again.
671 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
672 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
673 PROT_READ|PROT_WRITE,
674 MAP_ANON|MAP_PRIVATE, 0, 0));
676 if (mapped_addr == brk_page) {
677 /* Heap contents are initialized to zero, as for anonymous
678 * mapped pages. Technically the new pages are already
679 * initialized to zero since they *are* anonymous mapped
680 * pages, however we have to take care with the contents that
681 * come from the remaining part of the previous page: it may
682 * contains garbage data due to a previous heap usage (grown
683 * then shrunken). */
684 memset(g2h(target_brk), 0, brk_page - target_brk);
686 target_brk = new_brk;
687 brk_page = HOST_PAGE_ALIGN(target_brk);
688 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
689 target_brk);
690 return target_brk;
691 } else if (mapped_addr != -1) {
692 /* Mapped but at wrong address, meaning there wasn't actually
693 * enough space for this brk.
695 target_munmap(mapped_addr, new_alloc_size);
696 mapped_addr = -1;
697 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
699 else {
700 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
703 #if defined(TARGET_ALPHA)
704 /* We (partially) emulate OSF/1 on Alpha, which requires we
705 return a proper errno, not an unchanged brk value. */
706 return -TARGET_ENOMEM;
707 #endif
708 /* For everything else, return the previous break. */
709 return target_brk;
712 static inline abi_long copy_from_user_fdset(fd_set *fds,
713 abi_ulong target_fds_addr,
714 int n)
716 int i, nw, j, k;
717 abi_ulong b, *target_fds;
719 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
720 if (!(target_fds = lock_user(VERIFY_READ,
721 target_fds_addr,
722 sizeof(abi_ulong) * nw,
723 1)))
724 return -TARGET_EFAULT;
726 FD_ZERO(fds);
727 k = 0;
728 for (i = 0; i < nw; i++) {
729 /* grab the abi_ulong */
730 __get_user(b, &target_fds[i]);
731 for (j = 0; j < TARGET_ABI_BITS; j++) {
732 /* check the bit inside the abi_ulong */
733 if ((b >> j) & 1)
734 FD_SET(k, fds);
735 k++;
739 unlock_user(target_fds, target_fds_addr, 0);
741 return 0;
744 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
745 abi_ulong target_fds_addr,
746 int n)
748 if (target_fds_addr) {
749 if (copy_from_user_fdset(fds, target_fds_addr, n))
750 return -TARGET_EFAULT;
751 *fds_ptr = fds;
752 } else {
753 *fds_ptr = NULL;
755 return 0;
758 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
759 const fd_set *fds,
760 int n)
762 int i, nw, j, k;
763 abi_long v;
764 abi_ulong *target_fds;
766 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
767 if (!(target_fds = lock_user(VERIFY_WRITE,
768 target_fds_addr,
769 sizeof(abi_ulong) * nw,
770 0)))
771 return -TARGET_EFAULT;
773 k = 0;
774 for (i = 0; i < nw; i++) {
775 v = 0;
776 for (j = 0; j < TARGET_ABI_BITS; j++) {
777 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
778 k++;
780 __put_user(v, &target_fds[i]);
783 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
785 return 0;
788 #if defined(__alpha__)
789 #define HOST_HZ 1024
790 #else
791 #define HOST_HZ 100
792 #endif
794 static inline abi_long host_to_target_clock_t(long ticks)
796 #if HOST_HZ == TARGET_HZ
797 return ticks;
798 #else
799 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
800 #endif
803 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
804 const struct rusage *rusage)
806 struct target_rusage *target_rusage;
808 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
809 return -TARGET_EFAULT;
810 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
811 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
812 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
813 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
814 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
815 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
816 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
817 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
818 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
819 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
820 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
821 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
822 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
823 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
824 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
825 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
826 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
827 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
828 unlock_user_struct(target_rusage, target_addr, 1);
830 return 0;
833 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
835 abi_ulong target_rlim_swap;
836 rlim_t result;
838 target_rlim_swap = tswapal(target_rlim);
839 if (target_rlim_swap == TARGET_RLIM_INFINITY)
840 return RLIM_INFINITY;
842 result = target_rlim_swap;
843 if (target_rlim_swap != (rlim_t)result)
844 return RLIM_INFINITY;
846 return result;
849 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
851 abi_ulong target_rlim_swap;
852 abi_ulong result;
854 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
855 target_rlim_swap = TARGET_RLIM_INFINITY;
856 else
857 target_rlim_swap = rlim;
858 result = tswapal(target_rlim_swap);
860 return result;
863 static inline int target_to_host_resource(int code)
865 switch (code) {
866 case TARGET_RLIMIT_AS:
867 return RLIMIT_AS;
868 case TARGET_RLIMIT_CORE:
869 return RLIMIT_CORE;
870 case TARGET_RLIMIT_CPU:
871 return RLIMIT_CPU;
872 case TARGET_RLIMIT_DATA:
873 return RLIMIT_DATA;
874 case TARGET_RLIMIT_FSIZE:
875 return RLIMIT_FSIZE;
876 case TARGET_RLIMIT_LOCKS:
877 return RLIMIT_LOCKS;
878 case TARGET_RLIMIT_MEMLOCK:
879 return RLIMIT_MEMLOCK;
880 case TARGET_RLIMIT_MSGQUEUE:
881 return RLIMIT_MSGQUEUE;
882 case TARGET_RLIMIT_NICE:
883 return RLIMIT_NICE;
884 case TARGET_RLIMIT_NOFILE:
885 return RLIMIT_NOFILE;
886 case TARGET_RLIMIT_NPROC:
887 return RLIMIT_NPROC;
888 case TARGET_RLIMIT_RSS:
889 return RLIMIT_RSS;
890 case TARGET_RLIMIT_RTPRIO:
891 return RLIMIT_RTPRIO;
892 case TARGET_RLIMIT_SIGPENDING:
893 return RLIMIT_SIGPENDING;
894 case TARGET_RLIMIT_STACK:
895 return RLIMIT_STACK;
896 default:
897 return code;
901 static inline abi_long copy_from_user_timeval(struct timeval *tv,
902 abi_ulong target_tv_addr)
904 struct target_timeval *target_tv;
906 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
907 return -TARGET_EFAULT;
909 __get_user(tv->tv_sec, &target_tv->tv_sec);
910 __get_user(tv->tv_usec, &target_tv->tv_usec);
912 unlock_user_struct(target_tv, target_tv_addr, 0);
914 return 0;
917 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
918 const struct timeval *tv)
920 struct target_timeval *target_tv;
922 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
923 return -TARGET_EFAULT;
925 __put_user(tv->tv_sec, &target_tv->tv_sec);
926 __put_user(tv->tv_usec, &target_tv->tv_usec);
928 unlock_user_struct(target_tv, target_tv_addr, 1);
930 return 0;
933 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
934 #include <mqueue.h>
936 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
937 abi_ulong target_mq_attr_addr)
939 struct target_mq_attr *target_mq_attr;
941 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
942 target_mq_attr_addr, 1))
943 return -TARGET_EFAULT;
945 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
946 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
947 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
948 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
950 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
952 return 0;
955 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
956 const struct mq_attr *attr)
958 struct target_mq_attr *target_mq_attr;
960 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
961 target_mq_attr_addr, 0))
962 return -TARGET_EFAULT;
964 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
965 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
966 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
967 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
969 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
971 return 0;
973 #endif
975 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
976 /* do_select() must return target values and target errnos. */
977 static abi_long do_select(int n,
978 abi_ulong rfd_addr, abi_ulong wfd_addr,
979 abi_ulong efd_addr, abi_ulong target_tv_addr)
981 fd_set rfds, wfds, efds;
982 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
983 struct timeval tv, *tv_ptr;
984 abi_long ret;
986 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
987 if (ret) {
988 return ret;
990 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
991 if (ret) {
992 return ret;
994 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
995 if (ret) {
996 return ret;
999 if (target_tv_addr) {
1000 if (copy_from_user_timeval(&tv, target_tv_addr))
1001 return -TARGET_EFAULT;
1002 tv_ptr = &tv;
1003 } else {
1004 tv_ptr = NULL;
1007 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1009 if (!is_error(ret)) {
1010 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1011 return -TARGET_EFAULT;
1012 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1013 return -TARGET_EFAULT;
1014 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1015 return -TARGET_EFAULT;
1017 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1018 return -TARGET_EFAULT;
1021 return ret;
1023 #endif
1025 static abi_long do_pipe2(int host_pipe[], int flags)
1027 #ifdef CONFIG_PIPE2
1028 return pipe2(host_pipe, flags);
1029 #else
1030 return -ENOSYS;
1031 #endif
1034 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1035 int flags, int is_pipe2)
1037 int host_pipe[2];
1038 abi_long ret;
1039 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1041 if (is_error(ret))
1042 return get_errno(ret);
1044 /* Several targets have special calling conventions for the original
1045 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1046 if (!is_pipe2) {
1047 #if defined(TARGET_ALPHA)
1048 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1049 return host_pipe[0];
1050 #elif defined(TARGET_MIPS)
1051 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1052 return host_pipe[0];
1053 #elif defined(TARGET_SH4)
1054 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1055 return host_pipe[0];
1056 #elif defined(TARGET_SPARC)
1057 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1058 return host_pipe[0];
1059 #endif
1062 if (put_user_s32(host_pipe[0], pipedes)
1063 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1064 return -TARGET_EFAULT;
1065 return get_errno(ret);
1068 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1069 abi_ulong target_addr,
1070 socklen_t len)
1072 struct target_ip_mreqn *target_smreqn;
1074 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1075 if (!target_smreqn)
1076 return -TARGET_EFAULT;
1077 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1078 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1079 if (len == sizeof(struct target_ip_mreqn))
1080 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1081 unlock_user(target_smreqn, target_addr, 0);
1083 return 0;
1086 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1087 abi_ulong target_addr,
1088 socklen_t len)
1090 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1091 sa_family_t sa_family;
1092 struct target_sockaddr *target_saddr;
1094 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1095 if (!target_saddr)
1096 return -TARGET_EFAULT;
1098 sa_family = tswap16(target_saddr->sa_family);
1100 /* Oops. The caller might send a incomplete sun_path; sun_path
1101 * must be terminated by \0 (see the manual page), but
1102 * unfortunately it is quite common to specify sockaddr_un
1103 * length as "strlen(x->sun_path)" while it should be
1104 * "strlen(...) + 1". We'll fix that here if needed.
1105 * Linux kernel has a similar feature.
1108 if (sa_family == AF_UNIX) {
1109 if (len < unix_maxlen && len > 0) {
1110 char *cp = (char*)target_saddr;
1112 if ( cp[len-1] && !cp[len] )
1113 len++;
1115 if (len > unix_maxlen)
1116 len = unix_maxlen;
1119 memcpy(addr, target_saddr, len);
1120 addr->sa_family = sa_family;
1121 unlock_user(target_saddr, target_addr, 0);
1123 return 0;
1126 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1127 struct sockaddr *addr,
1128 socklen_t len)
1130 struct target_sockaddr *target_saddr;
1132 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1133 if (!target_saddr)
1134 return -TARGET_EFAULT;
1135 memcpy(target_saddr, addr, len);
1136 target_saddr->sa_family = tswap16(addr->sa_family);
1137 unlock_user(target_saddr, target_addr, len);
1139 return 0;
1142 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1143 struct target_msghdr *target_msgh)
1145 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1146 abi_long msg_controllen;
1147 abi_ulong target_cmsg_addr;
1148 struct target_cmsghdr *target_cmsg;
1149 socklen_t space = 0;
1151 msg_controllen = tswapal(target_msgh->msg_controllen);
1152 if (msg_controllen < sizeof (struct target_cmsghdr))
1153 goto the_end;
1154 target_cmsg_addr = tswapal(target_msgh->msg_control);
1155 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1156 if (!target_cmsg)
1157 return -TARGET_EFAULT;
1159 while (cmsg && target_cmsg) {
1160 void *data = CMSG_DATA(cmsg);
1161 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1163 int len = tswapal(target_cmsg->cmsg_len)
1164 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1166 space += CMSG_SPACE(len);
1167 if (space > msgh->msg_controllen) {
1168 space -= CMSG_SPACE(len);
1169 gemu_log("Host cmsg overflow\n");
1170 break;
1173 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1174 cmsg->cmsg_level = SOL_SOCKET;
1175 } else {
1176 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1178 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1179 cmsg->cmsg_len = CMSG_LEN(len);
1181 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1182 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1183 memcpy(data, target_data, len);
1184 } else {
1185 int *fd = (int *)data;
1186 int *target_fd = (int *)target_data;
1187 int i, numfds = len / sizeof(int);
1189 for (i = 0; i < numfds; i++)
1190 fd[i] = tswap32(target_fd[i]);
1193 cmsg = CMSG_NXTHDR(msgh, cmsg);
1194 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1196 unlock_user(target_cmsg, target_cmsg_addr, 0);
1197 the_end:
1198 msgh->msg_controllen = space;
1199 return 0;
1202 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1203 struct msghdr *msgh)
1205 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1206 abi_long msg_controllen;
1207 abi_ulong target_cmsg_addr;
1208 struct target_cmsghdr *target_cmsg;
1209 socklen_t space = 0;
1211 msg_controllen = tswapal(target_msgh->msg_controllen);
1212 if (msg_controllen < sizeof (struct target_cmsghdr))
1213 goto the_end;
1214 target_cmsg_addr = tswapal(target_msgh->msg_control);
1215 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1216 if (!target_cmsg)
1217 return -TARGET_EFAULT;
1219 while (cmsg && target_cmsg) {
1220 void *data = CMSG_DATA(cmsg);
1221 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1223 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1225 space += TARGET_CMSG_SPACE(len);
1226 if (space > msg_controllen) {
1227 space -= TARGET_CMSG_SPACE(len);
1228 gemu_log("Target cmsg overflow\n");
1229 break;
1232 if (cmsg->cmsg_level == SOL_SOCKET) {
1233 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1234 } else {
1235 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1237 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1238 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1240 if ((cmsg->cmsg_level == SOL_SOCKET) &&
1241 (cmsg->cmsg_type == SCM_RIGHTS)) {
1242 int *fd = (int *)data;
1243 int *target_fd = (int *)target_data;
1244 int i, numfds = len / sizeof(int);
1246 for (i = 0; i < numfds; i++)
1247 target_fd[i] = tswap32(fd[i]);
1248 } else if ((cmsg->cmsg_level == SOL_SOCKET) &&
1249 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1250 (len == sizeof(struct timeval))) {
1251 /* copy struct timeval to target */
1252 struct timeval *tv = (struct timeval *)data;
1253 struct target_timeval *target_tv =
1254 (struct target_timeval *)target_data;
1256 target_tv->tv_sec = tswapal(tv->tv_sec);
1257 target_tv->tv_usec = tswapal(tv->tv_usec);
1258 } else {
1259 gemu_log("Unsupported ancillary data: %d/%d\n",
1260 cmsg->cmsg_level, cmsg->cmsg_type);
1261 memcpy(target_data, data, len);
1264 cmsg = CMSG_NXTHDR(msgh, cmsg);
1265 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1267 unlock_user(target_cmsg, target_cmsg_addr, space);
1268 the_end:
1269 target_msgh->msg_controllen = tswapal(space);
1270 return 0;
1273 /* do_setsockopt() Must return target values and target errnos. */
1274 static abi_long do_setsockopt(int sockfd, int level, int optname,
1275 abi_ulong optval_addr, socklen_t optlen)
1277 abi_long ret;
1278 int val;
1279 struct ip_mreqn *ip_mreq;
1280 struct ip_mreq_source *ip_mreq_source;
1282 switch(level) {
1283 case SOL_TCP:
1284 /* TCP options all take an 'int' value. */
1285 if (optlen < sizeof(uint32_t))
1286 return -TARGET_EINVAL;
1288 if (get_user_u32(val, optval_addr))
1289 return -TARGET_EFAULT;
1290 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1291 break;
1292 case SOL_IP:
1293 switch(optname) {
1294 case IP_TOS:
1295 case IP_TTL:
1296 case IP_HDRINCL:
1297 case IP_ROUTER_ALERT:
1298 case IP_RECVOPTS:
1299 case IP_RETOPTS:
1300 case IP_PKTINFO:
1301 case IP_MTU_DISCOVER:
1302 case IP_RECVERR:
1303 case IP_RECVTOS:
1304 #ifdef IP_FREEBIND
1305 case IP_FREEBIND:
1306 #endif
1307 case IP_MULTICAST_TTL:
1308 case IP_MULTICAST_LOOP:
1309 val = 0;
1310 if (optlen >= sizeof(uint32_t)) {
1311 if (get_user_u32(val, optval_addr))
1312 return -TARGET_EFAULT;
1313 } else if (optlen >= 1) {
1314 if (get_user_u8(val, optval_addr))
1315 return -TARGET_EFAULT;
1317 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1318 break;
1319 case IP_ADD_MEMBERSHIP:
1320 case IP_DROP_MEMBERSHIP:
1321 if (optlen < sizeof (struct target_ip_mreq) ||
1322 optlen > sizeof (struct target_ip_mreqn))
1323 return -TARGET_EINVAL;
1325 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1326 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1327 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1328 break;
1330 case IP_BLOCK_SOURCE:
1331 case IP_UNBLOCK_SOURCE:
1332 case IP_ADD_SOURCE_MEMBERSHIP:
1333 case IP_DROP_SOURCE_MEMBERSHIP:
1334 if (optlen != sizeof (struct target_ip_mreq_source))
1335 return -TARGET_EINVAL;
1337 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1338 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1339 unlock_user (ip_mreq_source, optval_addr, 0);
1340 break;
1342 default:
1343 goto unimplemented;
1345 break;
1346 case SOL_IPV6:
1347 switch (optname) {
1348 case IPV6_MTU_DISCOVER:
1349 case IPV6_MTU:
1350 case IPV6_V6ONLY:
1351 case IPV6_RECVPKTINFO:
1352 val = 0;
1353 if (optlen < sizeof(uint32_t)) {
1354 return -TARGET_EINVAL;
1356 if (get_user_u32(val, optval_addr)) {
1357 return -TARGET_EFAULT;
1359 ret = get_errno(setsockopt(sockfd, level, optname,
1360 &val, sizeof(val)));
1361 break;
1362 default:
1363 goto unimplemented;
1365 break;
1366 case SOL_RAW:
1367 switch (optname) {
1368 case ICMP_FILTER:
1369 /* struct icmp_filter takes an u32 value */
1370 if (optlen < sizeof(uint32_t)) {
1371 return -TARGET_EINVAL;
1374 if (get_user_u32(val, optval_addr)) {
1375 return -TARGET_EFAULT;
1377 ret = get_errno(setsockopt(sockfd, level, optname,
1378 &val, sizeof(val)));
1379 break;
1381 default:
1382 goto unimplemented;
1384 break;
1385 case TARGET_SOL_SOCKET:
1386 switch (optname) {
1387 case TARGET_SO_RCVTIMEO:
1389 struct timeval tv;
1391 optname = SO_RCVTIMEO;
1393 set_timeout:
1394 if (optlen != sizeof(struct target_timeval)) {
1395 return -TARGET_EINVAL;
1398 if (copy_from_user_timeval(&tv, optval_addr)) {
1399 return -TARGET_EFAULT;
1402 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1403 &tv, sizeof(tv)));
1404 return ret;
1406 case TARGET_SO_SNDTIMEO:
1407 optname = SO_SNDTIMEO;
1408 goto set_timeout;
1409 case TARGET_SO_ATTACH_FILTER:
1411 struct target_sock_fprog *tfprog;
1412 struct target_sock_filter *tfilter;
1413 struct sock_fprog fprog;
1414 struct sock_filter *filter;
1415 int i;
1417 if (optlen != sizeof(*tfprog)) {
1418 return -TARGET_EINVAL;
1420 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1421 return -TARGET_EFAULT;
1423 if (!lock_user_struct(VERIFY_READ, tfilter,
1424 tswapal(tfprog->filter), 0)) {
1425 unlock_user_struct(tfprog, optval_addr, 1);
1426 return -TARGET_EFAULT;
1429 fprog.len = tswap16(tfprog->len);
1430 filter = malloc(fprog.len * sizeof(*filter));
1431 if (filter == NULL) {
1432 unlock_user_struct(tfilter, tfprog->filter, 1);
1433 unlock_user_struct(tfprog, optval_addr, 1);
1434 return -TARGET_ENOMEM;
1436 for (i = 0; i < fprog.len; i++) {
1437 filter[i].code = tswap16(tfilter[i].code);
1438 filter[i].jt = tfilter[i].jt;
1439 filter[i].jf = tfilter[i].jf;
1440 filter[i].k = tswap32(tfilter[i].k);
1442 fprog.filter = filter;
1444 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1445 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1446 free(filter);
1448 unlock_user_struct(tfilter, tfprog->filter, 1);
1449 unlock_user_struct(tfprog, optval_addr, 1);
1450 return ret;
1452 /* Options with 'int' argument. */
1453 case TARGET_SO_DEBUG:
1454 optname = SO_DEBUG;
1455 break;
1456 case TARGET_SO_REUSEADDR:
1457 optname = SO_REUSEADDR;
1458 break;
1459 case TARGET_SO_TYPE:
1460 optname = SO_TYPE;
1461 break;
1462 case TARGET_SO_ERROR:
1463 optname = SO_ERROR;
1464 break;
1465 case TARGET_SO_DONTROUTE:
1466 optname = SO_DONTROUTE;
1467 break;
1468 case TARGET_SO_BROADCAST:
1469 optname = SO_BROADCAST;
1470 break;
1471 case TARGET_SO_SNDBUF:
1472 optname = SO_SNDBUF;
1473 break;
1474 case TARGET_SO_RCVBUF:
1475 optname = SO_RCVBUF;
1476 break;
1477 case TARGET_SO_KEEPALIVE:
1478 optname = SO_KEEPALIVE;
1479 break;
1480 case TARGET_SO_OOBINLINE:
1481 optname = SO_OOBINLINE;
1482 break;
1483 case TARGET_SO_NO_CHECK:
1484 optname = SO_NO_CHECK;
1485 break;
1486 case TARGET_SO_PRIORITY:
1487 optname = SO_PRIORITY;
1488 break;
1489 #ifdef SO_BSDCOMPAT
1490 case TARGET_SO_BSDCOMPAT:
1491 optname = SO_BSDCOMPAT;
1492 break;
1493 #endif
1494 case TARGET_SO_PASSCRED:
1495 optname = SO_PASSCRED;
1496 break;
1497 case TARGET_SO_TIMESTAMP:
1498 optname = SO_TIMESTAMP;
1499 break;
1500 case TARGET_SO_RCVLOWAT:
1501 optname = SO_RCVLOWAT;
1502 break;
1503 break;
1504 default:
1505 goto unimplemented;
1507 if (optlen < sizeof(uint32_t))
1508 return -TARGET_EINVAL;
1510 if (get_user_u32(val, optval_addr))
1511 return -TARGET_EFAULT;
1512 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1513 break;
1514 default:
1515 unimplemented:
1516 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1517 ret = -TARGET_ENOPROTOOPT;
1519 return ret;
1522 /* do_getsockopt() Must return target values and target errnos. */
1523 static abi_long do_getsockopt(int sockfd, int level, int optname,
1524 abi_ulong optval_addr, abi_ulong optlen)
1526 abi_long ret;
1527 int len, val;
1528 socklen_t lv;
1530 switch(level) {
1531 case TARGET_SOL_SOCKET:
1532 level = SOL_SOCKET;
1533 switch (optname) {
1534 /* These don't just return a single integer */
1535 case TARGET_SO_LINGER:
1536 case TARGET_SO_RCVTIMEO:
1537 case TARGET_SO_SNDTIMEO:
1538 case TARGET_SO_PEERNAME:
1539 goto unimplemented;
1540 case TARGET_SO_PEERCRED: {
1541 struct ucred cr;
1542 socklen_t crlen;
1543 struct target_ucred *tcr;
1545 if (get_user_u32(len, optlen)) {
1546 return -TARGET_EFAULT;
1548 if (len < 0) {
1549 return -TARGET_EINVAL;
1552 crlen = sizeof(cr);
1553 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1554 &cr, &crlen));
1555 if (ret < 0) {
1556 return ret;
1558 if (len > crlen) {
1559 len = crlen;
1561 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1562 return -TARGET_EFAULT;
1564 __put_user(cr.pid, &tcr->pid);
1565 __put_user(cr.uid, &tcr->uid);
1566 __put_user(cr.gid, &tcr->gid);
1567 unlock_user_struct(tcr, optval_addr, 1);
1568 if (put_user_u32(len, optlen)) {
1569 return -TARGET_EFAULT;
1571 break;
1573 /* Options with 'int' argument. */
1574 case TARGET_SO_DEBUG:
1575 optname = SO_DEBUG;
1576 goto int_case;
1577 case TARGET_SO_REUSEADDR:
1578 optname = SO_REUSEADDR;
1579 goto int_case;
1580 case TARGET_SO_TYPE:
1581 optname = SO_TYPE;
1582 goto int_case;
1583 case TARGET_SO_ERROR:
1584 optname = SO_ERROR;
1585 goto int_case;
1586 case TARGET_SO_DONTROUTE:
1587 optname = SO_DONTROUTE;
1588 goto int_case;
1589 case TARGET_SO_BROADCAST:
1590 optname = SO_BROADCAST;
1591 goto int_case;
1592 case TARGET_SO_SNDBUF:
1593 optname = SO_SNDBUF;
1594 goto int_case;
1595 case TARGET_SO_RCVBUF:
1596 optname = SO_RCVBUF;
1597 goto int_case;
1598 case TARGET_SO_KEEPALIVE:
1599 optname = SO_KEEPALIVE;
1600 goto int_case;
1601 case TARGET_SO_OOBINLINE:
1602 optname = SO_OOBINLINE;
1603 goto int_case;
1604 case TARGET_SO_NO_CHECK:
1605 optname = SO_NO_CHECK;
1606 goto int_case;
1607 case TARGET_SO_PRIORITY:
1608 optname = SO_PRIORITY;
1609 goto int_case;
1610 #ifdef SO_BSDCOMPAT
1611 case TARGET_SO_BSDCOMPAT:
1612 optname = SO_BSDCOMPAT;
1613 goto int_case;
1614 #endif
1615 case TARGET_SO_PASSCRED:
1616 optname = SO_PASSCRED;
1617 goto int_case;
1618 case TARGET_SO_TIMESTAMP:
1619 optname = SO_TIMESTAMP;
1620 goto int_case;
1621 case TARGET_SO_RCVLOWAT:
1622 optname = SO_RCVLOWAT;
1623 goto int_case;
1624 default:
1625 goto int_case;
1627 break;
1628 case SOL_TCP:
1629 /* TCP options all take an 'int' value. */
1630 int_case:
1631 if (get_user_u32(len, optlen))
1632 return -TARGET_EFAULT;
1633 if (len < 0)
1634 return -TARGET_EINVAL;
1635 lv = sizeof(lv);
1636 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1637 if (ret < 0)
1638 return ret;
1639 if (len > lv)
1640 len = lv;
1641 if (len == 4) {
1642 if (put_user_u32(val, optval_addr))
1643 return -TARGET_EFAULT;
1644 } else {
1645 if (put_user_u8(val, optval_addr))
1646 return -TARGET_EFAULT;
1648 if (put_user_u32(len, optlen))
1649 return -TARGET_EFAULT;
1650 break;
1651 case SOL_IP:
1652 switch(optname) {
1653 case IP_TOS:
1654 case IP_TTL:
1655 case IP_HDRINCL:
1656 case IP_ROUTER_ALERT:
1657 case IP_RECVOPTS:
1658 case IP_RETOPTS:
1659 case IP_PKTINFO:
1660 case IP_MTU_DISCOVER:
1661 case IP_RECVERR:
1662 case IP_RECVTOS:
1663 #ifdef IP_FREEBIND
1664 case IP_FREEBIND:
1665 #endif
1666 case IP_MULTICAST_TTL:
1667 case IP_MULTICAST_LOOP:
1668 if (get_user_u32(len, optlen))
1669 return -TARGET_EFAULT;
1670 if (len < 0)
1671 return -TARGET_EINVAL;
1672 lv = sizeof(lv);
1673 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1674 if (ret < 0)
1675 return ret;
1676 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1677 len = 1;
1678 if (put_user_u32(len, optlen)
1679 || put_user_u8(val, optval_addr))
1680 return -TARGET_EFAULT;
1681 } else {
1682 if (len > sizeof(int))
1683 len = sizeof(int);
1684 if (put_user_u32(len, optlen)
1685 || put_user_u32(val, optval_addr))
1686 return -TARGET_EFAULT;
1688 break;
1689 default:
1690 ret = -TARGET_ENOPROTOOPT;
1691 break;
1693 break;
1694 default:
1695 unimplemented:
1696 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1697 level, optname);
1698 ret = -TARGET_EOPNOTSUPP;
1699 break;
1701 return ret;
1704 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1705 int count, int copy)
1707 struct target_iovec *target_vec;
1708 struct iovec *vec;
1709 abi_ulong total_len, max_len;
1710 int i;
1711 int err = 0;
1713 if (count == 0) {
1714 errno = 0;
1715 return NULL;
1717 if (count < 0 || count > IOV_MAX) {
1718 errno = EINVAL;
1719 return NULL;
1722 vec = calloc(count, sizeof(struct iovec));
1723 if (vec == NULL) {
1724 errno = ENOMEM;
1725 return NULL;
1728 target_vec = lock_user(VERIFY_READ, target_addr,
1729 count * sizeof(struct target_iovec), 1);
1730 if (target_vec == NULL) {
1731 err = EFAULT;
1732 goto fail2;
1735 /* ??? If host page size > target page size, this will result in a
1736 value larger than what we can actually support. */
1737 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1738 total_len = 0;
1740 for (i = 0; i < count; i++) {
1741 abi_ulong base = tswapal(target_vec[i].iov_base);
1742 abi_long len = tswapal(target_vec[i].iov_len);
1744 if (len < 0) {
1745 err = EINVAL;
1746 goto fail;
1747 } else if (len == 0) {
1748 /* Zero length pointer is ignored. */
1749 vec[i].iov_base = 0;
1750 } else {
1751 vec[i].iov_base = lock_user(type, base, len, copy);
1752 if (!vec[i].iov_base) {
1753 err = EFAULT;
1754 goto fail;
1756 if (len > max_len - total_len) {
1757 len = max_len - total_len;
1760 vec[i].iov_len = len;
1761 total_len += len;
1764 unlock_user(target_vec, target_addr, 0);
1765 return vec;
1767 fail:
1768 unlock_user(target_vec, target_addr, 0);
1769 fail2:
1770 free(vec);
1771 errno = err;
1772 return NULL;
1775 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1776 int count, int copy)
1778 struct target_iovec *target_vec;
1779 int i;
1781 target_vec = lock_user(VERIFY_READ, target_addr,
1782 count * sizeof(struct target_iovec), 1);
1783 if (target_vec) {
1784 for (i = 0; i < count; i++) {
1785 abi_ulong base = tswapal(target_vec[i].iov_base);
1786 abi_long len = tswapal(target_vec[i].iov_base);
1787 if (len < 0) {
1788 break;
1790 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1792 unlock_user(target_vec, target_addr, 0);
1795 free(vec);
1798 static inline int target_to_host_sock_type(int *type)
1800 int host_type = 0;
1801 int target_type = *type;
1803 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1804 case TARGET_SOCK_DGRAM:
1805 host_type = SOCK_DGRAM;
1806 break;
1807 case TARGET_SOCK_STREAM:
1808 host_type = SOCK_STREAM;
1809 break;
1810 default:
1811 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1812 break;
1814 if (target_type & TARGET_SOCK_CLOEXEC) {
1815 #if defined(SOCK_CLOEXEC)
1816 host_type |= SOCK_CLOEXEC;
1817 #else
1818 return -TARGET_EINVAL;
1819 #endif
1821 if (target_type & TARGET_SOCK_NONBLOCK) {
1822 #if defined(SOCK_NONBLOCK)
1823 host_type |= SOCK_NONBLOCK;
1824 #elif !defined(O_NONBLOCK)
1825 return -TARGET_EINVAL;
1826 #endif
1828 *type = host_type;
1829 return 0;
1832 /* Try to emulate socket type flags after socket creation. */
1833 static int sock_flags_fixup(int fd, int target_type)
1835 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1836 if (target_type & TARGET_SOCK_NONBLOCK) {
1837 int flags = fcntl(fd, F_GETFL);
1838 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1839 close(fd);
1840 return -TARGET_EINVAL;
1843 #endif
1844 return fd;
1847 /* do_socket() Must return target values and target errnos. */
1848 static abi_long do_socket(int domain, int type, int protocol)
1850 int target_type = type;
1851 int ret;
1853 ret = target_to_host_sock_type(&type);
1854 if (ret) {
1855 return ret;
1858 if (domain == PF_NETLINK)
1859 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1860 ret = get_errno(socket(domain, type, protocol));
1861 if (ret >= 0) {
1862 ret = sock_flags_fixup(ret, target_type);
1864 return ret;
1867 /* do_bind() Must return target values and target errnos. */
1868 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1869 socklen_t addrlen)
1871 void *addr;
1872 abi_long ret;
1874 if ((int)addrlen < 0) {
1875 return -TARGET_EINVAL;
1878 addr = alloca(addrlen+1);
1880 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1881 if (ret)
1882 return ret;
1884 return get_errno(bind(sockfd, addr, addrlen));
1887 /* do_connect() Must return target values and target errnos. */
1888 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1889 socklen_t addrlen)
1891 void *addr;
1892 abi_long ret;
1894 if ((int)addrlen < 0) {
1895 return -TARGET_EINVAL;
1898 addr = alloca(addrlen);
1900 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1901 if (ret)
1902 return ret;
1904 return get_errno(connect(sockfd, addr, addrlen));
1907 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
1908 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
1909 int flags, int send)
1911 abi_long ret, len;
1912 struct msghdr msg;
1913 int count;
1914 struct iovec *vec;
1915 abi_ulong target_vec;
1917 if (msgp->msg_name) {
1918 msg.msg_namelen = tswap32(msgp->msg_namelen);
1919 msg.msg_name = alloca(msg.msg_namelen);
1920 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1921 msg.msg_namelen);
1922 if (ret) {
1923 goto out2;
1925 } else {
1926 msg.msg_name = NULL;
1927 msg.msg_namelen = 0;
1929 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1930 msg.msg_control = alloca(msg.msg_controllen);
1931 msg.msg_flags = tswap32(msgp->msg_flags);
1933 count = tswapal(msgp->msg_iovlen);
1934 target_vec = tswapal(msgp->msg_iov);
1935 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1936 target_vec, count, send);
1937 if (vec == NULL) {
1938 ret = -host_to_target_errno(errno);
1939 goto out2;
1941 msg.msg_iovlen = count;
1942 msg.msg_iov = vec;
1944 if (send) {
1945 ret = target_to_host_cmsg(&msg, msgp);
1946 if (ret == 0)
1947 ret = get_errno(sendmsg(fd, &msg, flags));
1948 } else {
1949 ret = get_errno(recvmsg(fd, &msg, flags));
1950 if (!is_error(ret)) {
1951 len = ret;
1952 ret = host_to_target_cmsg(msgp, &msg);
1953 if (!is_error(ret)) {
1954 msgp->msg_namelen = tswap32(msg.msg_namelen);
1955 if (msg.msg_name != NULL) {
1956 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1957 msg.msg_name, msg.msg_namelen);
1958 if (ret) {
1959 goto out;
1963 ret = len;
1968 out:
1969 unlock_iovec(vec, target_vec, count, !send);
1970 out2:
1971 return ret;
1974 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1975 int flags, int send)
1977 abi_long ret;
1978 struct target_msghdr *msgp;
1980 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1981 msgp,
1982 target_msg,
1983 send ? 1 : 0)) {
1984 return -TARGET_EFAULT;
1986 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
1987 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1988 return ret;
1991 #ifdef TARGET_NR_sendmmsg
1992 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
1993 * so it might not have this *mmsg-specific flag either.
1995 #ifndef MSG_WAITFORONE
1996 #define MSG_WAITFORONE 0x10000
1997 #endif
1999 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2000 unsigned int vlen, unsigned int flags,
2001 int send)
2003 struct target_mmsghdr *mmsgp;
2004 abi_long ret = 0;
2005 int i;
2007 if (vlen > UIO_MAXIOV) {
2008 vlen = UIO_MAXIOV;
2011 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2012 if (!mmsgp) {
2013 return -TARGET_EFAULT;
2016 for (i = 0; i < vlen; i++) {
2017 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2018 if (is_error(ret)) {
2019 break;
2021 mmsgp[i].msg_len = tswap32(ret);
2022 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2023 if (flags & MSG_WAITFORONE) {
2024 flags |= MSG_DONTWAIT;
2028 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2030 /* Return number of datagrams sent if we sent any at all;
2031 * otherwise return the error.
2033 if (i) {
2034 return i;
2036 return ret;
2038 #endif
2040 /* If we don't have a system accept4() then just call accept.
2041 * The callsites to do_accept4() will ensure that they don't
2042 * pass a non-zero flags argument in this config.
2043 */
2044 #ifndef CONFIG_ACCEPT4
2045 static inline int accept4(int sockfd, struct sockaddr *addr,
2046 socklen_t *addrlen, int flags)
2048 assert(flags == 0);
2049 return accept(sockfd, addr, addrlen);
2051 #endif
2053 /* do_accept4() must return target values and target errnos. */
2054 static abi_long do_accept4(int fd, abi_ulong target_addr,
2055 abi_ulong target_addrlen_addr, int flags)
2057 socklen_t addrlen;
2058 void *addr;
2059 abi_long ret;
2061 if (target_addr == 0) {
2062 return get_errno(accept4(fd, NULL, NULL, flags));
2065 /* Linux returns EINVAL if the addrlen pointer is invalid */
2066 if (get_user_u32(addrlen, target_addrlen_addr))
2067 return -TARGET_EINVAL;
2069 if ((int)addrlen < 0) {
2070 return -TARGET_EINVAL;
2073 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2074 return -TARGET_EINVAL;
2076 addr = alloca(addrlen);
2078 ret = get_errno(accept4(fd, addr, &addrlen, flags));
2079 if (!is_error(ret)) {
2080 host_to_target_sockaddr(target_addr, addr, addrlen);
2081 if (put_user_u32(addrlen, target_addrlen_addr))
2082 ret = -TARGET_EFAULT;
2084 return ret;
2087 /* do_getpeername() must return target values and target errnos. */
2088 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2089 abi_ulong target_addrlen_addr)
2091 socklen_t addrlen;
2092 void *addr;
2093 abi_long ret;
2095 if (get_user_u32(addrlen, target_addrlen_addr))
2096 return -TARGET_EFAULT;
2098 if ((int)addrlen < 0) {
2099 return -TARGET_EINVAL;
2102 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2103 return -TARGET_EFAULT;
2105 addr = alloca(addrlen);
2107 ret = get_errno(getpeername(fd, addr, &addrlen));
2108 if (!is_error(ret)) {
2109 host_to_target_sockaddr(target_addr, addr, addrlen);
2110 if (put_user_u32(addrlen, target_addrlen_addr))
2111 ret = -TARGET_EFAULT;
2113 return ret;
2116 /* do_getsockname() must return target values and target errnos. */
2117 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2118 abi_ulong target_addrlen_addr)
2120 socklen_t addrlen;
2121 void *addr;
2122 abi_long ret;
2124 if (get_user_u32(addrlen, target_addrlen_addr))
2125 return -TARGET_EFAULT;
2127 if ((int)addrlen < 0) {
2128 return -TARGET_EINVAL;
2131 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2132 return -TARGET_EFAULT;
2134 addr = alloca(addrlen);
2136 ret = get_errno(getsockname(fd, addr, &addrlen));
2137 if (!is_error(ret)) {
2138 host_to_target_sockaddr(target_addr, addr, addrlen);
2139 if (put_user_u32(addrlen, target_addrlen_addr))
2140 ret = -TARGET_EFAULT;
2142 return ret;
2145 /* do_socketpair() must return target values and target errnos. */
2146 static abi_long do_socketpair(int domain, int type, int protocol,
2147 abi_ulong target_tab_addr)
2149 int tab[2];
2150 abi_long ret;
2152 target_to_host_sock_type(&type);
2154 ret = get_errno(socketpair(domain, type, protocol, tab));
2155 if (!is_error(ret)) {
2156 if (put_user_s32(tab[0], target_tab_addr)
2157 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2158 ret = -TARGET_EFAULT;
2160 return ret;
2163 /* do_sendto() must return target values and target errnos. */
2164 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2165 abi_ulong target_addr, socklen_t addrlen)
2167 void *addr;
2168 void *host_msg;
2169 abi_long ret;
2171 if ((int)addrlen < 0) {
2172 return -TARGET_EINVAL;
2175 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2176 if (!host_msg)
2177 return -TARGET_EFAULT;
2178 if (target_addr) {
2179 addr = alloca(addrlen);
2180 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2181 if (ret) {
2182 unlock_user(host_msg, msg, 0);
2183 return ret;
2185 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2186 } else {
2187 ret = get_errno(send(fd, host_msg, len, flags));
2189 unlock_user(host_msg, msg, 0);
2190 return ret;
2193 /* do_recvfrom() must return target values and target errnos. */
2194 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2195 abi_ulong target_addr,
2196 abi_ulong target_addrlen)
2198 socklen_t addrlen;
2199 void *addr;
2200 void *host_msg;
2201 abi_long ret;
2203 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2204 if (!host_msg)
2205 return -TARGET_EFAULT;
2206 if (target_addr) {
2207 if (get_user_u32(addrlen, target_addrlen)) {
2208 ret = -TARGET_EFAULT;
2209 goto fail;
2211 if ((int)addrlen < 0) {
2212 ret = -TARGET_EINVAL;
2213 goto fail;
2215 addr = alloca(addrlen);
2216 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2217 } else {
2218 addr = NULL; /* To keep compiler quiet. */
2219 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2221 if (!is_error(ret)) {
2222 if (target_addr) {
2223 host_to_target_sockaddr(target_addr, addr, addrlen);
2224 if (put_user_u32(addrlen, target_addrlen)) {
2225 ret = -TARGET_EFAULT;
2226 goto fail;
2229 unlock_user(host_msg, msg, len);
2230 } else {
2231 fail:
2232 unlock_user(host_msg, msg, 0);
2234 return ret;
2237 #ifdef TARGET_NR_socketcall
2238 /* do_socketcall() must return target values and target errnos. */
2239 static abi_long do_socketcall(int num, abi_ulong vptr)
2241 static const unsigned ac[] = { /* number of arguments per call */
2242 [SOCKOP_socket] = 3, /* domain, type, protocol */
2243 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2244 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2245 [SOCKOP_listen] = 2, /* sockfd, backlog */
2246 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2247 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2248 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2249 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2250 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2251 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2252 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2253 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2254 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2255 [SOCKOP_shutdown] = 2, /* sockfd, how */
2256 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2257 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2258 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2259 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2261 abi_long a[6]; /* max 6 args */
2263 /* first, collect the arguments in a[] according to ac[] */
2264 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2265 unsigned i;
2266 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2267 for (i = 0; i < ac[num]; ++i) {
2268 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2269 return -TARGET_EFAULT;
2274 /* now when we have the args, actually handle the call */
2275 switch (num) {
2276 case SOCKOP_socket: /* domain, type, protocol */
2277 return do_socket(a[0], a[1], a[2]);
2278 case SOCKOP_bind: /* sockfd, addr, addrlen */
2279 return do_bind(a[0], a[1], a[2]);
2280 case SOCKOP_connect: /* sockfd, addr, addrlen */
2281 return do_connect(a[0], a[1], a[2]);
2282 case SOCKOP_listen: /* sockfd, backlog */
2283 return get_errno(listen(a[0], a[1]));
2284 case SOCKOP_accept: /* sockfd, addr, addrlen */
2285 return do_accept4(a[0], a[1], a[2], 0);
2286 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2287 return do_accept4(a[0], a[1], a[2], a[3]);
2288 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2289 return do_getsockname(a[0], a[1], a[2]);
2290 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2291 return do_getpeername(a[0], a[1], a[2]);
2292 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2293 return do_socketpair(a[0], a[1], a[2], a[3]);
2294 case SOCKOP_send: /* sockfd, msg, len, flags */
2295 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2296 case SOCKOP_recv: /* sockfd, msg, len, flags */
2297 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2298 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2299 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2300 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2301 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2302 case SOCKOP_shutdown: /* sockfd, how */
2303 return get_errno(shutdown(a[0], a[1]));
2304 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2305 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2306 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2307 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2308 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2309 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2310 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2311 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2312 default:
2313 gemu_log("Unsupported socketcall: %d\n", num);
2314 return -TARGET_ENOSYS;
2317 #endif
2319 #define N_SHM_REGIONS 32
2321 static struct shm_region {
2322 abi_ulong start;
2323 abi_ulong size;
2324 } shm_regions[N_SHM_REGIONS];
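/* Editorial note: shm_regions[] is a small fixed-size table recording the
 * guest address and size of each active shmat() attachment, so that
 * do_shmdt() further below can clear the corresponding page flags when the
 * segment is detached. */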
2326 struct target_semid_ds
2328 struct target_ipc_perm sem_perm;
2329 abi_ulong sem_otime;
2330 abi_ulong __unused1;
2331 abi_ulong sem_ctime;
2332 abi_ulong __unused2;
2333 abi_ulong sem_nsems;
2334 abi_ulong __unused3;
2335 abi_ulong __unused4;
2338 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2339 abi_ulong target_addr)
2341 struct target_ipc_perm *target_ip;
2342 struct target_semid_ds *target_sd;
2344 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2345 return -TARGET_EFAULT;
2346 target_ip = &(target_sd->sem_perm);
2347 host_ip->__key = tswap32(target_ip->__key);
2348 host_ip->uid = tswap32(target_ip->uid);
2349 host_ip->gid = tswap32(target_ip->gid);
2350 host_ip->cuid = tswap32(target_ip->cuid);
2351 host_ip->cgid = tswap32(target_ip->cgid);
2352 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2353 host_ip->mode = tswap32(target_ip->mode);
2354 #else
2355 host_ip->mode = tswap16(target_ip->mode);
2356 #endif
2357 #if defined(TARGET_PPC)
2358 host_ip->__seq = tswap32(target_ip->__seq);
2359 #else
2360 host_ip->__seq = tswap16(target_ip->__seq);
2361 #endif
2362 unlock_user_struct(target_sd, target_addr, 0);
2363 return 0;
2366 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2367 struct ipc_perm *host_ip)
2369 struct target_ipc_perm *target_ip;
2370 struct target_semid_ds *target_sd;
2372 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2373 return -TARGET_EFAULT;
2374 target_ip = &(target_sd->sem_perm);
2375 target_ip->__key = tswap32(host_ip->__key);
2376 target_ip->uid = tswap32(host_ip->uid);
2377 target_ip->gid = tswap32(host_ip->gid);
2378 target_ip->cuid = tswap32(host_ip->cuid);
2379 target_ip->cgid = tswap32(host_ip->cgid);
2380 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2381 target_ip->mode = tswap32(host_ip->mode);
2382 #else
2383 target_ip->mode = tswap16(host_ip->mode);
2384 #endif
2385 #if defined(TARGET_PPC)
2386 target_ip->__seq = tswap32(host_ip->__seq);
2387 #else
2388 target_ip->__seq = tswap16(host_ip->__seq);
2389 #endif
2390 unlock_user_struct(target_sd, target_addr, 1);
2391 return 0;
2394 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2395 abi_ulong target_addr)
2397 struct target_semid_ds *target_sd;
2399 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2400 return -TARGET_EFAULT;
2401 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2402 return -TARGET_EFAULT;
2403 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2404 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2405 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2406 unlock_user_struct(target_sd, target_addr, 0);
2407 return 0;
2410 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2411 struct semid_ds *host_sd)
2413 struct target_semid_ds *target_sd;
2415 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2416 return -TARGET_EFAULT;
2417 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2418 return -TARGET_EFAULT;
2419 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2420 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2421 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2422 unlock_user_struct(target_sd, target_addr, 1);
2423 return 0;
2426 struct target_seminfo {
2427 int semmap;
2428 int semmni;
2429 int semmns;
2430 int semmnu;
2431 int semmsl;
2432 int semopm;
2433 int semume;
2434 int semusz;
2435 int semvmx;
2436 int semaem;
2439 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2440 struct seminfo *host_seminfo)
2442 struct target_seminfo *target_seminfo;
2443 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2444 return -TARGET_EFAULT;
2445 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2446 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2447 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2448 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2449 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2450 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2451 __put_user(host_seminfo->semume, &target_seminfo->semume);
2452 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2453 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2454 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2455 unlock_user_struct(target_seminfo, target_addr, 1);
2456 return 0;
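/* Editorial note: POSIX leaves union semun to be defined by the caller, so
 * it is declared here for the host semctl() calls; target_semun mirrors it
 * with abi_ulong members so guest pointers can be converted explicitly. */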
2459 union semun {
2460 int val;
2461 struct semid_ds *buf;
2462 unsigned short *array;
2463 struct seminfo *__buf;
2466 union target_semun {
2467 int val;
2468 abi_ulong buf;
2469 abi_ulong array;
2470 abi_ulong __buf;
2473 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2474 abi_ulong target_addr)
2476 int nsems;
2477 unsigned short *array;
2478 union semun semun;
2479 struct semid_ds semid_ds;
2480 int i, ret;
2482 semun.buf = &semid_ds;
2484 ret = semctl(semid, 0, IPC_STAT, semun);
2485 if (ret == -1)
2486 return get_errno(ret);
2488 nsems = semid_ds.sem_nsems;
2490 *host_array = malloc(nsems*sizeof(unsigned short));
2491 if (!*host_array) {
2492 return -TARGET_ENOMEM;
2494 array = lock_user(VERIFY_READ, target_addr,
2495 nsems*sizeof(unsigned short), 1);
2496 if (!array) {
2497 free(*host_array);
2498 return -TARGET_EFAULT;
2501 for(i=0; i<nsems; i++) {
2502 __get_user((*host_array)[i], &array[i]);
2504 unlock_user(array, target_addr, 0);
2506 return 0;
2509 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2510 unsigned short **host_array)
2512 int nsems;
2513 unsigned short *array;
2514 union semun semun;
2515 struct semid_ds semid_ds;
2516 int i, ret;
2518 semun.buf = &semid_ds;
2520 ret = semctl(semid, 0, IPC_STAT, semun);
2521 if (ret == -1)
2522 return get_errno(ret);
2524 nsems = semid_ds.sem_nsems;
2526 array = lock_user(VERIFY_WRITE, target_addr,
2527 nsems*sizeof(unsigned short), 0);
2528 if (!array)
2529 return -TARGET_EFAULT;
2531 for(i=0; i<nsems; i++) {
2532 __put_user((*host_array)[i], &array[i]);
2534 free(*host_array);
2535 unlock_user(array, target_addr, 1);
2537 return 0;
2540 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2541 union target_semun target_su)
2543 union semun arg;
2544 struct semid_ds dsarg;
2545 unsigned short *array = NULL;
2546 struct seminfo seminfo;
2547 abi_long ret = -TARGET_EINVAL;
2548 abi_long err;
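/* Editorial note (assumption): the 0xff mask below strips the version bits
 * (e.g. IPC_64) that guests OR into the command word, leaving the raw IPC
 * command so it can be compared against the host GETVAL/SETALL/IPC_STAT
 * constants. */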
2549 cmd &= 0xff;
2551 switch( cmd ) {
2552 case GETVAL:
2553 case SETVAL:
2554 arg.val = tswap32(target_su.val);
2555 ret = get_errno(semctl(semid, semnum, cmd, arg));
2556 target_su.val = tswap32(arg.val);
2557 break;
2558 case GETALL:
2559 case SETALL:
2560 err = target_to_host_semarray(semid, &array, target_su.array);
2561 if (err)
2562 return err;
2563 arg.array = array;
2564 ret = get_errno(semctl(semid, semnum, cmd, arg));
2565 err = host_to_target_semarray(semid, target_su.array, &array);
2566 if (err)
2567 return err;
2568 break;
2569 case IPC_STAT:
2570 case IPC_SET:
2571 case SEM_STAT:
2572 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2573 if (err)
2574 return err;
2575 arg.buf = &dsarg;
2576 ret = get_errno(semctl(semid, semnum, cmd, arg));
2577 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2578 if (err)
2579 return err;
2580 break;
2581 case IPC_INFO:
2582 case SEM_INFO:
2583 arg.__buf = &seminfo;
2584 ret = get_errno(semctl(semid, semnum, cmd, arg));
2585 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2586 if (err)
2587 return err;
2588 break;
2589 case IPC_RMID:
2590 case GETPID:
2591 case GETNCNT:
2592 case GETZCNT:
2593 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2594 break;
2597 return ret;
2600 struct target_sembuf {
2601 unsigned short sem_num;
2602 short sem_op;
2603 short sem_flg;
2606 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2607 abi_ulong target_addr,
2608 unsigned nsops)
2610 struct target_sembuf *target_sembuf;
2611 int i;
2613 target_sembuf = lock_user(VERIFY_READ, target_addr,
2614 nsops*sizeof(struct target_sembuf), 1);
2615 if (!target_sembuf)
2616 return -TARGET_EFAULT;
2618 for(i=0; i<nsops; i++) {
2619 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2620 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2621 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2624 unlock_user(target_sembuf, target_addr, 0);
2626 return 0;
2629 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2631 struct sembuf sops[nsops];
2633 if (target_to_host_sembuf(sops, ptr, nsops))
2634 return -TARGET_EFAULT;
2636 return get_errno(semop(semid, sops, nsops));
2639 struct target_msqid_ds
2641 struct target_ipc_perm msg_perm;
2642 abi_ulong msg_stime;
2643 #if TARGET_ABI_BITS == 32
2644 abi_ulong __unused1;
2645 #endif
2646 abi_ulong msg_rtime;
2647 #if TARGET_ABI_BITS == 32
2648 abi_ulong __unused2;
2649 #endif
2650 abi_ulong msg_ctime;
2651 #if TARGET_ABI_BITS == 32
2652 abi_ulong __unused3;
2653 #endif
2654 abi_ulong __msg_cbytes;
2655 abi_ulong msg_qnum;
2656 abi_ulong msg_qbytes;
2657 abi_ulong msg_lspid;
2658 abi_ulong msg_lrpid;
2659 abi_ulong __unused4;
2660 abi_ulong __unused5;
2663 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2664 abi_ulong target_addr)
2666 struct target_msqid_ds *target_md;
2668 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2669 return -TARGET_EFAULT;
2670 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2671 return -TARGET_EFAULT;
2672 host_md->msg_stime = tswapal(target_md->msg_stime);
2673 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2674 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2675 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2676 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2677 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2678 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2679 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2680 unlock_user_struct(target_md, target_addr, 0);
2681 return 0;
2684 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2685 struct msqid_ds *host_md)
2687 struct target_msqid_ds *target_md;
2689 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2690 return -TARGET_EFAULT;
2691 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2692 return -TARGET_EFAULT;
2693 target_md->msg_stime = tswapal(host_md->msg_stime);
2694 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2695 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2696 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2697 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2698 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2699 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2700 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2701 unlock_user_struct(target_md, target_addr, 1);
2702 return 0;
2705 struct target_msginfo {
2706 int msgpool;
2707 int msgmap;
2708 int msgmax;
2709 int msgmnb;
2710 int msgmni;
2711 int msgssz;
2712 int msgtql;
2713 unsigned short int msgseg;
2716 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2717 struct msginfo *host_msginfo)
2719 struct target_msginfo *target_msginfo;
2720 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2721 return -TARGET_EFAULT;
2722 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2723 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2724 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2725 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2726 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2727 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2728 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2729 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2730 unlock_user_struct(target_msginfo, target_addr, 1);
2731 return 0;
2734 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2736 struct msqid_ds dsarg;
2737 struct msginfo msginfo;
2738 abi_long ret = -TARGET_EINVAL;
2740 cmd &= 0xff;
2742 switch (cmd) {
2743 case IPC_STAT:
2744 case IPC_SET:
2745 case MSG_STAT:
2746 if (target_to_host_msqid_ds(&dsarg,ptr))
2747 return -TARGET_EFAULT;
2748 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2749 if (host_to_target_msqid_ds(ptr,&dsarg))
2750 return -TARGET_EFAULT;
2751 break;
2752 case IPC_RMID:
2753 ret = get_errno(msgctl(msgid, cmd, NULL));
2754 break;
2755 case IPC_INFO:
2756 case MSG_INFO:
2757 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2758 if (host_to_target_msginfo(ptr, &msginfo))
2759 return -TARGET_EFAULT;
2760 break;
2763 return ret;
2766 struct target_msgbuf {
2767 abi_long mtype;
2768 char mtext[1];
2771 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2772 unsigned int msgsz, int msgflg)
2774 struct target_msgbuf *target_mb;
2775 struct msgbuf *host_mb;
2776 abi_long ret = 0;
2778 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2779 return -TARGET_EFAULT;
2780 host_mb = malloc(msgsz + sizeof(long));
if (!host_mb) {
    unlock_user_struct(target_mb, msgp, 0);
    return -TARGET_ENOMEM;
}
2781 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2782 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2783 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2784 free(host_mb);
2785 unlock_user_struct(target_mb, msgp, 0);
2787 return ret;
2790 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2791 unsigned int msgsz, abi_long msgtyp,
2792 int msgflg)
2794 struct target_msgbuf *target_mb;
2795 char *target_mtext;
2796 struct msgbuf *host_mb;
2797 abi_long ret = 0;
2799 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2800 return -TARGET_EFAULT;
2802 host_mb = g_malloc(msgsz+sizeof(long));
2803 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2805 if (ret > 0) {
2806 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2807 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2808 if (!target_mtext) {
2809 ret = -TARGET_EFAULT;
2810 goto end;
2812 memcpy(target_mb->mtext, host_mb->mtext, ret);
2813 unlock_user(target_mtext, target_mtext_addr, ret);
2816 target_mb->mtype = tswapal(host_mb->mtype);
2818 end:
2819 if (target_mb)
2820 unlock_user_struct(target_mb, msgp, 1);
2821 g_free(host_mb);
2822 return ret;
2825 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2826 abi_ulong target_addr)
2828 struct target_shmid_ds *target_sd;
2830 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2831 return -TARGET_EFAULT;
2832 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2833 return -TARGET_EFAULT;
2834 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2835 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2836 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2837 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2838 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2839 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2840 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2841 unlock_user_struct(target_sd, target_addr, 0);
2842 return 0;
2845 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2846 struct shmid_ds *host_sd)
2848 struct target_shmid_ds *target_sd;
2850 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2851 return -TARGET_EFAULT;
2852 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2853 return -TARGET_EFAULT;
2854 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2855 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2856 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2857 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2858 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2859 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2860 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2861 unlock_user_struct(target_sd, target_addr, 1);
2862 return 0;
2865 struct target_shminfo {
2866 abi_ulong shmmax;
2867 abi_ulong shmmin;
2868 abi_ulong shmmni;
2869 abi_ulong shmseg;
2870 abi_ulong shmall;
2873 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2874 struct shminfo *host_shminfo)
2876 struct target_shminfo *target_shminfo;
2877 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2878 return -TARGET_EFAULT;
2879 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2880 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2881 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2882 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2883 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2884 unlock_user_struct(target_shminfo, target_addr, 1);
2885 return 0;
2888 struct target_shm_info {
2889 int used_ids;
2890 abi_ulong shm_tot;
2891 abi_ulong shm_rss;
2892 abi_ulong shm_swp;
2893 abi_ulong swap_attempts;
2894 abi_ulong swap_successes;
2897 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2898 struct shm_info *host_shm_info)
2900 struct target_shm_info *target_shm_info;
2901 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2902 return -TARGET_EFAULT;
2903 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2904 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2905 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2906 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2907 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2908 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2909 unlock_user_struct(target_shm_info, target_addr, 1);
2910 return 0;
2913 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2915 struct shmid_ds dsarg;
2916 struct shminfo shminfo;
2917 struct shm_info shm_info;
2918 abi_long ret = -TARGET_EINVAL;
2920 cmd &= 0xff;
2922 switch(cmd) {
2923 case IPC_STAT:
2924 case IPC_SET:
2925 case SHM_STAT:
2926 if (target_to_host_shmid_ds(&dsarg, buf))
2927 return -TARGET_EFAULT;
2928 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2929 if (host_to_target_shmid_ds(buf, &dsarg))
2930 return -TARGET_EFAULT;
2931 break;
2932 case IPC_INFO:
2933 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2934 if (host_to_target_shminfo(buf, &shminfo))
2935 return -TARGET_EFAULT;
2936 break;
2937 case SHM_INFO:
2938 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2939 if (host_to_target_shm_info(buf, &shm_info))
2940 return -TARGET_EFAULT;
2941 break;
2942 case IPC_RMID:
2943 case SHM_LOCK:
2944 case SHM_UNLOCK:
2945 ret = get_errno(shmctl(shmid, cmd, NULL));
2946 break;
2949 return ret;
2952 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2954 abi_long raddr;
2955 void *host_raddr;
2956 struct shmid_ds shm_info;
2957 int i,ret;
2959 /* find out the length of the shared memory segment */
2960 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2961 if (is_error(ret)) {
2962 /* can't get length, bail out */
2963 return ret;
2966 mmap_lock();
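/* Editorial note (rationale inferred): if the guest supplied an address we
 * attach there directly; otherwise we pick a free spot in the guest address
 * space with mmap_find_vma() and attach with SHM_REMAP so that shmat() will
 * replace whatever host mapping already covers that region. */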
2968 if (shmaddr)
2969 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2970 else {
2971 abi_ulong mmap_start;
2973 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2975 if (mmap_start == -1) {
2976 errno = ENOMEM;
2977 host_raddr = (void *)-1;
2978 } else
2979 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2982 if (host_raddr == (void *)-1) {
2983 mmap_unlock();
2984 return get_errno((long)host_raddr);
2986 raddr=h2g((unsigned long)host_raddr);
2988 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2989 PAGE_VALID | PAGE_READ |
2990 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2992 for (i = 0; i < N_SHM_REGIONS; i++) {
2993 if (shm_regions[i].start == 0) {
2994 shm_regions[i].start = raddr;
2995 shm_regions[i].size = shm_info.shm_segsz;
2996 break;
3000 mmap_unlock();
3001 return raddr;
3005 static inline abi_long do_shmdt(abi_ulong shmaddr)
3007 int i;
3009 for (i = 0; i < N_SHM_REGIONS; ++i) {
3010 if (shm_regions[i].start == shmaddr) {
3011 shm_regions[i].start = 0;
3012 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3013 break;
3017 return get_errno(shmdt(g2h(shmaddr)));
3020 #ifdef TARGET_NR_ipc
3021 /* ??? This only works with linear mappings. */
3022 /* do_ipc() must return target values and target errnos. */
3023 static abi_long do_ipc(unsigned int call, int first,
3024 int second, int third,
3025 abi_long ptr, abi_long fifth)
3027 int version;
3028 abi_long ret = 0;
3030 version = call >> 16;
3031 call &= 0xffff;
3033 switch (call) {
3034 case IPCOP_semop:
3035 ret = do_semop(first, ptr, second);
3036 break;
3038 case IPCOP_semget:
3039 ret = get_errno(semget(first, second, third));
3040 break;
3042 case IPCOP_semctl:
3043 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3044 break;
3046 case IPCOP_msgget:
3047 ret = get_errno(msgget(first, second));
3048 break;
3050 case IPCOP_msgsnd:
3051 ret = do_msgsnd(first, ptr, second, third);
3052 break;
3054 case IPCOP_msgctl:
3055 ret = do_msgctl(first, second, ptr);
3056 break;
3058 case IPCOP_msgrcv:
3059 switch (version) {
3060 case 0:
3062 struct target_ipc_kludge {
3063 abi_long msgp;
3064 abi_long msgtyp;
3065 } *tmp;
3067 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3068 ret = -TARGET_EFAULT;
3069 break;
3072 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3074 unlock_user_struct(tmp, ptr, 0);
3075 break;
3077 default:
3078 ret = do_msgrcv(first, ptr, second, fifth, third);
3080 break;
3082 case IPCOP_shmat:
3083 switch (version) {
3084 default:
3086 abi_ulong raddr;
3087 raddr = do_shmat(first, ptr, second);
3088 if (is_error(raddr))
3089 return get_errno(raddr);
3090 if (put_user_ual(raddr, third))
3091 return -TARGET_EFAULT;
3092 break;
3094 case 1:
3095 ret = -TARGET_EINVAL;
3096 break;
3098 break;
3099 case IPCOP_shmdt:
3100 ret = do_shmdt(ptr);
3101 break;
3103 case IPCOP_shmget:
3104 /* IPC_* flag values are the same on all Linux platforms */
3105 ret = get_errno(shmget(first, second, third));
3106 break;
3108 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3109 case IPCOP_shmctl:
3110 ret = do_shmctl(first, second, ptr);
3111 break;
3112 default:
3113 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3114 ret = -TARGET_ENOSYS;
3115 break;
3117 return ret;
3119 #endif
3121 /* kernel structure types definitions */
3123 #define STRUCT(name, ...) STRUCT_ ## name,
3124 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3125 enum {
3126 #include "syscall_types.h"
3128 #undef STRUCT
3129 #undef STRUCT_SPECIAL
3131 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3132 #define STRUCT_SPECIAL(name)
3133 #include "syscall_types.h"
3134 #undef STRUCT
3135 #undef STRUCT_SPECIAL
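/* Editorial note: syscall_types.h is included twice above, X-macro style.
 * The first pass turns each STRUCT() line into an enum value (STRUCT_name);
 * the second pass emits a struct_name_def[] argtype descriptor that the
 * thunk conversion code uses when marshalling ioctl arguments. */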
3137 typedef struct IOCTLEntry IOCTLEntry;
3139 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3140 int fd, abi_long cmd, abi_long arg);
3142 struct IOCTLEntry {
3143 unsigned int target_cmd;
3144 unsigned int host_cmd;
3145 const char *name;
3146 int access;
3147 do_ioctl_fn *do_ioctl;
3148 const argtype arg_type[5];
3151 #define IOC_R 0x0001
3152 #define IOC_W 0x0002
3153 #define IOC_RW (IOC_R | IOC_W)
3155 #define MAX_STRUCT_SIZE 4096
3157 #ifdef CONFIG_FIEMAP
3158 /* So fiemap access checks don't overflow on 32 bit systems.
3159 * This is very slightly smaller than the limit imposed by
3160 * the underlying kernel.
3161 */
3162 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3163 / sizeof(struct fiemap_extent))
3165 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3166 int fd, abi_long cmd, abi_long arg)
3168 /* The parameter for this ioctl is a struct fiemap followed
3169 * by an array of struct fiemap_extent whose size is set
3170 * in fiemap->fm_extent_count. The array is filled in by the
3171 * ioctl.
3172 */
3173 int target_size_in, target_size_out;
3174 struct fiemap *fm;
3175 const argtype *arg_type = ie->arg_type;
3176 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3177 void *argptr, *p;
3178 abi_long ret;
3179 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3180 uint32_t outbufsz;
3181 int free_fm = 0;
3183 assert(arg_type[0] == TYPE_PTR);
3184 assert(ie->access == IOC_RW);
3185 arg_type++;
3186 target_size_in = thunk_type_size(arg_type, 0);
3187 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3188 if (!argptr) {
3189 return -TARGET_EFAULT;
3191 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3192 unlock_user(argptr, arg, 0);
3193 fm = (struct fiemap *)buf_temp;
3194 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3195 return -TARGET_EINVAL;
3198 outbufsz = sizeof (*fm) +
3199 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3201 if (outbufsz > MAX_STRUCT_SIZE) {
3202 /* We can't fit all the extents into the fixed size buffer.
3203 * Allocate one that is large enough and use it instead.
3204 */
3205 fm = malloc(outbufsz);
3206 if (!fm) {
3207 return -TARGET_ENOMEM;
3209 memcpy(fm, buf_temp, sizeof(struct fiemap));
3210 free_fm = 1;
3212 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3213 if (!is_error(ret)) {
3214 target_size_out = target_size_in;
3215 /* An extent_count of 0 means we were only counting the extents
3216 * so there are no structs to copy
3217 */
3218 if (fm->fm_extent_count != 0) {
3219 target_size_out += fm->fm_mapped_extents * extent_size;
3221 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3222 if (!argptr) {
3223 ret = -TARGET_EFAULT;
3224 } else {
3225 /* Convert the struct fiemap */
3226 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3227 if (fm->fm_extent_count != 0) {
3228 p = argptr + target_size_in;
3229 /* ...and then all the struct fiemap_extents */
3230 for (i = 0; i < fm->fm_mapped_extents; i++) {
3231 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3232 THUNK_TARGET);
3233 p += extent_size;
3236 unlock_user(argptr, arg, target_size_out);
3239 if (free_fm) {
3240 free(fm);
3242 return ret;
3244 #endif
3246 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3247 int fd, abi_long cmd, abi_long arg)
3249 const argtype *arg_type = ie->arg_type;
3250 int target_size;
3251 void *argptr;
3252 int ret;
3253 struct ifconf *host_ifconf;
3254 uint32_t outbufsz;
3255 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3256 int target_ifreq_size;
3257 int nb_ifreq;
3258 int free_buf = 0;
3259 int i;
3260 int target_ifc_len;
3261 abi_long target_ifc_buf;
3262 int host_ifc_len;
3263 char *host_ifc_buf;
3265 assert(arg_type[0] == TYPE_PTR);
3266 assert(ie->access == IOC_RW);
3268 arg_type++;
3269 target_size = thunk_type_size(arg_type, 0);
3271 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3272 if (!argptr)
3273 return -TARGET_EFAULT;
3274 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3275 unlock_user(argptr, arg, 0);
3277 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3278 target_ifc_len = host_ifconf->ifc_len;
3279 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3281 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3282 nb_ifreq = target_ifc_len / target_ifreq_size;
3283 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3285 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3286 if (outbufsz > MAX_STRUCT_SIZE) {
3287 /* We can't fit all the ifreq entries into the fixed size buffer.
3288 * Allocate one that is large enough and use it instead.
3289 */
3290 host_ifconf = malloc(outbufsz);
3291 if (!host_ifconf) {
3292 return -TARGET_ENOMEM;
3294 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3295 free_buf = 1;
3297 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3299 host_ifconf->ifc_len = host_ifc_len;
3300 host_ifconf->ifc_buf = host_ifc_buf;
3302 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3303 if (!is_error(ret)) {
3304 /* convert host ifc_len to target ifc_len */
3306 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3307 target_ifc_len = nb_ifreq * target_ifreq_size;
3308 host_ifconf->ifc_len = target_ifc_len;
3310 /* restore target ifc_buf */
3312 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3314 /* copy struct ifconf to target user */
3316 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3317 if (!argptr)
3318 return -TARGET_EFAULT;
3319 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3320 unlock_user(argptr, arg, target_size);
3322 /* copy ifreq[] to target user */
3324 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3325 for (i = 0; i < nb_ifreq ; i++) {
3326 thunk_convert(argptr + i * target_ifreq_size,
3327 host_ifc_buf + i * sizeof(struct ifreq),
3328 ifreq_arg_type, THUNK_TARGET);
3330 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3333 if (free_buf) {
3334 free(host_ifconf);
3337 return ret;
3340 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3341 abi_long cmd, abi_long arg)
3343 void *argptr;
3344 struct dm_ioctl *host_dm;
3345 abi_long guest_data;
3346 uint32_t guest_data_size;
3347 int target_size;
3348 const argtype *arg_type = ie->arg_type;
3349 abi_long ret;
3350 void *big_buf = NULL;
3351 char *host_data;
3353 arg_type++;
3354 target_size = thunk_type_size(arg_type, 0);
3355 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3356 if (!argptr) {
3357 ret = -TARGET_EFAULT;
3358 goto out;
3360 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3361 unlock_user(argptr, arg, 0);
3363 /* buf_temp is too small, so fetch things into a bigger buffer */
3364 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3365 memcpy(big_buf, buf_temp, target_size);
3366 buf_temp = big_buf;
3367 host_dm = big_buf;
3369 guest_data = arg + host_dm->data_start;
3370 if ((guest_data - arg) < 0) {
3371 ret = -EINVAL;
3372 goto out;
3374 guest_data_size = host_dm->data_size - host_dm->data_start;
3375 host_data = (char*)host_dm + host_dm->data_start;
3377 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3378 switch (ie->host_cmd) {
3379 case DM_REMOVE_ALL:
3380 case DM_LIST_DEVICES:
3381 case DM_DEV_CREATE:
3382 case DM_DEV_REMOVE:
3383 case DM_DEV_SUSPEND:
3384 case DM_DEV_STATUS:
3385 case DM_DEV_WAIT:
3386 case DM_TABLE_STATUS:
3387 case DM_TABLE_CLEAR:
3388 case DM_TABLE_DEPS:
3389 case DM_LIST_VERSIONS:
3390 /* no input data */
3391 break;
3392 case DM_DEV_RENAME:
3393 case DM_DEV_SET_GEOMETRY:
3394 /* data contains only strings */
3395 memcpy(host_data, argptr, guest_data_size);
3396 break;
3397 case DM_TARGET_MSG:
3398 memcpy(host_data, argptr, guest_data_size);
3399 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3400 break;
3401 case DM_TABLE_LOAD:
3403 void *gspec = argptr;
3404 void *cur_data = host_data;
3405 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3406 int spec_size = thunk_type_size(arg_type, 0);
3407 int i;
3409 for (i = 0; i < host_dm->target_count; i++) {
3410 struct dm_target_spec *spec = cur_data;
3411 uint32_t next;
3412 int slen;
3414 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3415 slen = strlen((char*)gspec + spec_size) + 1;
3416 next = spec->next;
3417 spec->next = sizeof(*spec) + slen;
3418 strcpy((char*)&spec[1], gspec + spec_size);
3419 gspec += next;
3420 cur_data += spec->next;
3422 break;
3424 default:
3425 ret = -TARGET_EINVAL;
3426 goto out;
3428 unlock_user(argptr, guest_data, 0);
3430 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3431 if (!is_error(ret)) {
3432 guest_data = arg + host_dm->data_start;
3433 guest_data_size = host_dm->data_size - host_dm->data_start;
3434 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3435 switch (ie->host_cmd) {
3436 case DM_REMOVE_ALL:
3437 case DM_DEV_CREATE:
3438 case DM_DEV_REMOVE:
3439 case DM_DEV_RENAME:
3440 case DM_DEV_SUSPEND:
3441 case DM_DEV_STATUS:
3442 case DM_TABLE_LOAD:
3443 case DM_TABLE_CLEAR:
3444 case DM_TARGET_MSG:
3445 case DM_DEV_SET_GEOMETRY:
3446 /* no return data */
3447 break;
3448 case DM_LIST_DEVICES:
3450 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3451 uint32_t remaining_data = guest_data_size;
3452 void *cur_data = argptr;
3453 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3454 int nl_size = 12; /* can't use thunk_size due to alignment */
3456 while (1) {
3457 uint32_t next = nl->next;
3458 if (next) {
3459 nl->next = nl_size + (strlen(nl->name) + 1);
3461 if (remaining_data < nl->next) {
3462 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3463 break;
3465 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3466 strcpy(cur_data + nl_size, nl->name);
3467 cur_data += nl->next;
3468 remaining_data -= nl->next;
3469 if (!next) {
3470 break;
3472 nl = (void*)nl + next;
3474 break;
3476 case DM_DEV_WAIT:
3477 case DM_TABLE_STATUS:
3479 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3480 void *cur_data = argptr;
3481 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3482 int spec_size = thunk_type_size(arg_type, 0);
3483 int i;
3485 for (i = 0; i < host_dm->target_count; i++) {
3486 uint32_t next = spec->next;
3487 int slen = strlen((char*)&spec[1]) + 1;
3488 spec->next = (cur_data - argptr) + spec_size + slen;
3489 if (guest_data_size < spec->next) {
3490 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3491 break;
3493 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3494 strcpy(cur_data + spec_size, (char*)&spec[1]);
3495 cur_data = argptr + spec->next;
3496 spec = (void*)host_dm + host_dm->data_start + next;
3498 break;
3500 case DM_TABLE_DEPS:
3502 void *hdata = (void*)host_dm + host_dm->data_start;
3503 int count = *(uint32_t*)hdata;
3504 uint64_t *hdev = hdata + 8;
3505 uint64_t *gdev = argptr + 8;
3506 int i;
3508 *(uint32_t*)argptr = tswap32(count);
3509 for (i = 0; i < count; i++) {
3510 *gdev = tswap64(*hdev);
3511 gdev++;
3512 hdev++;
3514 break;
3516 case DM_LIST_VERSIONS:
3518 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3519 uint32_t remaining_data = guest_data_size;
3520 void *cur_data = argptr;
3521 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3522 int vers_size = thunk_type_size(arg_type, 0);
3524 while (1) {
3525 uint32_t next = vers->next;
3526 if (next) {
3527 vers->next = vers_size + (strlen(vers->name) + 1);
3529 if (remaining_data < vers->next) {
3530 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3531 break;
3533 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3534 strcpy(cur_data + vers_size, vers->name);
3535 cur_data += vers->next;
3536 remaining_data -= vers->next;
3537 if (!next) {
3538 break;
3540 vers = (void*)vers + next;
3542 break;
3544 default:
3545 ret = -TARGET_EINVAL;
3546 goto out;
3548 unlock_user(argptr, guest_data, guest_data_size);
3550 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3551 if (!argptr) {
3552 ret = -TARGET_EFAULT;
3553 goto out;
3555 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3556 unlock_user(argptr, arg, target_size);
3558 out:
3559 g_free(big_buf);
3560 return ret;
3563 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3564 int fd, abi_long cmd, abi_long arg)
3566 const argtype *arg_type = ie->arg_type;
3567 const StructEntry *se;
3568 const argtype *field_types;
3569 const int *dst_offsets, *src_offsets;
3570 int target_size;
3571 void *argptr;
3572 abi_ulong *target_rt_dev_ptr;
3573 unsigned long *host_rt_dev_ptr;
3574 abi_long ret;
3575 int i;
3577 assert(ie->access == IOC_W);
3578 assert(*arg_type == TYPE_PTR);
3579 arg_type++;
3580 assert(*arg_type == TYPE_STRUCT);
3581 target_size = thunk_type_size(arg_type, 0);
3582 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3583 if (!argptr) {
3584 return -TARGET_EFAULT;
3586 arg_type++;
3587 assert(*arg_type == (int)STRUCT_rtentry);
3588 se = struct_entries + *arg_type++;
3589 assert(se->convert[0] == NULL);
3590 /* convert struct here to be able to catch rt_dev string */
3591 field_types = se->field_types;
3592 dst_offsets = se->field_offsets[THUNK_HOST];
3593 src_offsets = se->field_offsets[THUNK_TARGET];
3594 for (i = 0; i < se->nb_fields; i++) {
3595 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3596 assert(*field_types == TYPE_PTRVOID);
3597 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3598 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3599 if (*target_rt_dev_ptr != 0) {
3600 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3601 tswapal(*target_rt_dev_ptr));
3602 if (!*host_rt_dev_ptr) {
3603 unlock_user(argptr, arg, 0);
3604 return -TARGET_EFAULT;
3606 } else {
3607 *host_rt_dev_ptr = 0;
3609 field_types++;
3610 continue;
3612 field_types = thunk_convert(buf_temp + dst_offsets[i],
3613 argptr + src_offsets[i],
3614 field_types, THUNK_HOST);
3616 unlock_user(argptr, arg, 0);
3618 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3619 if (*host_rt_dev_ptr != 0) {
3620 unlock_user((void *)*host_rt_dev_ptr,
3621 *target_rt_dev_ptr, 0);
3623 return ret;
3626 static IOCTLEntry ioctl_entries[] = {
3627 #define IOCTL(cmd, access, ...) \
3628 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3629 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3630 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3631 #include "ioctls.h"
3632 { 0, 0, },
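/* Editorial note: ioctl_entries[] is generated from ioctls.h; each
 * IOCTL()/IOCTL_SPECIAL() line becomes one table entry mapping a target
 * ioctl number to the host number, plus its access mode and argument
 * description.  The zero entry above terminates the table, which
 * do_ioctl() below searches linearly. */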
3635 /* ??? Implement proper locking for ioctls. */
3636 /* do_ioctl() must return target values and target errnos. */
3637 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3639 const IOCTLEntry *ie;
3640 const argtype *arg_type;
3641 abi_long ret;
3642 uint8_t buf_temp[MAX_STRUCT_SIZE];
3643 int target_size;
3644 void *argptr;
3646 ie = ioctl_entries;
3647 for(;;) {
3648 if (ie->target_cmd == 0) {
3649 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3650 return -TARGET_ENOSYS;
3652 if (ie->target_cmd == cmd)
3653 break;
3654 ie++;
3656 arg_type = ie->arg_type;
3657 #if defined(DEBUG)
3658 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3659 #endif
3660 if (ie->do_ioctl) {
3661 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
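/* Editorial note: entries without a custom do_ioctl handler fall through to
 * the generic path below, which marshals the argument according to its
 * argtype descriptor: IOC_W copies guest->host before the ioctl, IOC_R
 * copies host->guest afterwards, and IOC_RW does both. */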
3664 switch(arg_type[0]) {
3665 case TYPE_NULL:
3666 /* no argument */
3667 ret = get_errno(ioctl(fd, ie->host_cmd));
3668 break;
3669 case TYPE_PTRVOID:
3670 case TYPE_INT:
3671 /* int argument */
3672 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3673 break;
3674 case TYPE_PTR:
3675 arg_type++;
3676 target_size = thunk_type_size(arg_type, 0);
3677 switch(ie->access) {
3678 case IOC_R:
3679 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3680 if (!is_error(ret)) {
3681 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3682 if (!argptr)
3683 return -TARGET_EFAULT;
3684 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3685 unlock_user(argptr, arg, target_size);
3687 break;
3688 case IOC_W:
3689 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3690 if (!argptr)
3691 return -TARGET_EFAULT;
3692 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3693 unlock_user(argptr, arg, 0);
3694 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3695 break;
3696 default:
3697 case IOC_RW:
3698 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3699 if (!argptr)
3700 return -TARGET_EFAULT;
3701 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3702 unlock_user(argptr, arg, 0);
3703 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3704 if (!is_error(ret)) {
3705 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3706 if (!argptr)
3707 return -TARGET_EFAULT;
3708 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3709 unlock_user(argptr, arg, target_size);
3711 break;
3713 break;
3714 default:
3715 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3716 (long)cmd, arg_type[0]);
3717 ret = -TARGET_ENOSYS;
3718 break;
3720 return ret;
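/* Editorial note (column order assumed): each bitmask_transtbl entry below
 * pairs a target flag mask/value with the equivalent host mask/value, i.e.
 * { target_mask, target_bits, host_mask, host_bits }.  The
 * target_to_host_bitmask()/host_to_target_bitmask() helpers walk these
 * tables to translate termios and mmap flag words. */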
3723 static const bitmask_transtbl iflag_tbl[] = {
3724 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3725 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3726 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3727 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3728 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3729 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3730 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3731 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3732 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3733 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3734 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3735 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3736 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3737 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3738 { 0, 0, 0, 0 }
3741 static const bitmask_transtbl oflag_tbl[] = {
3742 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3743 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3744 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3745 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3746 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3747 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3748 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3749 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3750 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3751 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3752 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3753 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3754 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3755 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3756 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3757 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3758 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3759 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3760 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3761 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3762 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3763 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3764 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3765 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3766 { 0, 0, 0, 0 }
3769 static const bitmask_transtbl cflag_tbl[] = {
3770 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3771 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3772 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3773 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3774 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3775 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3776 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3777 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3778 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3779 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3780 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3781 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3782 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3783 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3784 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3785 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3786 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3787 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3788 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3789 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3790 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3791 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3792 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3793 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3794 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3795 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3796 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3797 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3798 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3799 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3800 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3801 { 0, 0, 0, 0 }
3804 static const bitmask_transtbl lflag_tbl[] = {
3805 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3806 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3807 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3808 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3809 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3810 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3811 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3812 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3813 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3814 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3815 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3816 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3817 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3818 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3819 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3820 { 0, 0, 0, 0 }
3823 static void target_to_host_termios (void *dst, const void *src)
3825 struct host_termios *host = dst;
3826 const struct target_termios *target = src;
3828 host->c_iflag =
3829 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3830 host->c_oflag =
3831 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3832 host->c_cflag =
3833 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3834 host->c_lflag =
3835 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3836 host->c_line = target->c_line;
3838 memset(host->c_cc, 0, sizeof(host->c_cc));
3839 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3840 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3841 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3842 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3843 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3844 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3845 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3846 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3847 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3848 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3849 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3850 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3851 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3852 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3853 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3854 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3855 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3858 static void host_to_target_termios (void *dst, const void *src)
3860 struct target_termios *target = dst;
3861 const struct host_termios *host = src;
3863 target->c_iflag =
3864 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3865 target->c_oflag =
3866 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3867 target->c_cflag =
3868 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3869 target->c_lflag =
3870 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3871 target->c_line = host->c_line;
3873 memset(target->c_cc, 0, sizeof(target->c_cc));
3874 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3875 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3876 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3877 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3878 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3879 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3880 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3881 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3882 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3883 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3884 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3885 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3886 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3887 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3888 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3889 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3890 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3893 static const StructEntry struct_termios_def = {
3894 .convert = { host_to_target_termios, target_to_host_termios },
3895 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3896 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3899 static bitmask_transtbl mmap_flags_tbl[] = {
3900 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3901 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3902 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3903 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3904 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3905 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3906 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3907 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3908 { 0, 0, 0, 0 }
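/*
 * Roughly speaking, each bitmask_transtbl row is
 * { target_mask, target_bits, host_mask, host_bits }: when
 * (value & target_mask) == target_bits, host_bits is OR'd into the
 * converted value (and symmetrically for host_to_target_bitmask()).
 * For example, a guest mmap() passing TARGET_MAP_PRIVATE |
 * TARGET_MAP_ANONYMOUS comes out as host MAP_PRIVATE | MAP_ANONYMOUS,
 * whatever the numeric values of those flags are on either side.
 */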
3911 #if defined(TARGET_I386)
3913 /* NOTE: there is really only one LDT shared by all threads */
3914 static uint8_t *ldt_table;
3916 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3918 int size;
3919 void *p;
3921 if (!ldt_table)
3922 return 0;
3923 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3924 if (size > bytecount)
3925 size = bytecount;
3926 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3927 if (!p)
3928 return -TARGET_EFAULT;
3929 /* ??? Should this be byteswapped? */
3930 memcpy(p, ldt_table, size);
3931 unlock_user(p, ptr, size);
3932 return size;
3935 /* XXX: add locking support */
3936 static abi_long write_ldt(CPUX86State *env,
3937 abi_ulong ptr, unsigned long bytecount, int oldmode)
3939 struct target_modify_ldt_ldt_s ldt_info;
3940 struct target_modify_ldt_ldt_s *target_ldt_info;
3941 int seg_32bit, contents, read_exec_only, limit_in_pages;
3942 int seg_not_present, useable, lm;
3943 uint32_t *lp, entry_1, entry_2;
3945 if (bytecount != sizeof(ldt_info))
3946 return -TARGET_EINVAL;
3947 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3948 return -TARGET_EFAULT;
3949 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3950 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3951 ldt_info.limit = tswap32(target_ldt_info->limit);
3952 ldt_info.flags = tswap32(target_ldt_info->flags);
3953 unlock_user_struct(target_ldt_info, ptr, 0);
3955 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3956 return -TARGET_EINVAL;
3957 seg_32bit = ldt_info.flags & 1;
3958 contents = (ldt_info.flags >> 1) & 3;
3959 read_exec_only = (ldt_info.flags >> 3) & 1;
3960 limit_in_pages = (ldt_info.flags >> 4) & 1;
3961 seg_not_present = (ldt_info.flags >> 5) & 1;
3962 useable = (ldt_info.flags >> 6) & 1;
3963 #ifdef TARGET_ABI32
3964 lm = 0;
3965 #else
3966 lm = (ldt_info.flags >> 7) & 1;
3967 #endif
3968 if (contents == 3) {
3969 if (oldmode)
3970 return -TARGET_EINVAL;
3971 if (seg_not_present == 0)
3972 return -TARGET_EINVAL;
3974 /* allocate the LDT */
3975 if (!ldt_table) {
3976 env->ldt.base = target_mmap(0,
3977 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3978 PROT_READ|PROT_WRITE,
3979 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3980 if (env->ldt.base == -1)
3981 return -TARGET_ENOMEM;
3982 memset(g2h(env->ldt.base), 0,
3983 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3984 env->ldt.limit = 0xffff;
3985 ldt_table = g2h(env->ldt.base);
3988 /* NOTE: same code as Linux kernel */
3989 /* Allow LDTs to be cleared by the user. */
3990 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3991 if (oldmode ||
3992 (contents == 0 &&
3993 read_exec_only == 1 &&
3994 seg_32bit == 0 &&
3995 limit_in_pages == 0 &&
3996 seg_not_present == 1 &&
3997 useable == 0 )) {
3998 entry_1 = 0;
3999 entry_2 = 0;
4000 goto install;
4004 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4005 (ldt_info.limit & 0x0ffff);
4006 entry_2 = (ldt_info.base_addr & 0xff000000) |
4007 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4008 (ldt_info.limit & 0xf0000) |
4009 ((read_exec_only ^ 1) << 9) |
4010 (contents << 10) |
4011 ((seg_not_present ^ 1) << 15) |
4012 (seg_32bit << 22) |
4013 (limit_in_pages << 23) |
4014 (lm << 21) |
4015 0x7000;
4016 if (!oldmode)
4017 entry_2 |= (useable << 20);
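/*
 * entry_1/entry_2 are the low and high 32-bit words of an x86 segment
 * descriptor: entry_1 packs base[15:0] and limit[15:0]; entry_2 packs the
 * two high base bytes, limit[19:16] and the access/flag bits, with 0x7000
 * forcing S=1 and DPL=3.  This is the same packing the Linux kernel uses
 * when it builds LDT/TLS entries (hence the "same code as Linux kernel"
 * note above).
 */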
4019 /* Install the new entry ... */
4020 install:
4021 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4022 lp[0] = tswap32(entry_1);
4023 lp[1] = tswap32(entry_2);
4024 return 0;
4027 /* i386-specific (and rather unusual) syscalls */
4028 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4029 unsigned long bytecount)
4031 abi_long ret;
4033 switch (func) {
4034 case 0:
4035 ret = read_ldt(ptr, bytecount);
4036 break;
4037 case 1:
4038 ret = write_ldt(env, ptr, bytecount, 1);
4039 break;
4040 case 0x11:
4041 ret = write_ldt(env, ptr, bytecount, 0);
4042 break;
4043 default:
4044 ret = -TARGET_ENOSYS;
4045 break;
4047 return ret;
4050 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4051 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4053 uint64_t *gdt_table = g2h(env->gdt.base);
4054 struct target_modify_ldt_ldt_s ldt_info;
4055 struct target_modify_ldt_ldt_s *target_ldt_info;
4056 int seg_32bit, contents, read_exec_only, limit_in_pages;
4057 int seg_not_present, useable, lm;
4058 uint32_t *lp, entry_1, entry_2;
4059 int i;
4061 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4062 if (!target_ldt_info)
4063 return -TARGET_EFAULT;
4064 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4065 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4066 ldt_info.limit = tswap32(target_ldt_info->limit);
4067 ldt_info.flags = tswap32(target_ldt_info->flags);
4068 if (ldt_info.entry_number == -1) {
4069 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4070 if (gdt_table[i] == 0) {
4071 ldt_info.entry_number = i;
4072 target_ldt_info->entry_number = tswap32(i);
4073 break;
4077 unlock_user_struct(target_ldt_info, ptr, 1);
4079 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4080 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4081 return -TARGET_EINVAL;
4082 seg_32bit = ldt_info.flags & 1;
4083 contents = (ldt_info.flags >> 1) & 3;
4084 read_exec_only = (ldt_info.flags >> 3) & 1;
4085 limit_in_pages = (ldt_info.flags >> 4) & 1;
4086 seg_not_present = (ldt_info.flags >> 5) & 1;
4087 useable = (ldt_info.flags >> 6) & 1;
4088 #ifdef TARGET_ABI32
4089 lm = 0;
4090 #else
4091 lm = (ldt_info.flags >> 7) & 1;
4092 #endif
4094 if (contents == 3) {
4095 if (seg_not_present == 0)
4096 return -TARGET_EINVAL;
4099 /* NOTE: same code as Linux kernel */
4100 /* Allow LDTs to be cleared by the user. */
4101 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4102 if ((contents == 0 &&
4103 read_exec_only == 1 &&
4104 seg_32bit == 0 &&
4105 limit_in_pages == 0 &&
4106 seg_not_present == 1 &&
4107 useable == 0 )) {
4108 entry_1 = 0;
4109 entry_2 = 0;
4110 goto install;
4114 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4115 (ldt_info.limit & 0x0ffff);
4116 entry_2 = (ldt_info.base_addr & 0xff000000) |
4117 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4118 (ldt_info.limit & 0xf0000) |
4119 ((read_exec_only ^ 1) << 9) |
4120 (contents << 10) |
4121 ((seg_not_present ^ 1) << 15) |
4122 (seg_32bit << 22) |
4123 (limit_in_pages << 23) |
4124 (useable << 20) |
4125 (lm << 21) |
4126 0x7000;
4128 /* Install the new entry ... */
4129 install:
4130 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4131 lp[0] = tswap32(entry_1);
4132 lp[1] = tswap32(entry_2);
4133 return 0;
4136 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4138 struct target_modify_ldt_ldt_s *target_ldt_info;
4139 uint64_t *gdt_table = g2h(env->gdt.base);
4140 uint32_t base_addr, limit, flags;
4141 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4142 int seg_not_present, useable, lm;
4143 uint32_t *lp, entry_1, entry_2;
4145 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4146 if (!target_ldt_info)
4147 return -TARGET_EFAULT;
4148 idx = tswap32(target_ldt_info->entry_number);
4149 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4150 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4151 unlock_user_struct(target_ldt_info, ptr, 1);
4152 return -TARGET_EINVAL;
4154 lp = (uint32_t *)(gdt_table + idx);
4155 entry_1 = tswap32(lp[0]);
4156 entry_2 = tswap32(lp[1]);
4158 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4159 contents = (entry_2 >> 10) & 3;
4160 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4161 seg_32bit = (entry_2 >> 22) & 1;
4162 limit_in_pages = (entry_2 >> 23) & 1;
4163 useable = (entry_2 >> 20) & 1;
4164 #ifdef TARGET_ABI32
4165 lm = 0;
4166 #else
4167 lm = (entry_2 >> 21) & 1;
4168 #endif
4169 flags = (seg_32bit << 0) | (contents << 1) |
4170 (read_exec_only << 3) | (limit_in_pages << 4) |
4171 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4172 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4173 base_addr = (entry_1 >> 16) |
4174 (entry_2 & 0xff000000) |
4175 ((entry_2 & 0xff) << 16);
4176 target_ldt_info->base_addr = tswapal(base_addr);
4177 target_ldt_info->limit = tswap32(limit);
4178 target_ldt_info->flags = tswap32(flags);
4179 unlock_user_struct(target_ldt_info, ptr, 1);
4180 return 0;
4182 #endif /* TARGET_I386 && TARGET_ABI32 */
4184 #ifndef TARGET_ABI32
4185 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4187 abi_long ret = 0;
4188 abi_ulong val;
4189 int idx;
4191 switch(code) {
4192 case TARGET_ARCH_SET_GS:
4193 case TARGET_ARCH_SET_FS:
4194 if (code == TARGET_ARCH_SET_GS)
4195 idx = R_GS;
4196 else
4197 idx = R_FS;
4198 cpu_x86_load_seg(env, idx, 0);
4199 env->segs[idx].base = addr;
4200 break;
4201 case TARGET_ARCH_GET_GS:
4202 case TARGET_ARCH_GET_FS:
4203 if (code == TARGET_ARCH_GET_GS)
4204 idx = R_GS;
4205 else
4206 idx = R_FS;
4207 val = env->segs[idx].base;
4208 if (put_user(val, addr, abi_ulong))
4209 ret = -TARGET_EFAULT;
4210 break;
4211 default:
4212 ret = -TARGET_EINVAL;
4213 break;
4215 return ret;
4217 #endif
4219 #endif /* defined(TARGET_I386) */
4221 #define NEW_STACK_SIZE 0x40000
4224 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4225 typedef struct {
4226 CPUArchState *env;
4227 pthread_mutex_t mutex;
4228 pthread_cond_t cond;
4229 pthread_t thread;
4230 uint32_t tid;
4231 abi_ulong child_tidptr;
4232 abi_ulong parent_tidptr;
4233 sigset_t sigmask;
4234 } new_thread_info;
4236 static void *clone_func(void *arg)
4238 new_thread_info *info = arg;
4239 CPUArchState *env;
4240 CPUState *cpu;
4241 TaskState *ts;
4243 env = info->env;
4244 cpu = ENV_GET_CPU(env);
4245 thread_cpu = cpu;
4246 ts = (TaskState *)env->opaque;
4247 info->tid = gettid();
4248 cpu->host_tid = info->tid;
4249 task_settid(ts);
4250 if (info->child_tidptr)
4251 put_user_u32(info->tid, info->child_tidptr);
4252 if (info->parent_tidptr)
4253 put_user_u32(info->tid, info->parent_tidptr);
4254 /* Enable signals. */
4255 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4256 /* Signal to the parent that we're ready. */
4257 pthread_mutex_lock(&info->mutex);
4258 pthread_cond_broadcast(&info->cond);
4259 pthread_mutex_unlock(&info->mutex);
4260 /* Wait until the parent has finished initializing the TLS state. */
4261 pthread_mutex_lock(&clone_lock);
4262 pthread_mutex_unlock(&clone_lock);
4263 cpu_loop(env);
4264 /* never exits */
4265 return NULL;
4268 /* do_fork() must return host values and target errnos (unlike most
4269 other do_*() functions). */
4270 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4271 abi_ulong parent_tidptr, target_ulong newtls,
4272 abi_ulong child_tidptr)
4274 int ret;
4275 TaskState *ts;
4276 CPUArchState *new_env;
4277 unsigned int nptl_flags;
4278 sigset_t sigmask;
4280 /* Emulate vfork() with fork() */
4281 if (flags & CLONE_VFORK)
4282 flags &= ~(CLONE_VFORK | CLONE_VM);
4284 if (flags & CLONE_VM) {
4285 TaskState *parent_ts = (TaskState *)env->opaque;
4286 new_thread_info info;
4287 pthread_attr_t attr;
4289 ts = g_malloc0(sizeof(TaskState));
4290 init_task_state(ts);
4291 /* we create a new CPU instance. */
4292 new_env = cpu_copy(env);
4293 /* Init regs that differ from the parent. */
4294 cpu_clone_regs(new_env, newsp);
4295 new_env->opaque = ts;
4296 ts->bprm = parent_ts->bprm;
4297 ts->info = parent_ts->info;
4298 nptl_flags = flags;
4299 flags &= ~CLONE_NPTL_FLAGS2;
4301 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4302 ts->child_tidptr = child_tidptr;
4305 if (nptl_flags & CLONE_SETTLS)
4306 cpu_set_tls (new_env, newtls);
4308 /* Grab a mutex so that thread setup appears atomic. */
4309 pthread_mutex_lock(&clone_lock);
4311 memset(&info, 0, sizeof(info));
4312 pthread_mutex_init(&info.mutex, NULL);
4313 pthread_mutex_lock(&info.mutex);
4314 pthread_cond_init(&info.cond, NULL);
4315 info.env = new_env;
4316 if (nptl_flags & CLONE_CHILD_SETTID)
4317 info.child_tidptr = child_tidptr;
4318 if (nptl_flags & CLONE_PARENT_SETTID)
4319 info.parent_tidptr = parent_tidptr;
4321 ret = pthread_attr_init(&attr);
4322 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4323 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4324 /* It is not safe to deliver signals until the child has finished
4325 initializing, so temporarily block all signals. */
4326 sigfillset(&sigmask);
4327 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4329 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4330 /* TODO: Free new CPU state if thread creation failed. */
4332 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4333 pthread_attr_destroy(&attr);
4334 if (ret == 0) {
4335 /* Wait for the child to initialize. */
4336 pthread_cond_wait(&info.cond, &info.mutex);
4337 ret = info.tid;
4338 if (flags & CLONE_PARENT_SETTID)
4339 put_user_u32(ret, parent_tidptr);
4340 } else {
4341 ret = -1;
4343 pthread_mutex_unlock(&info.mutex);
4344 pthread_cond_destroy(&info.cond);
4345 pthread_mutex_destroy(&info.mutex);
4346 pthread_mutex_unlock(&clone_lock);
4347 } else {
4348 /* if CLONE_VM is not set, we consider it a plain fork */
4349 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4350 return -EINVAL;
4351 fork_start();
4352 ret = fork();
4353 if (ret == 0) {
4354 /* Child Process. */
4355 cpu_clone_regs(env, newsp);
4356 fork_end(1);
4357 /* There is a race condition here. The parent process could
4358 theoretically read the TID in the child process before the child
4359 tid is set. This would require using either ptrace
4360 (not implemented) or having *_tidptr point at a shared memory
4361 mapping. We can't repeat the spinlock hack used above because
4362 the child process gets its own copy of the lock. */
4363 if (flags & CLONE_CHILD_SETTID)
4364 put_user_u32(gettid(), child_tidptr);
4365 if (flags & CLONE_PARENT_SETTID)
4366 put_user_u32(gettid(), parent_tidptr);
4367 ts = (TaskState *)env->opaque;
4368 if (flags & CLONE_SETTLS)
4369 cpu_set_tls (env, newtls);
4370 if (flags & CLONE_CHILD_CLEARTID)
4371 ts->child_tidptr = child_tidptr;
4372 } else {
4373 fork_end(0);
4376 return ret;
4379 /* warning: does not handle Linux-specific flags... */
4380 static int target_to_host_fcntl_cmd(int cmd)
4382 switch(cmd) {
4383 case TARGET_F_DUPFD:
4384 case TARGET_F_GETFD:
4385 case TARGET_F_SETFD:
4386 case TARGET_F_GETFL:
4387 case TARGET_F_SETFL:
4388 return cmd;
4389 case TARGET_F_GETLK:
4390 return F_GETLK;
4391 case TARGET_F_SETLK:
4392 return F_SETLK;
4393 case TARGET_F_SETLKW:
4394 return F_SETLKW;
4395 case TARGET_F_GETOWN:
4396 return F_GETOWN;
4397 case TARGET_F_SETOWN:
4398 return F_SETOWN;
4399 case TARGET_F_GETSIG:
4400 return F_GETSIG;
4401 case TARGET_F_SETSIG:
4402 return F_SETSIG;
4403 #if TARGET_ABI_BITS == 32
4404 case TARGET_F_GETLK64:
4405 return F_GETLK64;
4406 case TARGET_F_SETLK64:
4407 return F_SETLK64;
4408 case TARGET_F_SETLKW64:
4409 return F_SETLKW64;
4410 #endif
4411 case TARGET_F_SETLEASE:
4412 return F_SETLEASE;
4413 case TARGET_F_GETLEASE:
4414 return F_GETLEASE;
4415 #ifdef F_DUPFD_CLOEXEC
4416 case TARGET_F_DUPFD_CLOEXEC:
4417 return F_DUPFD_CLOEXEC;
4418 #endif
4419 case TARGET_F_NOTIFY:
4420 return F_NOTIFY;
4421 default:
4422 return -TARGET_EINVAL;
4424 return -TARGET_EINVAL;
4427 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4428 static const bitmask_transtbl flock_tbl[] = {
4429 TRANSTBL_CONVERT(F_RDLCK),
4430 TRANSTBL_CONVERT(F_WRLCK),
4431 TRANSTBL_CONVERT(F_UNLCK),
4432 TRANSTBL_CONVERT(F_EXLCK),
4433 TRANSTBL_CONVERT(F_SHLCK),
4434 { 0, 0, 0, 0 }
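/*
 * TRANSTBL_CONVERT(F_RDLCK) expands to { -1, TARGET_F_RDLCK, -1, F_RDLCK }.
 * With an all-ones mask the bitmask-table lookup degenerates into an
 * exact-value match, so l_type is translated as a small enum
 * (F_RDLCK, F_WRLCK, ...) rather than as a set of independent bits.
 */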
4437 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4439 struct flock fl;
4440 struct target_flock *target_fl;
4441 struct flock64 fl64;
4442 struct target_flock64 *target_fl64;
4443 abi_long ret;
4444 int host_cmd = target_to_host_fcntl_cmd(cmd);
4446 if (host_cmd == -TARGET_EINVAL)
4447 return host_cmd;
4449 switch(cmd) {
4450 case TARGET_F_GETLK:
4451 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4452 return -TARGET_EFAULT;
4453 fl.l_type =
4454 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4455 fl.l_whence = tswap16(target_fl->l_whence);
4456 fl.l_start = tswapal(target_fl->l_start);
4457 fl.l_len = tswapal(target_fl->l_len);
4458 fl.l_pid = tswap32(target_fl->l_pid);
4459 unlock_user_struct(target_fl, arg, 0);
4460 ret = get_errno(fcntl(fd, host_cmd, &fl));
4461 if (ret == 0) {
4462 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4463 return -TARGET_EFAULT;
4464 target_fl->l_type =
4465 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4466 target_fl->l_whence = tswap16(fl.l_whence);
4467 target_fl->l_start = tswapal(fl.l_start);
4468 target_fl->l_len = tswapal(fl.l_len);
4469 target_fl->l_pid = tswap32(fl.l_pid);
4470 unlock_user_struct(target_fl, arg, 1);
4472 break;
4474 case TARGET_F_SETLK:
4475 case TARGET_F_SETLKW:
4476 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4477 return -TARGET_EFAULT;
4478 fl.l_type =
4479 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4480 fl.l_whence = tswap16(target_fl->l_whence);
4481 fl.l_start = tswapal(target_fl->l_start);
4482 fl.l_len = tswapal(target_fl->l_len);
4483 fl.l_pid = tswap32(target_fl->l_pid);
4484 unlock_user_struct(target_fl, arg, 0);
4485 ret = get_errno(fcntl(fd, host_cmd, &fl));
4486 break;
4488 case TARGET_F_GETLK64:
4489 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4490 return -TARGET_EFAULT;
4491 fl64.l_type =
4492 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4493 fl64.l_whence = tswap16(target_fl64->l_whence);
4494 fl64.l_start = tswap64(target_fl64->l_start);
4495 fl64.l_len = tswap64(target_fl64->l_len);
4496 fl64.l_pid = tswap32(target_fl64->l_pid);
4497 unlock_user_struct(target_fl64, arg, 0);
4498 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4499 if (ret == 0) {
4500 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4501 return -TARGET_EFAULT;
4502 target_fl64->l_type =
4503 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4504 target_fl64->l_whence = tswap16(fl64.l_whence);
4505 target_fl64->l_start = tswap64(fl64.l_start);
4506 target_fl64->l_len = tswap64(fl64.l_len);
4507 target_fl64->l_pid = tswap32(fl64.l_pid);
4508 unlock_user_struct(target_fl64, arg, 1);
4510 break;
4511 case TARGET_F_SETLK64:
4512 case TARGET_F_SETLKW64:
4513 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4514 return -TARGET_EFAULT;
4515 fl64.l_type =
4516 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4517 fl64.l_whence = tswap16(target_fl64->l_whence);
4518 fl64.l_start = tswap64(target_fl64->l_start);
4519 fl64.l_len = tswap64(target_fl64->l_len);
4520 fl64.l_pid = tswap32(target_fl64->l_pid);
4521 unlock_user_struct(target_fl64, arg, 0);
4522 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4523 break;
4525 case TARGET_F_GETFL:
4526 ret = get_errno(fcntl(fd, host_cmd, arg));
4527 if (ret >= 0) {
4528 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4530 break;
4532 case TARGET_F_SETFL:
4533 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4534 break;
4536 case TARGET_F_SETOWN:
4537 case TARGET_F_GETOWN:
4538 case TARGET_F_SETSIG:
4539 case TARGET_F_GETSIG:
4540 case TARGET_F_SETLEASE:
4541 case TARGET_F_GETLEASE:
4542 ret = get_errno(fcntl(fd, host_cmd, arg));
4543 break;
4545 default:
4546 ret = get_errno(fcntl(fd, cmd, arg));
4547 break;
4549 return ret;
4552 #ifdef USE_UID16
4554 static inline int high2lowuid(int uid)
4556 if (uid > 65535)
4557 return 65534;
4558 else
4559 return uid;
4562 static inline int high2lowgid(int gid)
4564 if (gid > 65535)
4565 return 65534;
4566 else
4567 return gid;
4570 static inline int low2highuid(int uid)
4572 if ((int16_t)uid == -1)
4573 return -1;
4574 else
4575 return uid;
4578 static inline int low2highgid(int gid)
4580 if ((int16_t)gid == -1)
4581 return -1;
4582 else
4583 return gid;
4585 static inline int tswapid(int id)
4587 return tswap16(id);
4590 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4592 #else /* !USE_UID16 */
4593 static inline int high2lowuid(int uid)
4595 return uid;
4597 static inline int high2lowgid(int gid)
4599 return gid;
4601 static inline int low2highuid(int uid)
4603 return uid;
4605 static inline int low2highgid(int gid)
4607 return gid;
4609 static inline int tswapid(int id)
4611 return tswap32(id);
4614 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4616 #endif /* USE_UID16 */
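/*
 * Informal examples of the 16-bit UID mapping above: high2lowuid(100000)
 * returns 65534 (the traditional "overflow" id, since 100000 does not fit
 * in 16 bits), while low2highuid(65535) returns -1 so that a 16-bit
 * chown(path, -1, gid) still means "leave the uid unchanged".
 */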
4618 void syscall_init(void)
4620 IOCTLEntry *ie;
4621 const argtype *arg_type;
4622 int size;
4623 int i;
4625 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4626 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4627 #include "syscall_types.h"
4628 #undef STRUCT
4629 #undef STRUCT_SPECIAL
4631 /* Build target_to_host_errno_table[] table from
4632 * host_to_target_errno_table[]. */
4633 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4634 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4637 /* We patch the ioctl size if necessary. We rely on the fact that
4638 no ioctl has all bits set to '1' in the size field. */
4639 ie = ioctl_entries;
4640 while (ie->target_cmd != 0) {
4641 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4642 TARGET_IOC_SIZEMASK) {
4643 arg_type = ie->arg_type;
4644 if (arg_type[0] != TYPE_PTR) {
4645 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4646 ie->target_cmd);
4647 exit(1);
4649 arg_type++;
4650 size = thunk_type_size(arg_type, 0);
4651 ie->target_cmd = (ie->target_cmd &
4652 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4653 (size << TARGET_IOC_SIZESHIFT);
4656 /* automatic consistency check if same arch */
4657 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4658 (defined(__x86_64__) && defined(TARGET_X86_64))
4659 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4660 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4661 ie->name, ie->target_cmd, ie->host_cmd);
4663 #endif
4664 ie++;
4668 #if TARGET_ABI_BITS == 32
4669 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4671 #ifdef TARGET_WORDS_BIGENDIAN
4672 return ((uint64_t)word0 << 32) | word1;
4673 #else
4674 return ((uint64_t)word1 << 32) | word0;
4675 #endif
4677 #else /* TARGET_ABI_BITS == 32 */
4678 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4680 return word0;
4682 #endif /* TARGET_ABI_BITS != 32 */
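/*
 * On 32-bit ABIs a 64-bit file offset arrives split across two registers;
 * target_offset64() reassembles it in the target's word order.  For
 * example, on a little-endian 32-bit target, target_offset64(0x00000000,
 * 0x00000001) yields 0x100000000 (4 GiB); a big-endian target swaps which
 * word carries the high half.
 */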
4684 #ifdef TARGET_NR_truncate64
4685 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4686 abi_long arg2,
4687 abi_long arg3,
4688 abi_long arg4)
4690 if (regpairs_aligned(cpu_env)) {
4691 arg2 = arg3;
4692 arg3 = arg4;
4694 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4696 #endif
4698 #ifdef TARGET_NR_ftruncate64
4699 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4700 abi_long arg2,
4701 abi_long arg3,
4702 abi_long arg4)
4704 if (regpairs_aligned(cpu_env)) {
4705 arg2 = arg3;
4706 arg3 = arg4;
4708 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4710 #endif
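/*
 * The regpairs_aligned() shuffle above deals with ABIs (ARM EABI, for
 * instance) where a 64-bit syscall argument must start in an even-numbered
 * register: the kernel inserts a padding argument, so the offset's low and
 * high words show up in arg3/arg4 instead of arg2/arg3.
 */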
4712 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4713 abi_ulong target_addr)
4715 struct target_timespec *target_ts;
4717 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4718 return -TARGET_EFAULT;
4719 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4720 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4721 unlock_user_struct(target_ts, target_addr, 0);
4722 return 0;
4725 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4726 struct timespec *host_ts)
4728 struct target_timespec *target_ts;
4730 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4731 return -TARGET_EFAULT;
4732 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4733 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4734 unlock_user_struct(target_ts, target_addr, 1);
4735 return 0;
4738 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4739 abi_ulong target_addr)
4741 struct target_itimerspec *target_itspec;
4743 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4744 return -TARGET_EFAULT;
4747 host_itspec->it_interval.tv_sec =
4748 tswapal(target_itspec->it_interval.tv_sec);
4749 host_itspec->it_interval.tv_nsec =
4750 tswapal(target_itspec->it_interval.tv_nsec);
4751 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
4752 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
4754 unlock_user_struct(target_itspec, target_addr, 1);
4755 return 0;
4758 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
4759 struct itimerspec *host_its)
4761 struct target_itimerspec *target_itspec;
4763 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
4764 return -TARGET_EFAULT;
4767 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
4768 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
4770 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
4771 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
4773 unlock_user_struct(target_itspec, target_addr, 0);
4774 return 0;
4777 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4778 static inline abi_long host_to_target_stat64(void *cpu_env,
4779 abi_ulong target_addr,
4780 struct stat *host_st)
4782 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4783 if (((CPUARMState *)cpu_env)->eabi) {
4784 struct target_eabi_stat64 *target_st;
4786 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4787 return -TARGET_EFAULT;
4788 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4789 __put_user(host_st->st_dev, &target_st->st_dev);
4790 __put_user(host_st->st_ino, &target_st->st_ino);
4791 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4792 __put_user(host_st->st_ino, &target_st->__st_ino);
4793 #endif
4794 __put_user(host_st->st_mode, &target_st->st_mode);
4795 __put_user(host_st->st_nlink, &target_st->st_nlink);
4796 __put_user(host_st->st_uid, &target_st->st_uid);
4797 __put_user(host_st->st_gid, &target_st->st_gid);
4798 __put_user(host_st->st_rdev, &target_st->st_rdev);
4799 __put_user(host_st->st_size, &target_st->st_size);
4800 __put_user(host_st->st_blksize, &target_st->st_blksize);
4801 __put_user(host_st->st_blocks, &target_st->st_blocks);
4802 __put_user(host_st->st_atime, &target_st->target_st_atime);
4803 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4804 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4805 unlock_user_struct(target_st, target_addr, 1);
4806 } else
4807 #endif
4809 #if defined(TARGET_HAS_STRUCT_STAT64)
4810 struct target_stat64 *target_st;
4811 #else
4812 struct target_stat *target_st;
4813 #endif
4815 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4816 return -TARGET_EFAULT;
4817 memset(target_st, 0, sizeof(*target_st));
4818 __put_user(host_st->st_dev, &target_st->st_dev);
4819 __put_user(host_st->st_ino, &target_st->st_ino);
4820 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4821 __put_user(host_st->st_ino, &target_st->__st_ino);
4822 #endif
4823 __put_user(host_st->st_mode, &target_st->st_mode);
4824 __put_user(host_st->st_nlink, &target_st->st_nlink);
4825 __put_user(host_st->st_uid, &target_st->st_uid);
4826 __put_user(host_st->st_gid, &target_st->st_gid);
4827 __put_user(host_st->st_rdev, &target_st->st_rdev);
4828 /* XXX: better use of kernel struct */
4829 __put_user(host_st->st_size, &target_st->st_size);
4830 __put_user(host_st->st_blksize, &target_st->st_blksize);
4831 __put_user(host_st->st_blocks, &target_st->st_blocks);
4832 __put_user(host_st->st_atime, &target_st->target_st_atime);
4833 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4834 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4835 unlock_user_struct(target_st, target_addr, 1);
4838 return 0;
4840 #endif
4842 /* ??? Using host futex calls even when target atomic operations
4843 are not really atomic probably breaks things. However, implementing
4844 futexes locally would make futexes shared between multiple processes
4845 tricky. Then again, they are probably useless anyway, because guest
4846 atomic operations won't work either. */
4847 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4848 target_ulong uaddr2, int val3)
4850 struct timespec ts, *pts;
4851 int base_op;
4853 /* ??? We assume FUTEX_* constants are the same on both host
4854 and target. */
4855 #ifdef FUTEX_CMD_MASK
4856 base_op = op & FUTEX_CMD_MASK;
4857 #else
4858 base_op = op;
4859 #endif
4860 switch (base_op) {
4861 case FUTEX_WAIT:
4862 case FUTEX_WAIT_BITSET:
4863 if (timeout) {
4864 pts = &ts;
4865 target_to_host_timespec(pts, timeout);
4866 } else {
4867 pts = NULL;
4869 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4870 pts, NULL, val3));
4871 case FUTEX_WAKE:
4872 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4873 case FUTEX_FD:
4874 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4875 case FUTEX_REQUEUE:
4876 case FUTEX_CMP_REQUEUE:
4877 case FUTEX_WAKE_OP:
4878 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4879 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4880 But the prototype takes a `struct timespec *'; insert casts
4881 to satisfy the compiler. We do not need to tswap TIMEOUT
4882 since it's not compared to guest memory. */
4883 pts = (struct timespec *)(uintptr_t) timeout;
4884 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4885 g2h(uaddr2),
4886 (base_op == FUTEX_CMP_REQUEUE
4887 ? tswap32(val3)
4888 : val3)));
4889 default:
4890 return -TARGET_ENOSYS;
4894 /* Map host to target signal numbers for the wait family of syscalls.
4895 Assume all other status bits are the same. */
4896 int host_to_target_waitstatus(int status)
4898 if (WIFSIGNALED(status)) {
4899 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4901 if (WIFSTOPPED(status)) {
4902 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4903 | (status & 0xff);
4905 return status;
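/*
 * Example: a child killed by host SIGUSR1 (10 on x86 hosts) reports a wait
 * status of 0x000a; on a target that numbers SIGUSR1 differently (Alpha
 * uses 30, for instance) the low 7 bits are rewritten to the target's
 * number while the core-dump bit and the remaining status bits pass
 * through untouched.  Stopped children are handled the same way, except
 * the signal number lives in bits 8-15.
 */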
4908 static int relstr_to_int(const char *s)
4910 /* Convert a uname release string like "2.6.18" to an integer
4911 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.)
4913 int i, n, tmp;
4915 tmp = 0;
4916 for (i = 0; i < 3; i++) {
4917 n = 0;
4918 while (*s >= '0' && *s <= '9') {
4919 n *= 10;
4920 n += *s - '0';
4921 s++;
4923 tmp = (tmp << 8) + n;
4924 if (*s == '.') {
4925 s++;
4928 return tmp;
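/*
 * Worked example: relstr_to_int("3.2.0") parses the three numeric
 * components into one byte each, giving (3 << 16) | (2 << 8) | 0 =
 * 0x030200; a trailing suffix such as "-4-amd64" is ignored.  The result
 * is only used for ordered comparisons, e.g. against UNAME_MINIMUM_RELEASE
 * in init_qemu_uname_release() below.
 */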
4931 int get_osversion(void)
4933 static int osversion;
4934 struct new_utsname buf;
4935 const char *s;
4937 if (osversion)
4938 return osversion;
4939 if (qemu_uname_release && *qemu_uname_release) {
4940 s = qemu_uname_release;
4941 } else {
4942 if (sys_uname(&buf))
4943 return 0;
4944 s = buf.release;
4946 osversion = relstr_to_int(s);
4947 return osversion;
4950 void init_qemu_uname_release(void)
4952 /* Initialize qemu_uname_release for later use.
4953 * If the host kernel is too old and the user hasn't asked for
4954 * a specific fake version number, we might want to fake a minimum
4955 * target kernel version.
4957 #ifdef UNAME_MINIMUM_RELEASE
4958 struct new_utsname buf;
4960 if (qemu_uname_release && *qemu_uname_release) {
4961 return;
4964 if (sys_uname(&buf)) {
4965 return;
4968 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) {
4969 qemu_uname_release = UNAME_MINIMUM_RELEASE;
4971 #endif
4974 static int open_self_maps(void *cpu_env, int fd)
4976 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4977 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4978 #endif
4979 FILE *fp;
4980 char *line = NULL;
4981 size_t len = 0;
4982 ssize_t read;
4984 fp = fopen("/proc/self/maps", "r");
4985 if (fp == NULL) {
4986 return -EACCES;
4989 while ((read = getline(&line, &len, fp)) != -1) {
4990 int fields, dev_maj, dev_min, inode;
4991 uint64_t min, max, offset;
4992 char flag_r, flag_w, flag_x, flag_p;
4993 char path[512] = "";
4994 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
4995 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
4996 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
4998 if ((fields < 10) || (fields > 11)) {
4999 continue;
5001 if (!strncmp(path, "[stack]", 7)) {
5002 continue;
5004 if (h2g_valid(min) && h2g_valid(max)) {
5005 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5006 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5007 h2g(min), h2g(max), flag_r, flag_w,
5008 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5009 path[0] ? " " : "", path);
5013 free(line);
5014 fclose(fp);
5016 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5017 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5018 (unsigned long long)ts->info->stack_limit,
5019 (unsigned long long)(ts->info->start_stack +
5020 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5021 (unsigned long long)0);
5022 #endif
5024 return 0;
5027 static int open_self_stat(void *cpu_env, int fd)
5029 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5030 abi_ulong start_stack = ts->info->start_stack;
5031 int i;
5033 for (i = 0; i < 44; i++) {
5034 char buf[128];
5035 int len;
5036 uint64_t val = 0;
5038 if (i == 0) {
5039 /* pid */
5040 val = getpid();
5041 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5042 } else if (i == 1) {
5043 /* app name */
5044 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5045 } else if (i == 27) {
5046 /* stack bottom */
5047 val = start_stack;
5048 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5049 } else {
5050 /* for the rest, there is MasterCard */
5051 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5054 len = strlen(buf);
5055 if (write(fd, buf, len) != len) {
5056 return -1;
5060 return 0;
5063 static int open_self_auxv(void *cpu_env, int fd)
5065 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5066 abi_ulong auxv = ts->info->saved_auxv;
5067 abi_ulong len = ts->info->auxv_len;
5068 char *ptr;
5071 * The auxiliary vector is stored on the target process stack.
5072 * Read in the whole auxv vector and copy it to the file.
5074 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5075 if (ptr != NULL) {
5076 while (len > 0) {
5077 ssize_t r;
5078 r = write(fd, ptr, len);
5079 if (r <= 0) {
5080 break;
5082 len -= r;
5083 ptr += r;
5085 lseek(fd, 0, SEEK_SET);
5086 unlock_user(ptr, auxv, len);
5089 return 0;
5092 static int is_proc_myself(const char *filename, const char *entry)
5094 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5095 filename += strlen("/proc/");
5096 if (!strncmp(filename, "self/", strlen("self/"))) {
5097 filename += strlen("self/");
5098 } else if (*filename >= '1' && *filename <= '9') {
5099 char myself[80];
5100 snprintf(myself, sizeof(myself), "%d/", getpid());
5101 if (!strncmp(filename, myself, strlen(myself))) {
5102 filename += strlen(myself);
5103 } else {
5104 return 0;
5106 } else {
5107 return 0;
5109 if (!strcmp(filename, entry)) {
5110 return 1;
5113 return 0;
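/*
 * Examples of the matching above: is_proc_myself("/proc/self/stat", "stat")
 * returns 1, and so does is_proc_myself("/proc/4242/stat", "stat") when
 * getpid() == 4242; any other path ("/proc/version", a different pid, or a
 * path without the "/proc/" prefix) returns 0.
 */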
5116 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5117 static int is_proc(const char *filename, const char *entry)
5119 return strcmp(filename, entry) == 0;
5122 static int open_net_route(void *cpu_env, int fd)
5124 FILE *fp;
5125 char *line = NULL;
5126 size_t len = 0;
5127 ssize_t read;
5129 fp = fopen("/proc/net/route", "r");
5130 if (fp == NULL) {
5131 return -EACCES;
5134 /* read header */
5136 read = getline(&line, &len, fp);
5137 dprintf(fd, "%s", line);
5139 /* read routes */
5141 while ((read = getline(&line, &len, fp)) != -1) {
5142 char iface[16];
5143 uint32_t dest, gw, mask;
5144 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5145 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5146 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5147 &mask, &mtu, &window, &irtt);
5148 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5149 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5150 metric, tswap32(mask), mtu, window, irtt);
5153 free(line);
5154 fclose(fp);
5156 return 0;
5158 #endif
5160 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5162 struct fake_open {
5163 const char *filename;
5164 int (*fill)(void *cpu_env, int fd);
5165 int (*cmp)(const char *s1, const char *s2);
5167 const struct fake_open *fake_open;
5168 static const struct fake_open fakes[] = {
5169 { "maps", open_self_maps, is_proc_myself },
5170 { "stat", open_self_stat, is_proc_myself },
5171 { "auxv", open_self_auxv, is_proc_myself },
5172 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5173 { "/proc/net/route", open_net_route, is_proc },
5174 #endif
5175 { NULL, NULL, NULL }
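/*
 * The loop below matches pathname against this table.  For a matched entry
 * the real /proc file is never opened: the contents are synthesized into an
 * unlinked mkstemp() file, which is rewound and returned, so e.g. a guest
 * reading /proc/self/maps sees addresses translated into its own view of
 * the address space rather than QEMU's raw host mappings.
 */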
5178 for (fake_open = fakes; fake_open->filename; fake_open++) {
5179 if (fake_open->cmp(pathname, fake_open->filename)) {
5180 break;
5184 if (fake_open->filename) {
5185 const char *tmpdir;
5186 char filename[PATH_MAX];
5187 int fd, r;
5189 /* create a temporary file to hold the synthesized /proc contents */
5190 tmpdir = getenv("TMPDIR");
5191 if (!tmpdir)
5192 tmpdir = "/tmp";
5193 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5194 fd = mkstemp(filename);
5195 if (fd < 0) {
5196 return fd;
5198 unlink(filename);
5200 if ((r = fake_open->fill(cpu_env, fd))) {
5201 close(fd);
5202 return r;
5204 lseek(fd, 0, SEEK_SET);
5206 return fd;
5209 return get_errno(open(path(pathname), flags, mode));
5212 /* do_syscall() should always have a single exit point at the end so
5213 that actions, such as logging of syscall results, can be performed.
5214 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5215 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5216 abi_long arg2, abi_long arg3, abi_long arg4,
5217 abi_long arg5, abi_long arg6, abi_long arg7,
5218 abi_long arg8)
5220 CPUState *cpu = ENV_GET_CPU(cpu_env);
5221 abi_long ret;
5222 struct stat st;
5223 struct statfs stfs;
5224 void *p;
5226 #ifdef DEBUG
5227 gemu_log("syscall %d", num);
5228 #endif
5229 if(do_strace)
5230 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5232 switch(num) {
5233 case TARGET_NR_exit:
5234 /* In old applications this may be used to implement _exit(2).
5235 However in threaded applications it is used for thread termination,
5236 and _exit_group is used for application termination.
5237 Do thread termination if we have more than one thread. */
5238 /* FIXME: This probably breaks if a signal arrives. We should probably
5239 be disabling signals. */
5240 if (CPU_NEXT(first_cpu)) {
5241 TaskState *ts;
5243 cpu_list_lock();
5244 /* Remove the CPU from the list. */
5245 QTAILQ_REMOVE(&cpus, cpu, node);
5246 cpu_list_unlock();
5247 ts = ((CPUArchState *)cpu_env)->opaque;
5248 if (ts->child_tidptr) {
5249 put_user_u32(0, ts->child_tidptr);
5250 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5251 NULL, NULL, 0);
5253 thread_cpu = NULL;
5254 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5255 g_free(ts);
5256 pthread_exit(NULL);
5258 #ifdef TARGET_GPROF
5259 _mcleanup();
5260 #endif
5261 gdb_exit(cpu_env, arg1);
5262 _exit(arg1);
5263 ret = 0; /* avoid warning */
5264 break;
5265 case TARGET_NR_read:
5266 if (arg3 == 0)
5267 ret = 0;
5268 else {
5269 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5270 goto efault;
5271 ret = get_errno(read(arg1, p, arg3));
5272 unlock_user(p, arg2, ret);
5274 break;
5275 case TARGET_NR_write:
5276 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5277 goto efault;
5278 ret = get_errno(write(arg1, p, arg3));
5279 unlock_user(p, arg2, 0);
5280 break;
5281 case TARGET_NR_open:
5282 if (!(p = lock_user_string(arg1)))
5283 goto efault;
5284 ret = get_errno(do_open(cpu_env, p,
5285 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5286 arg3));
5287 unlock_user(p, arg1, 0);
5288 break;
5289 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5290 case TARGET_NR_openat:
5291 if (!(p = lock_user_string(arg2)))
5292 goto efault;
5293 ret = get_errno(sys_openat(arg1,
5294 path(p),
5295 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5296 arg4));
5297 unlock_user(p, arg2, 0);
5298 break;
5299 #endif
5300 case TARGET_NR_close:
5301 ret = get_errno(close(arg1));
5302 break;
5303 case TARGET_NR_brk:
5304 ret = do_brk(arg1);
5305 break;
5306 case TARGET_NR_fork:
5307 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5308 break;
5309 #ifdef TARGET_NR_waitpid
5310 case TARGET_NR_waitpid:
5312 int status;
5313 ret = get_errno(waitpid(arg1, &status, arg3));
5314 if (!is_error(ret) && arg2 && ret
5315 && put_user_s32(host_to_target_waitstatus(status), arg2))
5316 goto efault;
5318 break;
5319 #endif
5320 #ifdef TARGET_NR_waitid
5321 case TARGET_NR_waitid:
5323 siginfo_t info;
5324 info.si_pid = 0;
5325 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5326 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5327 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5328 goto efault;
5329 host_to_target_siginfo(p, &info);
5330 unlock_user(p, arg3, sizeof(target_siginfo_t));
5333 break;
5334 #endif
5335 #ifdef TARGET_NR_creat /* not on alpha */
5336 case TARGET_NR_creat:
5337 if (!(p = lock_user_string(arg1)))
5338 goto efault;
5339 ret = get_errno(creat(p, arg2));
5340 unlock_user(p, arg1, 0);
5341 break;
5342 #endif
5343 case TARGET_NR_link:
5345 void * p2;
5346 p = lock_user_string(arg1);
5347 p2 = lock_user_string(arg2);
5348 if (!p || !p2)
5349 ret = -TARGET_EFAULT;
5350 else
5351 ret = get_errno(link(p, p2));
5352 unlock_user(p2, arg2, 0);
5353 unlock_user(p, arg1, 0);
5355 break;
5356 #if defined(TARGET_NR_linkat)
5357 case TARGET_NR_linkat:
5359 void * p2 = NULL;
5360 if (!arg2 || !arg4)
5361 goto efault;
5362 p = lock_user_string(arg2);
5363 p2 = lock_user_string(arg4);
5364 if (!p || !p2)
5365 ret = -TARGET_EFAULT;
5366 else
5367 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5368 unlock_user(p, arg2, 0);
5369 unlock_user(p2, arg4, 0);
5371 break;
5372 #endif
5373 case TARGET_NR_unlink:
5374 if (!(p = lock_user_string(arg1)))
5375 goto efault;
5376 ret = get_errno(unlink(p));
5377 unlock_user(p, arg1, 0);
5378 break;
5379 #if defined(TARGET_NR_unlinkat)
5380 case TARGET_NR_unlinkat:
5381 if (!(p = lock_user_string(arg2)))
5382 goto efault;
5383 ret = get_errno(unlinkat(arg1, p, arg3));
5384 unlock_user(p, arg2, 0);
5385 break;
5386 #endif
5387 case TARGET_NR_execve:
5389 char **argp, **envp;
5390 int argc, envc;
5391 abi_ulong gp;
5392 abi_ulong guest_argp;
5393 abi_ulong guest_envp;
5394 abi_ulong addr;
5395 char **q;
5396 int total_size = 0;
5398 argc = 0;
5399 guest_argp = arg2;
5400 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5401 if (get_user_ual(addr, gp))
5402 goto efault;
5403 if (!addr)
5404 break;
5405 argc++;
5407 envc = 0;
5408 guest_envp = arg3;
5409 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5410 if (get_user_ual(addr, gp))
5411 goto efault;
5412 if (!addr)
5413 break;
5414 envc++;
5417 argp = alloca((argc + 1) * sizeof(void *));
5418 envp = alloca((envc + 1) * sizeof(void *));
5420 for (gp = guest_argp, q = argp; gp;
5421 gp += sizeof(abi_ulong), q++) {
5422 if (get_user_ual(addr, gp))
5423 goto execve_efault;
5424 if (!addr)
5425 break;
5426 if (!(*q = lock_user_string(addr)))
5427 goto execve_efault;
5428 total_size += strlen(*q) + 1;
5430 *q = NULL;
5432 for (gp = guest_envp, q = envp; gp;
5433 gp += sizeof(abi_ulong), q++) {
5434 if (get_user_ual(addr, gp))
5435 goto execve_efault;
5436 if (!addr)
5437 break;
5438 if (!(*q = lock_user_string(addr)))
5439 goto execve_efault;
5440 total_size += strlen(*q) + 1;
5442 *q = NULL;
5444 /* This case will not be caught by the host's execve() if its
5445 page size is bigger than the target's. */
5446 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5447 ret = -TARGET_E2BIG;
5448 goto execve_end;
5450 if (!(p = lock_user_string(arg1)))
5451 goto execve_efault;
5452 ret = get_errno(execve(p, argp, envp));
5453 unlock_user(p, arg1, 0);
5455 goto execve_end;
5457 execve_efault:
5458 ret = -TARGET_EFAULT;
5460 execve_end:
5461 for (gp = guest_argp, q = argp; *q;
5462 gp += sizeof(abi_ulong), q++) {
5463 if (get_user_ual(addr, gp)
5464 || !addr)
5465 break;
5466 unlock_user(*q, addr, 0);
5468 for (gp = guest_envp, q = envp; *q;
5469 gp += sizeof(abi_ulong), q++) {
5470 if (get_user_ual(addr, gp)
5471 || !addr)
5472 break;
5473 unlock_user(*q, addr, 0);
5476 break;
5477 case TARGET_NR_chdir:
5478 if (!(p = lock_user_string(arg1)))
5479 goto efault;
5480 ret = get_errno(chdir(p));
5481 unlock_user(p, arg1, 0);
5482 break;
5483 #ifdef TARGET_NR_time
5484 case TARGET_NR_time:
5486 time_t host_time;
5487 ret = get_errno(time(&host_time));
5488 if (!is_error(ret)
5489 && arg1
5490 && put_user_sal(host_time, arg1))
5491 goto efault;
5493 break;
5494 #endif
5495 case TARGET_NR_mknod:
5496 if (!(p = lock_user_string(arg1)))
5497 goto efault;
5498 ret = get_errno(mknod(p, arg2, arg3));
5499 unlock_user(p, arg1, 0);
5500 break;
5501 #if defined(TARGET_NR_mknodat)
5502 case TARGET_NR_mknodat:
5503 if (!(p = lock_user_string(arg2)))
5504 goto efault;
5505 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5506 unlock_user(p, arg2, 0);
5507 break;
5508 #endif
5509 case TARGET_NR_chmod:
5510 if (!(p = lock_user_string(arg1)))
5511 goto efault;
5512 ret = get_errno(chmod(p, arg2));
5513 unlock_user(p, arg1, 0);
5514 break;
5515 #ifdef TARGET_NR_break
5516 case TARGET_NR_break:
5517 goto unimplemented;
5518 #endif
5519 #ifdef TARGET_NR_oldstat
5520 case TARGET_NR_oldstat:
5521 goto unimplemented;
5522 #endif
5523 case TARGET_NR_lseek:
5524 ret = get_errno(lseek(arg1, arg2, arg3));
5525 break;
5526 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5527 /* Alpha specific */
5528 case TARGET_NR_getxpid:
5529 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5530 ret = get_errno(getpid());
5531 break;
5532 #endif
5533 #ifdef TARGET_NR_getpid
5534 case TARGET_NR_getpid:
5535 ret = get_errno(getpid());
5536 break;
5537 #endif
5538 case TARGET_NR_mount:
5540 /* need to look at the data field */
5541 void *p2, *p3;
5542 p = lock_user_string(arg1);
5543 p2 = lock_user_string(arg2);
5544 p3 = lock_user_string(arg3);
5545 if (!p || !p2 || !p3)
5546 ret = -TARGET_EFAULT;
5547 else {
5548 /* FIXME - arg5 should be locked, but it isn't clear how to
5549 * do that since it's not guaranteed to be a NULL-terminated
5550 * string.
5552 if ( ! arg5 )
5553 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5554 else
5555 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5557 unlock_user(p, arg1, 0);
5558 unlock_user(p2, arg2, 0);
5559 unlock_user(p3, arg3, 0);
5560 break;
5562 #ifdef TARGET_NR_umount
5563 case TARGET_NR_umount:
5564 if (!(p = lock_user_string(arg1)))
5565 goto efault;
5566 ret = get_errno(umount(p));
5567 unlock_user(p, arg1, 0);
5568 break;
5569 #endif
5570 #ifdef TARGET_NR_stime /* not on alpha */
5571 case TARGET_NR_stime:
5573 time_t host_time;
5574 if (get_user_sal(host_time, arg1))
5575 goto efault;
5576 ret = get_errno(stime(&host_time));
5578 break;
5579 #endif
5580 case TARGET_NR_ptrace:
5581 goto unimplemented;
5582 #ifdef TARGET_NR_alarm /* not on alpha */
5583 case TARGET_NR_alarm:
5584 ret = alarm(arg1);
5585 break;
5586 #endif
5587 #ifdef TARGET_NR_oldfstat
5588 case TARGET_NR_oldfstat:
5589 goto unimplemented;
5590 #endif
5591 #ifdef TARGET_NR_pause /* not on alpha */
5592 case TARGET_NR_pause:
5593 ret = get_errno(pause());
5594 break;
5595 #endif
5596 #ifdef TARGET_NR_utime
5597 case TARGET_NR_utime:
5599 struct utimbuf tbuf, *host_tbuf;
5600 struct target_utimbuf *target_tbuf;
5601 if (arg2) {
5602 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5603 goto efault;
5604 tbuf.actime = tswapal(target_tbuf->actime);
5605 tbuf.modtime = tswapal(target_tbuf->modtime);
5606 unlock_user_struct(target_tbuf, arg2, 0);
5607 host_tbuf = &tbuf;
5608 } else {
5609 host_tbuf = NULL;
5611 if (!(p = lock_user_string(arg1)))
5612 goto efault;
5613 ret = get_errno(utime(p, host_tbuf));
5614 unlock_user(p, arg1, 0);
5616 break;
5617 #endif
5618 case TARGET_NR_utimes:
5620 struct timeval *tvp, tv[2];
5621 if (arg2) {
5622 if (copy_from_user_timeval(&tv[0], arg2)
5623 || copy_from_user_timeval(&tv[1],
5624 arg2 + sizeof(struct target_timeval)))
5625 goto efault;
5626 tvp = tv;
5627 } else {
5628 tvp = NULL;
5630 if (!(p = lock_user_string(arg1)))
5631 goto efault;
5632 ret = get_errno(utimes(p, tvp));
5633 unlock_user(p, arg1, 0);
5635 break;
5636 #if defined(TARGET_NR_futimesat)
5637 case TARGET_NR_futimesat:
5639 struct timeval *tvp, tv[2];
5640 if (arg3) {
5641 if (copy_from_user_timeval(&tv[0], arg3)
5642 || copy_from_user_timeval(&tv[1],
5643 arg3 + sizeof(struct target_timeval)))
5644 goto efault;
5645 tvp = tv;
5646 } else {
5647 tvp = NULL;
5649 if (!(p = lock_user_string(arg2)))
5650 goto efault;
5651 ret = get_errno(futimesat(arg1, path(p), tvp));
5652 unlock_user(p, arg2, 0);
5654 break;
5655 #endif
5656 #ifdef TARGET_NR_stty
5657 case TARGET_NR_stty:
5658 goto unimplemented;
5659 #endif
5660 #ifdef TARGET_NR_gtty
5661 case TARGET_NR_gtty:
5662 goto unimplemented;
5663 #endif
5664 case TARGET_NR_access:
5665 if (!(p = lock_user_string(arg1)))
5666 goto efault;
5667 ret = get_errno(access(path(p), arg2));
5668 unlock_user(p, arg1, 0);
5669 break;
5670 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5671 case TARGET_NR_faccessat:
5672 if (!(p = lock_user_string(arg2)))
5673 goto efault;
5674 ret = get_errno(faccessat(arg1, p, arg3, 0));
5675 unlock_user(p, arg2, 0);
5676 break;
5677 #endif
5678 #ifdef TARGET_NR_nice /* not on alpha */
5679 case TARGET_NR_nice:
5680 ret = get_errno(nice(arg1));
5681 break;
5682 #endif
5683 #ifdef TARGET_NR_ftime
5684 case TARGET_NR_ftime:
5685 goto unimplemented;
5686 #endif
5687 case TARGET_NR_sync:
5688 sync();
5689 ret = 0;
5690 break;
5691 case TARGET_NR_kill:
5692 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5693 break;
5694 case TARGET_NR_rename:
5696 void *p2;
5697 p = lock_user_string(arg1);
5698 p2 = lock_user_string(arg2);
5699 if (!p || !p2)
5700 ret = -TARGET_EFAULT;
5701 else
5702 ret = get_errno(rename(p, p2));
5703 unlock_user(p2, arg2, 0);
5704 unlock_user(p, arg1, 0);
5706 break;
5707 #if defined(TARGET_NR_renameat)
5708 case TARGET_NR_renameat:
5710 void *p2;
5711 p = lock_user_string(arg2);
5712 p2 = lock_user_string(arg4);
5713 if (!p || !p2)
5714 ret = -TARGET_EFAULT;
5715 else
5716 ret = get_errno(renameat(arg1, p, arg3, p2));
5717 unlock_user(p2, arg4, 0);
5718 unlock_user(p, arg2, 0);
5720 break;
5721 #endif
5722 case TARGET_NR_mkdir:
5723 if (!(p = lock_user_string(arg1)))
5724 goto efault;
5725 ret = get_errno(mkdir(p, arg2));
5726 unlock_user(p, arg1, 0);
5727 break;
5728 #if defined(TARGET_NR_mkdirat)
5729 case TARGET_NR_mkdirat:
5730 if (!(p = lock_user_string(arg2)))
5731 goto efault;
5732 ret = get_errno(mkdirat(arg1, p, arg3));
5733 unlock_user(p, arg2, 0);
5734 break;
5735 #endif
5736 case TARGET_NR_rmdir:
5737 if (!(p = lock_user_string(arg1)))
5738 goto efault;
5739 ret = get_errno(rmdir(p));
5740 unlock_user(p, arg1, 0);
5741 break;
5742 case TARGET_NR_dup:
5743 ret = get_errno(dup(arg1));
5744 break;
5745 case TARGET_NR_pipe:
5746 ret = do_pipe(cpu_env, arg1, 0, 0);
5747 break;
5748 #ifdef TARGET_NR_pipe2
5749 case TARGET_NR_pipe2:
5750 ret = do_pipe(cpu_env, arg1,
5751 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5752 break;
5753 #endif
5754 case TARGET_NR_times:
5756 struct target_tms *tmsp;
5757 struct tms tms;
5758 ret = get_errno(times(&tms));
5759 if (arg1) {
5760 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5761 if (!tmsp)
5762 goto efault;
5763 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5764 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5765 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5766 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5768 if (!is_error(ret))
5769 ret = host_to_target_clock_t(ret);
5771 break;
5772 #ifdef TARGET_NR_prof
5773 case TARGET_NR_prof:
5774 goto unimplemented;
5775 #endif
5776 #ifdef TARGET_NR_signal
5777 case TARGET_NR_signal:
5778 goto unimplemented;
5779 #endif
5780 case TARGET_NR_acct:
5781 if (arg1 == 0) {
5782 ret = get_errno(acct(NULL));
5783 } else {
5784 if (!(p = lock_user_string(arg1)))
5785 goto efault;
5786 ret = get_errno(acct(path(p)));
5787 unlock_user(p, arg1, 0);
5789 break;
5790 #ifdef TARGET_NR_umount2
5791 case TARGET_NR_umount2:
5792 if (!(p = lock_user_string(arg1)))
5793 goto efault;
5794 ret = get_errno(umount2(p, arg2));
5795 unlock_user(p, arg1, 0);
5796 break;
5797 #endif
5798 #ifdef TARGET_NR_lock
5799 case TARGET_NR_lock:
5800 goto unimplemented;
5801 #endif
5802 case TARGET_NR_ioctl:
5803 ret = do_ioctl(arg1, arg2, arg3);
5804 break;
5805 case TARGET_NR_fcntl:
5806 ret = do_fcntl(arg1, arg2, arg3);
5807 break;
5808 #ifdef TARGET_NR_mpx
5809 case TARGET_NR_mpx:
5810 goto unimplemented;
5811 #endif
5812 case TARGET_NR_setpgid:
5813 ret = get_errno(setpgid(arg1, arg2));
5814 break;
5815 #ifdef TARGET_NR_ulimit
5816 case TARGET_NR_ulimit:
5817 goto unimplemented;
5818 #endif
5819 #ifdef TARGET_NR_oldolduname
5820 case TARGET_NR_oldolduname:
5821 goto unimplemented;
5822 #endif
5823 case TARGET_NR_umask:
5824 ret = get_errno(umask(arg1));
5825 break;
5826 case TARGET_NR_chroot:
5827 if (!(p = lock_user_string(arg1)))
5828 goto efault;
5829 ret = get_errno(chroot(p));
5830 unlock_user(p, arg1, 0);
5831 break;
5832 case TARGET_NR_ustat:
5833 goto unimplemented;
5834 case TARGET_NR_dup2:
5835 ret = get_errno(dup2(arg1, arg2));
5836 break;
5837 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5838 case TARGET_NR_dup3:
5839 ret = get_errno(dup3(arg1, arg2, arg3));
5840 break;
5841 #endif
5842 #ifdef TARGET_NR_getppid /* not on alpha */
5843 case TARGET_NR_getppid:
5844 ret = get_errno(getppid());
5845 break;
5846 #endif
5847 case TARGET_NR_getpgrp:
5848 ret = get_errno(getpgrp());
5849 break;
5850 case TARGET_NR_setsid:
5851 ret = get_errno(setsid());
5852 break;
5853 #ifdef TARGET_NR_sigaction
5854 case TARGET_NR_sigaction:
5856 #if defined(TARGET_ALPHA)
5857 struct target_sigaction act, oact, *pact = 0;
5858 struct target_old_sigaction *old_act;
5859 if (arg2) {
5860 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5861 goto efault;
5862 act._sa_handler = old_act->_sa_handler;
5863 target_siginitset(&act.sa_mask, old_act->sa_mask);
5864 act.sa_flags = old_act->sa_flags;
5865 act.sa_restorer = 0;
5866 unlock_user_struct(old_act, arg2, 0);
5867 pact = &act;
5869 ret = get_errno(do_sigaction(arg1, pact, &oact));
5870 if (!is_error(ret) && arg3) {
5871 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5872 goto efault;
5873 old_act->_sa_handler = oact._sa_handler;
5874 old_act->sa_mask = oact.sa_mask.sig[0];
5875 old_act->sa_flags = oact.sa_flags;
5876 unlock_user_struct(old_act, arg3, 1);
5878 #elif defined(TARGET_MIPS)
5879 struct target_sigaction act, oact, *pact, *old_act;
5881 if (arg2) {
5882 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5883 goto efault;
5884 act._sa_handler = old_act->_sa_handler;
5885 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5886 act.sa_flags = old_act->sa_flags;
5887 unlock_user_struct(old_act, arg2, 0);
5888 pact = &act;
5889 } else {
5890 pact = NULL;
5893 ret = get_errno(do_sigaction(arg1, pact, &oact));
5895 if (!is_error(ret) && arg3) {
5896 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5897 goto efault;
5898 old_act->_sa_handler = oact._sa_handler;
5899 old_act->sa_flags = oact.sa_flags;
5900 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5901 old_act->sa_mask.sig[1] = 0;
5902 old_act->sa_mask.sig[2] = 0;
5903 old_act->sa_mask.sig[3] = 0;
5904 unlock_user_struct(old_act, arg3, 1);
5906 #else
5907 struct target_old_sigaction *old_act;
5908 struct target_sigaction act, oact, *pact;
5909 if (arg2) {
5910 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5911 goto efault;
5912 act._sa_handler = old_act->_sa_handler;
5913 target_siginitset(&act.sa_mask, old_act->sa_mask);
5914 act.sa_flags = old_act->sa_flags;
5915 act.sa_restorer = old_act->sa_restorer;
5916 unlock_user_struct(old_act, arg2, 0);
5917 pact = &act;
5918 } else {
5919 pact = NULL;
5921 ret = get_errno(do_sigaction(arg1, pact, &oact));
5922 if (!is_error(ret) && arg3) {
5923 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5924 goto efault;
5925 old_act->_sa_handler = oact._sa_handler;
5926 old_act->sa_mask = oact.sa_mask.sig[0];
5927 old_act->sa_flags = oact.sa_flags;
5928 old_act->sa_restorer = oact.sa_restorer;
5929 unlock_user_struct(old_act, arg3, 1);
5931 #endif
5933 break;
5934 #endif
5935 case TARGET_NR_rt_sigaction:
5937 #if defined(TARGET_ALPHA)
5938 struct target_sigaction act, oact, *pact = 0;
5939 struct target_rt_sigaction *rt_act;
5940 /* ??? arg4 == sizeof(sigset_t). */
5941 if (arg2) {
5942 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5943 goto efault;
5944 act._sa_handler = rt_act->_sa_handler;
5945 act.sa_mask = rt_act->sa_mask;
5946 act.sa_flags = rt_act->sa_flags;
5947 act.sa_restorer = arg5;
5948 unlock_user_struct(rt_act, arg2, 0);
5949 pact = &act;
5951 ret = get_errno(do_sigaction(arg1, pact, &oact));
5952 if (!is_error(ret) && arg3) {
5953 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5954 goto efault;
5955 rt_act->_sa_handler = oact._sa_handler;
5956 rt_act->sa_mask = oact.sa_mask;
5957 rt_act->sa_flags = oact.sa_flags;
5958 unlock_user_struct(rt_act, arg3, 1);
5960 #else
5961 struct target_sigaction *act;
5962 struct target_sigaction *oact;
5964 if (arg2) {
5965 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5966 goto efault;
5967 } else
5968 act = NULL;
5969 if (arg3) {
5970 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5971 ret = -TARGET_EFAULT;
5972 goto rt_sigaction_fail;
5974 } else
5975 oact = NULL;
5976 ret = get_errno(do_sigaction(arg1, act, oact));
5977 rt_sigaction_fail:
5978 if (act)
5979 unlock_user_struct(act, arg2, 0);
5980 if (oact)
5981 unlock_user_struct(oact, arg3, 1);
5982 #endif
5984 break;
5985 #ifdef TARGET_NR_sgetmask /* not on alpha */
5986 case TARGET_NR_sgetmask:
5988 sigset_t cur_set;
5989 abi_ulong target_set;
5990 sigprocmask(0, NULL, &cur_set);
5991 host_to_target_old_sigset(&target_set, &cur_set);
5992 ret = target_set;
5994 break;
5995 #endif
5996 #ifdef TARGET_NR_ssetmask /* not on alpha */
5997 case TARGET_NR_ssetmask:
5999 sigset_t set, oset, cur_set;
6000 abi_ulong target_set = arg1;
6001 sigprocmask(0, NULL, &cur_set);
6002 target_to_host_old_sigset(&set, &target_set);
6003 sigorset(&set, &set, &cur_set);
6004 sigprocmask(SIG_SETMASK, &set, &oset);
6005 host_to_target_old_sigset(&target_set, &oset);
6006 ret = target_set;
6008 break;
6009 #endif
6010 #ifdef TARGET_NR_sigprocmask
6011 case TARGET_NR_sigprocmask:
6013 #if defined(TARGET_ALPHA)
6014 sigset_t set, oldset;
6015 abi_ulong mask;
6016 int how;
6018 switch (arg1) {
6019 case TARGET_SIG_BLOCK:
6020 how = SIG_BLOCK;
6021 break;
6022 case TARGET_SIG_UNBLOCK:
6023 how = SIG_UNBLOCK;
6024 break;
6025 case TARGET_SIG_SETMASK:
6026 how = SIG_SETMASK;
6027 break;
6028 default:
6029 ret = -TARGET_EINVAL;
6030 goto fail;
6032 mask = arg2;
6033 target_to_host_old_sigset(&set, &mask);
6035 ret = get_errno(sigprocmask(how, &set, &oldset));
6036 if (!is_error(ret)) {
6037 host_to_target_old_sigset(&mask, &oldset);
6038 ret = mask;
6039 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6041 #else
6042 sigset_t set, oldset, *set_ptr;
6043 int how;
6045 if (arg2) {
6046 switch (arg1) {
6047 case TARGET_SIG_BLOCK:
6048 how = SIG_BLOCK;
6049 break;
6050 case TARGET_SIG_UNBLOCK:
6051 how = SIG_UNBLOCK;
6052 break;
6053 case TARGET_SIG_SETMASK:
6054 how = SIG_SETMASK;
6055 break;
6056 default:
6057 ret = -TARGET_EINVAL;
6058 goto fail;
6060 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6061 goto efault;
6062 target_to_host_old_sigset(&set, p);
6063 unlock_user(p, arg2, 0);
6064 set_ptr = &set;
6065 } else {
6066 how = 0;
6067 set_ptr = NULL;
6069 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6070 if (!is_error(ret) && arg3) {
6071 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6072 goto efault;
6073 host_to_target_old_sigset(p, &oldset);
6074 unlock_user(p, arg3, sizeof(target_sigset_t));
6076 #endif
6078 break;
6079 #endif
6080 case TARGET_NR_rt_sigprocmask:
6082 int how = arg1;
6083 sigset_t set, oldset, *set_ptr;
6085 if (arg2) {
6086 switch(how) {
6087 case TARGET_SIG_BLOCK:
6088 how = SIG_BLOCK;
6089 break;
6090 case TARGET_SIG_UNBLOCK:
6091 how = SIG_UNBLOCK;
6092 break;
6093 case TARGET_SIG_SETMASK:
6094 how = SIG_SETMASK;
6095 break;
6096 default:
6097 ret = -TARGET_EINVAL;
6098 goto fail;
6100 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6101 goto efault;
6102 target_to_host_sigset(&set, p);
6103 unlock_user(p, arg2, 0);
6104 set_ptr = &set;
6105 } else {
6106 how = 0;
6107 set_ptr = NULL;
6109 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6110 if (!is_error(ret) && arg3) {
6111 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6112 goto efault;
6113 host_to_target_sigset(p, &oldset);
6114 unlock_user(p, arg3, sizeof(target_sigset_t));
6117 break;
6118 #ifdef TARGET_NR_sigpending
6119 case TARGET_NR_sigpending:
6121 sigset_t set;
6122 ret = get_errno(sigpending(&set));
6123 if (!is_error(ret)) {
6124 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6125 goto efault;
6126 host_to_target_old_sigset(p, &set);
6127 unlock_user(p, arg1, sizeof(target_sigset_t));
6130 break;
6131 #endif
6132 case TARGET_NR_rt_sigpending:
6134 sigset_t set;
6135 ret = get_errno(sigpending(&set));
6136 if (!is_error(ret)) {
6137 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6138 goto efault;
6139 host_to_target_sigset(p, &set);
6140 unlock_user(p, arg1, sizeof(target_sigset_t));
6143 break;
6144 #ifdef TARGET_NR_sigsuspend
6145 case TARGET_NR_sigsuspend:
6147 sigset_t set;
6148 #if defined(TARGET_ALPHA)
6149 abi_ulong mask = arg1;
6150 target_to_host_old_sigset(&set, &mask);
6151 #else
6152 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6153 goto efault;
6154 target_to_host_old_sigset(&set, p);
6155 unlock_user(p, arg1, 0);
6156 #endif
6157 ret = get_errno(sigsuspend(&set));
6159 break;
6160 #endif
6161 case TARGET_NR_rt_sigsuspend:
6163 sigset_t set;
6164 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6165 goto efault;
6166 target_to_host_sigset(&set, p);
6167 unlock_user(p, arg1, 0);
6168 ret = get_errno(sigsuspend(&set));
6170 break;
6171 case TARGET_NR_rt_sigtimedwait:
6173 sigset_t set;
6174 struct timespec uts, *puts;
6175 siginfo_t uinfo;
6177 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6178 goto efault;
6179 target_to_host_sigset(&set, p);
6180 unlock_user(p, arg1, 0);
6181 if (arg3) {
6182 puts = &uts;
6183 target_to_host_timespec(puts, arg3);
6184 } else {
6185 puts = NULL;
6187 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6188 if (!is_error(ret)) {
6189 if (arg2) {
6190 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6192 if (!p) {
6193 goto efault;
6195 host_to_target_siginfo(p, &uinfo);
6196 unlock_user(p, arg2, sizeof(target_siginfo_t));
6198 ret = host_to_target_signal(ret);
6201 break;
6202 case TARGET_NR_rt_sigqueueinfo:
6204 siginfo_t uinfo;
6205             if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6206 goto efault;
6207 target_to_host_siginfo(&uinfo, p);
6208             unlock_user(p, arg3, 0);
6209 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6211 break;
6212 #ifdef TARGET_NR_sigreturn
6213 case TARGET_NR_sigreturn:
6214         /* NOTE: ret is eax, so no transcoding needs to be done */
6215 ret = do_sigreturn(cpu_env);
6216 break;
6217 #endif
6218 case TARGET_NR_rt_sigreturn:
6219         /* NOTE: ret is eax, so no transcoding needs to be done */
6220 ret = do_rt_sigreturn(cpu_env);
6221 break;
6222 case TARGET_NR_sethostname:
6223 if (!(p = lock_user_string(arg1)))
6224 goto efault;
6225 ret = get_errno(sethostname(p, arg2));
6226 unlock_user(p, arg1, 0);
6227 break;
6228 case TARGET_NR_setrlimit:
6230 int resource = target_to_host_resource(arg1);
6231 struct target_rlimit *target_rlim;
6232 struct rlimit rlim;
6233 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6234 goto efault;
6235 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6236 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6237 unlock_user_struct(target_rlim, arg2, 0);
6238 ret = get_errno(setrlimit(resource, &rlim));
6240 break;
6241 case TARGET_NR_getrlimit:
6243 int resource = target_to_host_resource(arg1);
6244 struct target_rlimit *target_rlim;
6245 struct rlimit rlim;
6247 ret = get_errno(getrlimit(resource, &rlim));
6248 if (!is_error(ret)) {
6249 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6250 goto efault;
6251 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6252 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6253 unlock_user_struct(target_rlim, arg2, 1);
6256 break;
6257 case TARGET_NR_getrusage:
6259 struct rusage rusage;
6260 ret = get_errno(getrusage(arg1, &rusage));
6261 if (!is_error(ret)) {
6262 host_to_target_rusage(arg2, &rusage);
6265 break;
6266 case TARGET_NR_gettimeofday:
6268 struct timeval tv;
6269 ret = get_errno(gettimeofday(&tv, NULL));
6270 if (!is_error(ret)) {
6271 if (copy_to_user_timeval(arg1, &tv))
6272 goto efault;
6275 break;
6276 case TARGET_NR_settimeofday:
6278 struct timeval tv;
6279 if (copy_from_user_timeval(&tv, arg1))
6280 goto efault;
6281 ret = get_errno(settimeofday(&tv, NULL));
6283 break;
6284 #if defined(TARGET_NR_select)
6285 case TARGET_NR_select:
6286 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6287 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6288 #else
6290 struct target_sel_arg_struct *sel;
6291 abi_ulong inp, outp, exp, tvp;
6292 long nsel;
6294 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6295 goto efault;
6296 nsel = tswapal(sel->n);
6297 inp = tswapal(sel->inp);
6298 outp = tswapal(sel->outp);
6299 exp = tswapal(sel->exp);
6300 tvp = tswapal(sel->tvp);
6301 unlock_user_struct(sel, arg1, 0);
6302 ret = do_select(nsel, inp, outp, exp, tvp);
6304 #endif
6305 break;
6306 #endif
6307 #ifdef TARGET_NR_pselect6
6308 case TARGET_NR_pselect6:
6310 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6311 fd_set rfds, wfds, efds;
6312 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6313 struct timespec ts, *ts_ptr;
6316 * The 6th arg is actually two args smashed together,
6317 * so we cannot use the C library.
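         * The sixth argument points at a pair packed in guest memory: an
         * abi_ulong holding the address of the sigset followed by an
         * abi_ulong holding its size.  The code below unpacks that pair
         * and, like the kernel, rejects any size other than
         * sizeof(target_sigset_t).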
6319 sigset_t set;
6320 struct {
6321 sigset_t *set;
6322 size_t size;
6323 } sig, *sig_ptr;
6325 abi_ulong arg_sigset, arg_sigsize, *arg7;
6326 target_sigset_t *target_sigset;
6328 n = arg1;
6329 rfd_addr = arg2;
6330 wfd_addr = arg3;
6331 efd_addr = arg4;
6332 ts_addr = arg5;
6334 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6335 if (ret) {
6336 goto fail;
6338 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6339 if (ret) {
6340 goto fail;
6342 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6343 if (ret) {
6344 goto fail;
6348 * This takes a timespec, and not a timeval, so we cannot
6349 * use the do_select() helper ...
6351 if (ts_addr) {
6352 if (target_to_host_timespec(&ts, ts_addr)) {
6353 goto efault;
6355 ts_ptr = &ts;
6356 } else {
6357 ts_ptr = NULL;
6360 /* Extract the two packed args for the sigset */
6361 if (arg6) {
6362 sig_ptr = &sig;
6363 sig.size = _NSIG / 8;
6365 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6366 if (!arg7) {
6367 goto efault;
6369 arg_sigset = tswapal(arg7[0]);
6370 arg_sigsize = tswapal(arg7[1]);
6371 unlock_user(arg7, arg6, 0);
6373 if (arg_sigset) {
6374 sig.set = &set;
6375 if (arg_sigsize != sizeof(*target_sigset)) {
6376 /* Like the kernel, we enforce correct size sigsets */
6377 ret = -TARGET_EINVAL;
6378 goto fail;
6380 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6381 sizeof(*target_sigset), 1);
6382 if (!target_sigset) {
6383 goto efault;
6385 target_to_host_sigset(&set, target_sigset);
6386 unlock_user(target_sigset, arg_sigset, 0);
6387 } else {
6388 sig.set = NULL;
6390 } else {
6391 sig_ptr = NULL;
6394 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6395 ts_ptr, sig_ptr));
6397 if (!is_error(ret)) {
6398 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6399 goto efault;
6400 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6401 goto efault;
6402 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6403 goto efault;
6405 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6406 goto efault;
6409 break;
6410 #endif
6411 case TARGET_NR_symlink:
6413 void *p2;
6414 p = lock_user_string(arg1);
6415 p2 = lock_user_string(arg2);
6416 if (!p || !p2)
6417 ret = -TARGET_EFAULT;
6418 else
6419 ret = get_errno(symlink(p, p2));
6420 unlock_user(p2, arg2, 0);
6421 unlock_user(p, arg1, 0);
6423 break;
6424 #if defined(TARGET_NR_symlinkat)
6425 case TARGET_NR_symlinkat:
6427 void *p2;
6428 p = lock_user_string(arg1);
6429 p2 = lock_user_string(arg3);
6430 if (!p || !p2)
6431 ret = -TARGET_EFAULT;
6432 else
6433 ret = get_errno(symlinkat(p, arg2, p2));
6434 unlock_user(p2, arg3, 0);
6435 unlock_user(p, arg1, 0);
6437 break;
6438 #endif
6439 #ifdef TARGET_NR_oldlstat
6440 case TARGET_NR_oldlstat:
6441 goto unimplemented;
6442 #endif
6443 case TARGET_NR_readlink:
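        /* readlink is mostly passed straight through, but when the guest
         * reads its own /proc/.../exe link (is_proc_myself), the answer is
         * the path of the emulated binary (exec_path) rather than the qemu
         * executable itself.
         */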
6445 void *p2;
6446 p = lock_user_string(arg1);
6447 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6448 if (!p || !p2) {
6449 ret = -TARGET_EFAULT;
6450 } else if (is_proc_myself((const char *)p, "exe")) {
6451 char real[PATH_MAX], *temp;
6452 temp = realpath(exec_path, real);
6453             ret = temp == NULL ? get_errno(-1) : strlen(real);
6454 snprintf((char *)p2, arg3, "%s", real);
6455 } else {
6456 ret = get_errno(readlink(path(p), p2, arg3));
6458 unlock_user(p2, arg2, ret);
6459 unlock_user(p, arg1, 0);
6461 break;
6462 #if defined(TARGET_NR_readlinkat)
6463 case TARGET_NR_readlinkat:
6465 void *p2;
6466 p = lock_user_string(arg2);
6467 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6468 if (!p || !p2) {
6469 ret = -TARGET_EFAULT;
6470 } else if (is_proc_myself((const char *)p, "exe")) {
6471 char real[PATH_MAX], *temp;
6472 temp = realpath(exec_path, real);
6473             ret = temp == NULL ? get_errno(-1) : strlen(real);
6474 snprintf((char *)p2, arg4, "%s", real);
6475 } else {
6476 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6478 unlock_user(p2, arg3, ret);
6479 unlock_user(p, arg2, 0);
6481 break;
6482 #endif
6483 #ifdef TARGET_NR_uselib
6484 case TARGET_NR_uselib:
6485 goto unimplemented;
6486 #endif
6487 #ifdef TARGET_NR_swapon
6488 case TARGET_NR_swapon:
6489 if (!(p = lock_user_string(arg1)))
6490 goto efault;
6491 ret = get_errno(swapon(p, arg2));
6492 unlock_user(p, arg1, 0);
6493 break;
6494 #endif
6495 case TARGET_NR_reboot:
6496 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6497 /* arg4 must be ignored in all other cases */
6498 p = lock_user_string(arg4);
6499 if (!p) {
6500 goto efault;
6502 ret = get_errno(reboot(arg1, arg2, arg3, p));
6503 unlock_user(p, arg4, 0);
6504 } else {
6505 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6507 break;
6508 #ifdef TARGET_NR_readdir
6509 case TARGET_NR_readdir:
6510 goto unimplemented;
6511 #endif
6512 #ifdef TARGET_NR_mmap
6513 case TARGET_NR_mmap:
6514 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6515 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6516 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6517 || defined(TARGET_S390X)
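        /* On these targets the legacy mmap syscall does not pass its six
         * arguments in registers: the single argument points at an array of
         * six abi_ulongs in guest memory, which is unpacked below before
         * calling target_mmap().
         */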
6519 abi_ulong *v;
6520 abi_ulong v1, v2, v3, v4, v5, v6;
6521 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6522 goto efault;
6523 v1 = tswapal(v[0]);
6524 v2 = tswapal(v[1]);
6525 v3 = tswapal(v[2]);
6526 v4 = tswapal(v[3]);
6527 v5 = tswapal(v[4]);
6528 v6 = tswapal(v[5]);
6529 unlock_user(v, arg1, 0);
6530 ret = get_errno(target_mmap(v1, v2, v3,
6531 target_to_host_bitmask(v4, mmap_flags_tbl),
6532 v5, v6));
6534 #else
6535 ret = get_errno(target_mmap(arg1, arg2, arg3,
6536 target_to_host_bitmask(arg4, mmap_flags_tbl),
6537 arg5,
6538 arg6));
6539 #endif
6540 break;
6541 #endif
6542 #ifdef TARGET_NR_mmap2
6543 case TARGET_NR_mmap2:
6544 #ifndef MMAP_SHIFT
6545 #define MMAP_SHIFT 12
6546 #endif
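        /* mmap2 passes its file offset in units of pages (4096 bytes unless
         * the target overrides MMAP_SHIFT) rather than bytes, hence the
         * shift applied to arg6 below.
         */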
6547 ret = get_errno(target_mmap(arg1, arg2, arg3,
6548 target_to_host_bitmask(arg4, mmap_flags_tbl),
6549 arg5,
6550 arg6 << MMAP_SHIFT));
6551 break;
6552 #endif
6553 case TARGET_NR_munmap:
6554 ret = get_errno(target_munmap(arg1, arg2));
6555 break;
6556 case TARGET_NR_mprotect:
6558 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6559 /* Special hack to detect libc making the stack executable. */
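        /* When libc makes the stack executable it calls mprotect() with
         * PROT_GROWSDOWN on an address within the stack; the fixup below
         * extends the start of the range down to the guest stack limit so
         * that the whole stack mapping gets the new protection.
         */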
6560 if ((arg3 & PROT_GROWSDOWN)
6561 && arg1 >= ts->info->stack_limit
6562 && arg1 <= ts->info->start_stack) {
6563 arg3 &= ~PROT_GROWSDOWN;
6564 arg2 = arg2 + arg1 - ts->info->stack_limit;
6565 arg1 = ts->info->stack_limit;
6568 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6569 break;
6570 #ifdef TARGET_NR_mremap
6571 case TARGET_NR_mremap:
6572 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6573 break;
6574 #endif
6575 /* ??? msync/mlock/munlock are broken for softmmu. */
6576 #ifdef TARGET_NR_msync
6577 case TARGET_NR_msync:
6578 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6579 break;
6580 #endif
6581 #ifdef TARGET_NR_mlock
6582 case TARGET_NR_mlock:
6583 ret = get_errno(mlock(g2h(arg1), arg2));
6584 break;
6585 #endif
6586 #ifdef TARGET_NR_munlock
6587 case TARGET_NR_munlock:
6588 ret = get_errno(munlock(g2h(arg1), arg2));
6589 break;
6590 #endif
6591 #ifdef TARGET_NR_mlockall
6592 case TARGET_NR_mlockall:
6593 ret = get_errno(mlockall(arg1));
6594 break;
6595 #endif
6596 #ifdef TARGET_NR_munlockall
6597 case TARGET_NR_munlockall:
6598 ret = get_errno(munlockall());
6599 break;
6600 #endif
6601 case TARGET_NR_truncate:
6602 if (!(p = lock_user_string(arg1)))
6603 goto efault;
6604 ret = get_errno(truncate(p, arg2));
6605 unlock_user(p, arg1, 0);
6606 break;
6607 case TARGET_NR_ftruncate:
6608 ret = get_errno(ftruncate(arg1, arg2));
6609 break;
6610 case TARGET_NR_fchmod:
6611 ret = get_errno(fchmod(arg1, arg2));
6612 break;
6613 #if defined(TARGET_NR_fchmodat)
6614 case TARGET_NR_fchmodat:
6615 if (!(p = lock_user_string(arg2)))
6616 goto efault;
6617 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6618 unlock_user(p, arg2, 0);
6619 break;
6620 #endif
6621 case TARGET_NR_getpriority:
6622 /* Note that negative values are valid for getpriority, so we must
6623 differentiate based on errno settings. */
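        /* A successful getpriority() can legitimately return -1, so the
         * only reliable error check is errno (cleared first, tested after).
         * On the non-Alpha path below the result is rebiased as 20 - nice,
         * e.g. a nice value of -5 is reported to the guest as 25, matching
         * what the raw syscall returns.
         */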
6624 errno = 0;
6625 ret = getpriority(arg1, arg2);
6626 if (ret == -1 && errno != 0) {
6627 ret = -host_to_target_errno(errno);
6628 break;
6630 #ifdef TARGET_ALPHA
6631 /* Return value is the unbiased priority. Signal no error. */
6632 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6633 #else
6634 /* Return value is a biased priority to avoid negative numbers. */
6635 ret = 20 - ret;
6636 #endif
6637 break;
6638 case TARGET_NR_setpriority:
6639 ret = get_errno(setpriority(arg1, arg2, arg3));
6640 break;
6641 #ifdef TARGET_NR_profil
6642 case TARGET_NR_profil:
6643 goto unimplemented;
6644 #endif
6645 case TARGET_NR_statfs:
6646 if (!(p = lock_user_string(arg1)))
6647 goto efault;
6648 ret = get_errno(statfs(path(p), &stfs));
6649 unlock_user(p, arg1, 0);
6650 convert_statfs:
6651 if (!is_error(ret)) {
6652 struct target_statfs *target_stfs;
6654 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6655 goto efault;
6656 __put_user(stfs.f_type, &target_stfs->f_type);
6657 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6658 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6659 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6660 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6661 __put_user(stfs.f_files, &target_stfs->f_files);
6662 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6663 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6664 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6665 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6666 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6667 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6668 unlock_user_struct(target_stfs, arg2, 1);
6670 break;
6671 case TARGET_NR_fstatfs:
6672 ret = get_errno(fstatfs(arg1, &stfs));
6673 goto convert_statfs;
6674 #ifdef TARGET_NR_statfs64
6675 case TARGET_NR_statfs64:
6676 if (!(p = lock_user_string(arg1)))
6677 goto efault;
6678 ret = get_errno(statfs(path(p), &stfs));
6679 unlock_user(p, arg1, 0);
6680 convert_statfs64:
6681 if (!is_error(ret)) {
6682 struct target_statfs64 *target_stfs;
6684 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6685 goto efault;
6686 __put_user(stfs.f_type, &target_stfs->f_type);
6687 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6688 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6689 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6690 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6691 __put_user(stfs.f_files, &target_stfs->f_files);
6692 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6693 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6694 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6695 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6696 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6697 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6698 unlock_user_struct(target_stfs, arg3, 1);
6700 break;
6701 case TARGET_NR_fstatfs64:
6702 ret = get_errno(fstatfs(arg1, &stfs));
6703 goto convert_statfs64;
6704 #endif
6705 #ifdef TARGET_NR_ioperm
6706 case TARGET_NR_ioperm:
6707 goto unimplemented;
6708 #endif
6709 #ifdef TARGET_NR_socketcall
6710 case TARGET_NR_socketcall:
6711 ret = do_socketcall(arg1, arg2);
6712 break;
6713 #endif
6714 #ifdef TARGET_NR_accept
6715 case TARGET_NR_accept:
6716 ret = do_accept4(arg1, arg2, arg3, 0);
6717 break;
6718 #endif
6719 #ifdef TARGET_NR_accept4
6720 case TARGET_NR_accept4:
6721 #ifdef CONFIG_ACCEPT4
6722 ret = do_accept4(arg1, arg2, arg3, arg4);
6723 #else
6724 goto unimplemented;
6725 #endif
6726 break;
6727 #endif
6728 #ifdef TARGET_NR_bind
6729 case TARGET_NR_bind:
6730 ret = do_bind(arg1, arg2, arg3);
6731 break;
6732 #endif
6733 #ifdef TARGET_NR_connect
6734 case TARGET_NR_connect:
6735 ret = do_connect(arg1, arg2, arg3);
6736 break;
6737 #endif
6738 #ifdef TARGET_NR_getpeername
6739 case TARGET_NR_getpeername:
6740 ret = do_getpeername(arg1, arg2, arg3);
6741 break;
6742 #endif
6743 #ifdef TARGET_NR_getsockname
6744 case TARGET_NR_getsockname:
6745 ret = do_getsockname(arg1, arg2, arg3);
6746 break;
6747 #endif
6748 #ifdef TARGET_NR_getsockopt
6749 case TARGET_NR_getsockopt:
6750 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6751 break;
6752 #endif
6753 #ifdef TARGET_NR_listen
6754 case TARGET_NR_listen:
6755 ret = get_errno(listen(arg1, arg2));
6756 break;
6757 #endif
6758 #ifdef TARGET_NR_recv
6759 case TARGET_NR_recv:
6760 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6761 break;
6762 #endif
6763 #ifdef TARGET_NR_recvfrom
6764 case TARGET_NR_recvfrom:
6765 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6766 break;
6767 #endif
6768 #ifdef TARGET_NR_recvmsg
6769 case TARGET_NR_recvmsg:
6770 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6771 break;
6772 #endif
6773 #ifdef TARGET_NR_send
6774 case TARGET_NR_send:
6775 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6776 break;
6777 #endif
6778 #ifdef TARGET_NR_sendmsg
6779 case TARGET_NR_sendmsg:
6780 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6781 break;
6782 #endif
6783 #ifdef TARGET_NR_sendmmsg
6784 case TARGET_NR_sendmmsg:
6785 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
6786 break;
6787 case TARGET_NR_recvmmsg:
6788 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
6789 break;
6790 #endif
6791 #ifdef TARGET_NR_sendto
6792 case TARGET_NR_sendto:
6793 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6794 break;
6795 #endif
6796 #ifdef TARGET_NR_shutdown
6797 case TARGET_NR_shutdown:
6798 ret = get_errno(shutdown(arg1, arg2));
6799 break;
6800 #endif
6801 #ifdef TARGET_NR_socket
6802 case TARGET_NR_socket:
6803 ret = do_socket(arg1, arg2, arg3);
6804 break;
6805 #endif
6806 #ifdef TARGET_NR_socketpair
6807 case TARGET_NR_socketpair:
6808 ret = do_socketpair(arg1, arg2, arg3, arg4);
6809 break;
6810 #endif
6811 #ifdef TARGET_NR_setsockopt
6812 case TARGET_NR_setsockopt:
6813 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6814 break;
6815 #endif
6817 case TARGET_NR_syslog:
6818 if (!(p = lock_user_string(arg2)))
6819 goto efault;
6820 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6821 unlock_user(p, arg2, 0);
6822 break;
6824 case TARGET_NR_setitimer:
6826 struct itimerval value, ovalue, *pvalue;
6828 if (arg2) {
6829 pvalue = &value;
6830 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6831 || copy_from_user_timeval(&pvalue->it_value,
6832 arg2 + sizeof(struct target_timeval)))
6833 goto efault;
6834 } else {
6835 pvalue = NULL;
6837 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6838 if (!is_error(ret) && arg3) {
6839 if (copy_to_user_timeval(arg3,
6840 &ovalue.it_interval)
6841 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6842 &ovalue.it_value))
6843 goto efault;
6846 break;
6847 case TARGET_NR_getitimer:
6849 struct itimerval value;
6851 ret = get_errno(getitimer(arg1, &value));
6852 if (!is_error(ret) && arg2) {
6853 if (copy_to_user_timeval(arg2,
6854 &value.it_interval)
6855 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6856 &value.it_value))
6857 goto efault;
6860 break;
6861 case TARGET_NR_stat:
6862 if (!(p = lock_user_string(arg1)))
6863 goto efault;
6864 ret = get_errno(stat(path(p), &st));
6865 unlock_user(p, arg1, 0);
6866 goto do_stat;
6867 case TARGET_NR_lstat:
6868 if (!(p = lock_user_string(arg1)))
6869 goto efault;
6870 ret = get_errno(lstat(path(p), &st));
6871 unlock_user(p, arg1, 0);
6872 goto do_stat;
6873 case TARGET_NR_fstat:
6875 ret = get_errno(fstat(arg1, &st));
6876 do_stat:
6877 if (!is_error(ret)) {
6878 struct target_stat *target_st;
6880 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6881 goto efault;
6882 memset(target_st, 0, sizeof(*target_st));
6883 __put_user(st.st_dev, &target_st->st_dev);
6884 __put_user(st.st_ino, &target_st->st_ino);
6885 __put_user(st.st_mode, &target_st->st_mode);
6886 __put_user(st.st_uid, &target_st->st_uid);
6887 __put_user(st.st_gid, &target_st->st_gid);
6888 __put_user(st.st_nlink, &target_st->st_nlink);
6889 __put_user(st.st_rdev, &target_st->st_rdev);
6890 __put_user(st.st_size, &target_st->st_size);
6891 __put_user(st.st_blksize, &target_st->st_blksize);
6892 __put_user(st.st_blocks, &target_st->st_blocks);
6893 __put_user(st.st_atime, &target_st->target_st_atime);
6894 __put_user(st.st_mtime, &target_st->target_st_mtime);
6895 __put_user(st.st_ctime, &target_st->target_st_ctime);
6896 unlock_user_struct(target_st, arg2, 1);
6899 break;
6900 #ifdef TARGET_NR_olduname
6901 case TARGET_NR_olduname:
6902 goto unimplemented;
6903 #endif
6904 #ifdef TARGET_NR_iopl
6905 case TARGET_NR_iopl:
6906 goto unimplemented;
6907 #endif
6908 case TARGET_NR_vhangup:
6909 ret = get_errno(vhangup());
6910 break;
6911 #ifdef TARGET_NR_idle
6912 case TARGET_NR_idle:
6913 goto unimplemented;
6914 #endif
6915 #ifdef TARGET_NR_syscall
6916 case TARGET_NR_syscall:
6917 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6918 arg6, arg7, arg8, 0);
6919 break;
6920 #endif
6921 case TARGET_NR_wait4:
6923 int status;
6924 abi_long status_ptr = arg2;
6925 struct rusage rusage, *rusage_ptr;
6926 abi_ulong target_rusage = arg4;
6927 if (target_rusage)
6928 rusage_ptr = &rusage;
6929 else
6930 rusage_ptr = NULL;
6931 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6932 if (!is_error(ret)) {
6933 if (status_ptr && ret) {
6934 status = host_to_target_waitstatus(status);
6935 if (put_user_s32(status, status_ptr))
6936 goto efault;
6938 if (target_rusage)
6939 host_to_target_rusage(target_rusage, &rusage);
6942 break;
6943 #ifdef TARGET_NR_swapoff
6944 case TARGET_NR_swapoff:
6945 if (!(p = lock_user_string(arg1)))
6946 goto efault;
6947 ret = get_errno(swapoff(p));
6948 unlock_user(p, arg1, 0);
6949 break;
6950 #endif
6951 case TARGET_NR_sysinfo:
6953 struct target_sysinfo *target_value;
6954 struct sysinfo value;
6955 ret = get_errno(sysinfo(&value));
6956 if (!is_error(ret) && arg1)
6958 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6959 goto efault;
6960 __put_user(value.uptime, &target_value->uptime);
6961 __put_user(value.loads[0], &target_value->loads[0]);
6962 __put_user(value.loads[1], &target_value->loads[1]);
6963 __put_user(value.loads[2], &target_value->loads[2]);
6964 __put_user(value.totalram, &target_value->totalram);
6965 __put_user(value.freeram, &target_value->freeram);
6966 __put_user(value.sharedram, &target_value->sharedram);
6967 __put_user(value.bufferram, &target_value->bufferram);
6968 __put_user(value.totalswap, &target_value->totalswap);
6969 __put_user(value.freeswap, &target_value->freeswap);
6970 __put_user(value.procs, &target_value->procs);
6971 __put_user(value.totalhigh, &target_value->totalhigh);
6972 __put_user(value.freehigh, &target_value->freehigh);
6973 __put_user(value.mem_unit, &target_value->mem_unit);
6974 unlock_user_struct(target_value, arg1, 1);
6977 break;
6978 #ifdef TARGET_NR_ipc
6979 case TARGET_NR_ipc:
6980 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6981 break;
6982 #endif
6983 #ifdef TARGET_NR_semget
6984 case TARGET_NR_semget:
6985 ret = get_errno(semget(arg1, arg2, arg3));
6986 break;
6987 #endif
6988 #ifdef TARGET_NR_semop
6989 case TARGET_NR_semop:
6990 ret = do_semop(arg1, arg2, arg3);
6991 break;
6992 #endif
6993 #ifdef TARGET_NR_semctl
6994 case TARGET_NR_semctl:
6995 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6996 break;
6997 #endif
6998 #ifdef TARGET_NR_msgctl
6999 case TARGET_NR_msgctl:
7000 ret = do_msgctl(arg1, arg2, arg3);
7001 break;
7002 #endif
7003 #ifdef TARGET_NR_msgget
7004 case TARGET_NR_msgget:
7005 ret = get_errno(msgget(arg1, arg2));
7006 break;
7007 #endif
7008 #ifdef TARGET_NR_msgrcv
7009 case TARGET_NR_msgrcv:
7010 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7011 break;
7012 #endif
7013 #ifdef TARGET_NR_msgsnd
7014 case TARGET_NR_msgsnd:
7015 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7016 break;
7017 #endif
7018 #ifdef TARGET_NR_shmget
7019 case TARGET_NR_shmget:
7020 ret = get_errno(shmget(arg1, arg2, arg3));
7021 break;
7022 #endif
7023 #ifdef TARGET_NR_shmctl
7024 case TARGET_NR_shmctl:
7025 ret = do_shmctl(arg1, arg2, arg3);
7026 break;
7027 #endif
7028 #ifdef TARGET_NR_shmat
7029 case TARGET_NR_shmat:
7030 ret = do_shmat(arg1, arg2, arg3);
7031 break;
7032 #endif
7033 #ifdef TARGET_NR_shmdt
7034 case TARGET_NR_shmdt:
7035 ret = do_shmdt(arg1);
7036 break;
7037 #endif
7038 case TARGET_NR_fsync:
7039 ret = get_errno(fsync(arg1));
7040 break;
7041 case TARGET_NR_clone:
7042 /* Linux manages to have three different orderings for its
7043 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7044 * match the kernel's CONFIG_CLONE_* settings.
7045 * Microblaze is further special in that it uses a sixth
7046 * implicit argument to clone for the TLS pointer.
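     * Roughly: the default order is (flags, newsp, parent_tidptr,
     * child_tidptr, tls), CONFIG_CLONE_BACKWARDS swaps the last two to
     * (..., tls, child_tidptr), and CONFIG_CLONE_BACKWARDS2 instead swaps
     * the first two to (newsp, flags, ...); the do_fork() calls below
     * shuffle argN accordingly.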
7048 #if defined(TARGET_MICROBLAZE)
7049 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7050 #elif defined(TARGET_CLONE_BACKWARDS)
7051 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7052 #elif defined(TARGET_CLONE_BACKWARDS2)
7053 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7054 #else
7055 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7056 #endif
7057 break;
7058 #ifdef __NR_exit_group
7059 /* new thread calls */
7060 case TARGET_NR_exit_group:
7061 #ifdef TARGET_GPROF
7062 _mcleanup();
7063 #endif
7064 gdb_exit(cpu_env, arg1);
7065 ret = get_errno(exit_group(arg1));
7066 break;
7067 #endif
7068 case TARGET_NR_setdomainname:
7069 if (!(p = lock_user_string(arg1)))
7070 goto efault;
7071 ret = get_errno(setdomainname(p, arg2));
7072 unlock_user(p, arg1, 0);
7073 break;
7074 case TARGET_NR_uname:
7075 /* no need to transcode because we use the linux syscall */
7077 struct new_utsname * buf;
7079 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7080 goto efault;
7081 ret = get_errno(sys_uname(buf));
7082 if (!is_error(ret)) {
7083                 /* Overwrite the native machine name with whatever is being
7084                    emulated. */
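                /* cpu_to_uname_machine() returns a machine string appropriate
                 * to the emulated CPU, so e.g. an ARM guest sees an ARM
                 * machine name even when the host utsname reports x86_64.
                 */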
7085 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7086 /* Allow the user to override the reported release. */
7087 if (qemu_uname_release && *qemu_uname_release)
7088 strcpy (buf->release, qemu_uname_release);
7090 unlock_user_struct(buf, arg1, 1);
7092 break;
7093 #ifdef TARGET_I386
7094 case TARGET_NR_modify_ldt:
7095 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7096 break;
7097 #if !defined(TARGET_X86_64)
7098 case TARGET_NR_vm86old:
7099 goto unimplemented;
7100 case TARGET_NR_vm86:
7101 ret = do_vm86(cpu_env, arg1, arg2);
7102 break;
7103 #endif
7104 #endif
7105 case TARGET_NR_adjtimex:
7106 goto unimplemented;
7107 #ifdef TARGET_NR_create_module
7108 case TARGET_NR_create_module:
7109 #endif
7110 case TARGET_NR_init_module:
7111 case TARGET_NR_delete_module:
7112 #ifdef TARGET_NR_get_kernel_syms
7113 case TARGET_NR_get_kernel_syms:
7114 #endif
7115 goto unimplemented;
7116 case TARGET_NR_quotactl:
7117 goto unimplemented;
7118 case TARGET_NR_getpgid:
7119 ret = get_errno(getpgid(arg1));
7120 break;
7121 case TARGET_NR_fchdir:
7122 ret = get_errno(fchdir(arg1));
7123 break;
7124 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7125 case TARGET_NR_bdflush:
7126 goto unimplemented;
7127 #endif
7128 #ifdef TARGET_NR_sysfs
7129 case TARGET_NR_sysfs:
7130 goto unimplemented;
7131 #endif
7132 case TARGET_NR_personality:
7133 ret = get_errno(personality(arg1));
7134 break;
7135 #ifdef TARGET_NR_afs_syscall
7136 case TARGET_NR_afs_syscall:
7137 goto unimplemented;
7138 #endif
7139 #ifdef TARGET_NR__llseek /* Not on alpha */
7140 case TARGET_NR__llseek:
7142 int64_t res;
7143 #if !defined(__NR_llseek)
7144 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7145 if (res == -1) {
7146 ret = get_errno(res);
7147 } else {
7148 ret = 0;
7150 #else
7151 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7152 #endif
7153 if ((ret == 0) && put_user_s64(res, arg4)) {
7154 goto efault;
7157 break;
7158 #endif
7159 case TARGET_NR_getdents:
7160 #ifdef __NR_getdents
7161 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
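        /* 32-bit target on a 64-bit host: the host linux_dirent has 64-bit
         * d_ino/d_off fields, so the records are read into a bounce buffer
         * and re-packed one by one into the target's 32-bit layout.
         */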
7163 struct target_dirent *target_dirp;
7164 struct linux_dirent *dirp;
7165 abi_long count = arg3;
7167 dirp = malloc(count);
7168 if (!dirp) {
7169 ret = -TARGET_ENOMEM;
7170 goto fail;
7173 ret = get_errno(sys_getdents(arg1, dirp, count));
7174 if (!is_error(ret)) {
7175 struct linux_dirent *de;
7176 struct target_dirent *tde;
7177 int len = ret;
7178 int reclen, treclen;
7179 int count1, tnamelen;
7181 count1 = 0;
7182 de = dirp;
7183 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7184 goto efault;
7185 tde = target_dirp;
7186 while (len > 0) {
7187 reclen = de->d_reclen;
7188 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7189 assert(tnamelen >= 0);
7190 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7191 assert(count1 + treclen <= count);
7192 tde->d_reclen = tswap16(treclen);
7193 tde->d_ino = tswapal(de->d_ino);
7194 tde->d_off = tswapal(de->d_off);
7195 memcpy(tde->d_name, de->d_name, tnamelen);
7196 de = (struct linux_dirent *)((char *)de + reclen);
7197 len -= reclen;
7198 tde = (struct target_dirent *)((char *)tde + treclen);
7199 count1 += treclen;
7201 ret = count1;
7202 unlock_user(target_dirp, arg2, ret);
7204 free(dirp);
7206 #else
7208 struct linux_dirent *dirp;
7209 abi_long count = arg3;
7211 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7212 goto efault;
7213 ret = get_errno(sys_getdents(arg1, dirp, count));
7214 if (!is_error(ret)) {
7215 struct linux_dirent *de;
7216 int len = ret;
7217 int reclen;
7218 de = dirp;
7219 while (len > 0) {
7220 reclen = de->d_reclen;
7221 if (reclen > len)
7222 break;
7223 de->d_reclen = tswap16(reclen);
7224 tswapls(&de->d_ino);
7225 tswapls(&de->d_off);
7226 de = (struct linux_dirent *)((char *)de + reclen);
7227 len -= reclen;
7230 unlock_user(dirp, arg2, ret);
7232 #endif
7233 #else
7234 /* Implement getdents in terms of getdents64 */
7236 struct linux_dirent64 *dirp;
7237 abi_long count = arg3;
7239 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7240 if (!dirp) {
7241 goto efault;
7243 ret = get_errno(sys_getdents64(arg1, dirp, count));
7244 if (!is_error(ret)) {
7245 /* Convert the dirent64 structs to target dirent. We do this
7246 * in-place, since we can guarantee that a target_dirent is no
7247 * larger than a dirent64; however this means we have to be
7248 * careful to read everything before writing in the new format.
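         * Each dirent64 record carries d_type in a field just before the
         * name; the old dirent format has no such field, so d_type is
         * re-stored in the final byte of each converted record, as the
         * kernel itself does for the legacy getdents call.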
7250 struct linux_dirent64 *de;
7251 struct target_dirent *tde;
7252 int len = ret;
7253 int tlen = 0;
7255 de = dirp;
7256 tde = (struct target_dirent *)dirp;
7257 while (len > 0) {
7258 int namelen, treclen;
7259 int reclen = de->d_reclen;
7260 uint64_t ino = de->d_ino;
7261 int64_t off = de->d_off;
7262 uint8_t type = de->d_type;
7264 namelen = strlen(de->d_name);
7265 treclen = offsetof(struct target_dirent, d_name)
7266 + namelen + 2;
7267 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7269 memmove(tde->d_name, de->d_name, namelen + 1);
7270 tde->d_ino = tswapal(ino);
7271 tde->d_off = tswapal(off);
7272 tde->d_reclen = tswap16(treclen);
7273                 /* The target_dirent d_type is stored in what was formerly a
7274                  * padding byte at the end of the structure:
7276 *(((char *)tde) + treclen - 1) = type;
7278 de = (struct linux_dirent64 *)((char *)de + reclen);
7279 tde = (struct target_dirent *)((char *)tde + treclen);
7280 len -= reclen;
7281 tlen += treclen;
7283 ret = tlen;
7285 unlock_user(dirp, arg2, ret);
7287 #endif
7288 break;
7289 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7290 case TARGET_NR_getdents64:
7292 struct linux_dirent64 *dirp;
7293 abi_long count = arg3;
7294 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7295 goto efault;
7296 ret = get_errno(sys_getdents64(arg1, dirp, count));
7297 if (!is_error(ret)) {
7298 struct linux_dirent64 *de;
7299 int len = ret;
7300 int reclen;
7301 de = dirp;
7302 while (len > 0) {
7303 reclen = de->d_reclen;
7304 if (reclen > len)
7305 break;
7306 de->d_reclen = tswap16(reclen);
7307 tswap64s((uint64_t *)&de->d_ino);
7308 tswap64s((uint64_t *)&de->d_off);
7309 de = (struct linux_dirent64 *)((char *)de + reclen);
7310 len -= reclen;
7313 unlock_user(dirp, arg2, ret);
7315 break;
7316 #endif /* TARGET_NR_getdents64 */
7317 #if defined(TARGET_NR__newselect)
7318 case TARGET_NR__newselect:
7319 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7320 break;
7321 #endif
7322 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7323 # ifdef TARGET_NR_poll
7324 case TARGET_NR_poll:
7325 # endif
7326 # ifdef TARGET_NR_ppoll
7327 case TARGET_NR_ppoll:
7328 # endif
7330 struct target_pollfd *target_pfd;
7331 unsigned int nfds = arg2;
7332 int timeout = arg3;
7333 struct pollfd *pfd;
7334 unsigned int i;
7336 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7337 if (!target_pfd)
7338 goto efault;
7340 pfd = alloca(sizeof(struct pollfd) * nfds);
7341 for(i = 0; i < nfds; i++) {
7342 pfd[i].fd = tswap32(target_pfd[i].fd);
7343 pfd[i].events = tswap16(target_pfd[i].events);
7346 # ifdef TARGET_NR_ppoll
7347 if (num == TARGET_NR_ppoll) {
7348 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7349 target_sigset_t *target_set;
7350 sigset_t _set, *set = &_set;
7352 if (arg3) {
7353 if (target_to_host_timespec(timeout_ts, arg3)) {
7354 unlock_user(target_pfd, arg1, 0);
7355 goto efault;
7357 } else {
7358 timeout_ts = NULL;
7361 if (arg4) {
7362 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7363 if (!target_set) {
7364 unlock_user(target_pfd, arg1, 0);
7365 goto efault;
7367 target_to_host_sigset(set, target_set);
7368 } else {
7369 set = NULL;
7372 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7374 if (!is_error(ret) && arg3) {
7375 host_to_target_timespec(arg3, timeout_ts);
7377 if (arg4) {
7378 unlock_user(target_set, arg4, 0);
7380 } else
7381 # endif
7382 ret = get_errno(poll(pfd, nfds, timeout));
7384 if (!is_error(ret)) {
7385 for(i = 0; i < nfds; i++) {
7386 target_pfd[i].revents = tswap16(pfd[i].revents);
7389 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7391 break;
7392 #endif
7393 case TARGET_NR_flock:
7394 /* NOTE: the flock constant seems to be the same for every
7395 Linux platform */
7396 ret = get_errno(flock(arg1, arg2));
7397 break;
7398 case TARGET_NR_readv:
7400 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7401 if (vec != NULL) {
7402 ret = get_errno(readv(arg1, vec, arg3));
7403 unlock_iovec(vec, arg2, arg3, 1);
7404 } else {
7405 ret = -host_to_target_errno(errno);
7408 break;
7409 case TARGET_NR_writev:
7411 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7412 if (vec != NULL) {
7413 ret = get_errno(writev(arg1, vec, arg3));
7414 unlock_iovec(vec, arg2, arg3, 0);
7415 } else {
7416 ret = -host_to_target_errno(errno);
7419 break;
7420 case TARGET_NR_getsid:
7421 ret = get_errno(getsid(arg1));
7422 break;
7423 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7424 case TARGET_NR_fdatasync:
7425 ret = get_errno(fdatasync(arg1));
7426 break;
7427 #endif
7428 case TARGET_NR__sysctl:
7429 /* We don't implement this, but ENOTDIR is always a safe
7430 return value. */
7431 ret = -TARGET_ENOTDIR;
7432 break;
7433 case TARGET_NR_sched_getaffinity:
7435 unsigned int mask_size;
7436 unsigned long *mask;
7439          * sched_getaffinity needs multiples of ulong, so we need to take
7440          * care of mismatches between the target and host ulong sizes.
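         * E.g. a 32-bit guest may pass a cpusetsize of 4; on a 64-bit host
         * the kernel wants a multiple of 8, so the buffer is rounded up to
         * sizeof(unsigned long) below and only the first ret bytes are
         * copied back to the guest.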
7442 if (arg2 & (sizeof(abi_ulong) - 1)) {
7443 ret = -TARGET_EINVAL;
7444 break;
7446 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7448 mask = alloca(mask_size);
7449 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7451 if (!is_error(ret)) {
7452 if (copy_to_user(arg3, mask, ret)) {
7453 goto efault;
7457 break;
7458 case TARGET_NR_sched_setaffinity:
7460 unsigned int mask_size;
7461 unsigned long *mask;
7464          * sched_setaffinity needs multiples of ulong, so we need to take
7465          * care of mismatches between the target and host ulong sizes.
7467 if (arg2 & (sizeof(abi_ulong) - 1)) {
7468 ret = -TARGET_EINVAL;
7469 break;
7471 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7473 mask = alloca(mask_size);
7474 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7475 goto efault;
7477 memcpy(mask, p, arg2);
7478         unlock_user_struct(p, arg3, 0);
7480 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7482 break;
7483 case TARGET_NR_sched_setparam:
7485 struct sched_param *target_schp;
7486 struct sched_param schp;
7488 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7489 goto efault;
7490 schp.sched_priority = tswap32(target_schp->sched_priority);
7491 unlock_user_struct(target_schp, arg2, 0);
7492 ret = get_errno(sched_setparam(arg1, &schp));
7494 break;
7495 case TARGET_NR_sched_getparam:
7497 struct sched_param *target_schp;
7498 struct sched_param schp;
7499 ret = get_errno(sched_getparam(arg1, &schp));
7500 if (!is_error(ret)) {
7501 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7502 goto efault;
7503 target_schp->sched_priority = tswap32(schp.sched_priority);
7504 unlock_user_struct(target_schp, arg2, 1);
7507 break;
7508 case TARGET_NR_sched_setscheduler:
7510 struct sched_param *target_schp;
7511 struct sched_param schp;
7512 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7513 goto efault;
7514 schp.sched_priority = tswap32(target_schp->sched_priority);
7515 unlock_user_struct(target_schp, arg3, 0);
7516 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7518 break;
7519 case TARGET_NR_sched_getscheduler:
7520 ret = get_errno(sched_getscheduler(arg1));
7521 break;
7522 case TARGET_NR_sched_yield:
7523 ret = get_errno(sched_yield());
7524 break;
7525 case TARGET_NR_sched_get_priority_max:
7526 ret = get_errno(sched_get_priority_max(arg1));
7527 break;
7528 case TARGET_NR_sched_get_priority_min:
7529 ret = get_errno(sched_get_priority_min(arg1));
7530 break;
7531 case TARGET_NR_sched_rr_get_interval:
7533 struct timespec ts;
7534 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7535 if (!is_error(ret)) {
7536 host_to_target_timespec(arg2, &ts);
7539 break;
7540 case TARGET_NR_nanosleep:
7542 struct timespec req, rem;
7543 target_to_host_timespec(&req, arg1);
7544 ret = get_errno(nanosleep(&req, &rem));
7545 if (is_error(ret) && arg2) {
7546 host_to_target_timespec(arg2, &rem);
7549 break;
7550 #ifdef TARGET_NR_query_module
7551 case TARGET_NR_query_module:
7552 goto unimplemented;
7553 #endif
7554 #ifdef TARGET_NR_nfsservctl
7555 case TARGET_NR_nfsservctl:
7556 goto unimplemented;
7557 #endif
7558 case TARGET_NR_prctl:
7559 switch (arg1) {
7560 case PR_GET_PDEATHSIG:
7562 int deathsig;
7563 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7564 if (!is_error(ret) && arg2
7565 && put_user_ual(deathsig, arg2)) {
7566 goto efault;
7568 break;
7570 #ifdef PR_GET_NAME
7571 case PR_GET_NAME:
7573 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7574 if (!name) {
7575 goto efault;
7577 ret = get_errno(prctl(arg1, (unsigned long)name,
7578 arg3, arg4, arg5));
7579 unlock_user(name, arg2, 16);
7580 break;
7582 case PR_SET_NAME:
7584 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7585 if (!name) {
7586 goto efault;
7588 ret = get_errno(prctl(arg1, (unsigned long)name,
7589 arg3, arg4, arg5));
7590 unlock_user(name, arg2, 0);
7591 break;
7593 #endif
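        /* PR_GET_NAME/PR_SET_NAME above transfer the thread name, which the
         * kernel defines as a 16-byte buffer (TASK_COMM_LEN) including the
         * terminating NUL, hence the fixed size passed to
         * lock_user()/unlock_user().
         */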
7594 default:
7595 /* Most prctl options have no pointer arguments */
7596 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7597 break;
7599 break;
7600 #ifdef TARGET_NR_arch_prctl
7601 case TARGET_NR_arch_prctl:
7602 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7603 ret = do_arch_prctl(cpu_env, arg1, arg2);
7604 break;
7605 #else
7606 goto unimplemented;
7607 #endif
7608 #endif
7609 #ifdef TARGET_NR_pread64
7610 case TARGET_NR_pread64:
7611 if (regpairs_aligned(cpu_env)) {
7612 arg4 = arg5;
7613 arg5 = arg6;
7615 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7616 goto efault;
7617 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7618 unlock_user(p, arg2, ret);
7619 break;
7620 case TARGET_NR_pwrite64:
7621 if (regpairs_aligned(cpu_env)) {
7622 arg4 = arg5;
7623 arg5 = arg6;
7625 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7626 goto efault;
7627 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7628 unlock_user(p, arg2, 0);
7629 break;
7630 #endif
7631 case TARGET_NR_getcwd:
7632 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7633 goto efault;
7634 ret = get_errno(sys_getcwd1(p, arg2));
7635 unlock_user(p, arg1, ret);
7636 break;
7637 case TARGET_NR_capget:
7638 goto unimplemented;
7639 case TARGET_NR_capset:
7640 goto unimplemented;
7641 case TARGET_NR_sigaltstack:
7642 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7643 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7644 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7645 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7646 break;
7647 #else
7648 goto unimplemented;
7649 #endif
7651 #ifdef CONFIG_SENDFILE
7652 case TARGET_NR_sendfile:
7654 off_t *offp = NULL;
7655 off_t off;
7656 if (arg3) {
7657 ret = get_user_sal(off, arg3);
7658 if (is_error(ret)) {
7659 break;
7661 offp = &off;
7663 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7664 if (!is_error(ret) && arg3) {
7665 abi_long ret2 = put_user_sal(off, arg3);
7666 if (is_error(ret2)) {
7667 ret = ret2;
7670 break;
7672 #ifdef TARGET_NR_sendfile64
7673 case TARGET_NR_sendfile64:
7675 off_t *offp = NULL;
7676 off_t off;
7677 if (arg3) {
7678 ret = get_user_s64(off, arg3);
7679 if (is_error(ret)) {
7680 break;
7682 offp = &off;
7684 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7685 if (!is_error(ret) && arg3) {
7686 abi_long ret2 = put_user_s64(off, arg3);
7687 if (is_error(ret2)) {
7688 ret = ret2;
7691 break;
7693 #endif
7694 #else
7695 case TARGET_NR_sendfile:
7696 #ifdef TARGET_NR_sendfile64
7697 case TARGET_NR_sendfile64:
7698 #endif
7699 goto unimplemented;
7700 #endif
7702 #ifdef TARGET_NR_getpmsg
7703 case TARGET_NR_getpmsg:
7704 goto unimplemented;
7705 #endif
7706 #ifdef TARGET_NR_putpmsg
7707 case TARGET_NR_putpmsg:
7708 goto unimplemented;
7709 #endif
7710 #ifdef TARGET_NR_vfork
7711 case TARGET_NR_vfork:
7712 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7713 0, 0, 0, 0));
7714 break;
7715 #endif
7716 #ifdef TARGET_NR_ugetrlimit
7717 case TARGET_NR_ugetrlimit:
7719 struct rlimit rlim;
7720 int resource = target_to_host_resource(arg1);
7721 ret = get_errno(getrlimit(resource, &rlim));
7722 if (!is_error(ret)) {
7723 struct target_rlimit *target_rlim;
7724 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7725 goto efault;
7726 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7727 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7728 unlock_user_struct(target_rlim, arg2, 1);
7730 break;
7732 #endif
7733 #ifdef TARGET_NR_truncate64
7734 case TARGET_NR_truncate64:
7735 if (!(p = lock_user_string(arg1)))
7736 goto efault;
7737 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7738 unlock_user(p, arg1, 0);
7739 break;
7740 #endif
7741 #ifdef TARGET_NR_ftruncate64
7742 case TARGET_NR_ftruncate64:
7743 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7744 break;
7745 #endif
7746 #ifdef TARGET_NR_stat64
7747 case TARGET_NR_stat64:
7748 if (!(p = lock_user_string(arg1)))
7749 goto efault;
7750 ret = get_errno(stat(path(p), &st));
7751 unlock_user(p, arg1, 0);
7752 if (!is_error(ret))
7753 ret = host_to_target_stat64(cpu_env, arg2, &st);
7754 break;
7755 #endif
7756 #ifdef TARGET_NR_lstat64
7757 case TARGET_NR_lstat64:
7758 if (!(p = lock_user_string(arg1)))
7759 goto efault;
7760 ret = get_errno(lstat(path(p), &st));
7761 unlock_user(p, arg1, 0);
7762 if (!is_error(ret))
7763 ret = host_to_target_stat64(cpu_env, arg2, &st);
7764 break;
7765 #endif
7766 #ifdef TARGET_NR_fstat64
7767 case TARGET_NR_fstat64:
7768 ret = get_errno(fstat(arg1, &st));
7769 if (!is_error(ret))
7770 ret = host_to_target_stat64(cpu_env, arg2, &st);
7771 break;
7772 #endif
7773 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7774 #ifdef TARGET_NR_fstatat64
7775 case TARGET_NR_fstatat64:
7776 #endif
7777 #ifdef TARGET_NR_newfstatat
7778 case TARGET_NR_newfstatat:
7779 #endif
7780 if (!(p = lock_user_string(arg2)))
7781 goto efault;
7782 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7783 if (!is_error(ret))
7784 ret = host_to_target_stat64(cpu_env, arg3, &st);
7785 break;
7786 #endif
7787 case TARGET_NR_lchown:
7788 if (!(p = lock_user_string(arg1)))
7789 goto efault;
7790 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7791 unlock_user(p, arg1, 0);
7792 break;
7793 #ifdef TARGET_NR_getuid
7794 case TARGET_NR_getuid:
7795 ret = get_errno(high2lowuid(getuid()));
7796 break;
7797 #endif
7798 #ifdef TARGET_NR_getgid
7799 case TARGET_NR_getgid:
7800 ret = get_errno(high2lowgid(getgid()));
7801 break;
7802 #endif
7803 #ifdef TARGET_NR_geteuid
7804 case TARGET_NR_geteuid:
7805 ret = get_errno(high2lowuid(geteuid()));
7806 break;
7807 #endif
7808 #ifdef TARGET_NR_getegid
7809 case TARGET_NR_getegid:
7810 ret = get_errno(high2lowgid(getegid()));
7811 break;
7812 #endif
7813 case TARGET_NR_setreuid:
7814 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7815 break;
7816 case TARGET_NR_setregid:
7817 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7818 break;
7819 case TARGET_NR_getgroups:
7821 int gidsetsize = arg1;
7822 target_id *target_grouplist;
7823 gid_t *grouplist;
7824 int i;
7826 grouplist = alloca(gidsetsize * sizeof(gid_t));
7827 ret = get_errno(getgroups(gidsetsize, grouplist));
7828 if (gidsetsize == 0)
7829 break;
7830 if (!is_error(ret)) {
7831 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7832 if (!target_grouplist)
7833 goto efault;
7834 for(i = 0;i < ret; i++)
7835 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7836 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7839 break;
7840 case TARGET_NR_setgroups:
7842 int gidsetsize = arg1;
7843 target_id *target_grouplist;
7844 gid_t *grouplist = NULL;
7845 int i;
7846 if (gidsetsize) {
7847 grouplist = alloca(gidsetsize * sizeof(gid_t));
7848 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7849 if (!target_grouplist) {
7850 ret = -TARGET_EFAULT;
7851 goto fail;
7853 for (i = 0; i < gidsetsize; i++) {
7854 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7856 unlock_user(target_grouplist, arg2, 0);
7858 ret = get_errno(setgroups(gidsetsize, grouplist));
7860 break;
7861 case TARGET_NR_fchown:
7862 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7863 break;
7864 #if defined(TARGET_NR_fchownat)
7865 case TARGET_NR_fchownat:
7866 if (!(p = lock_user_string(arg2)))
7867 goto efault;
7868 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7869 low2highgid(arg4), arg5));
7870 unlock_user(p, arg2, 0);
7871 break;
7872 #endif
7873 #ifdef TARGET_NR_setresuid
7874 case TARGET_NR_setresuid:
7875 ret = get_errno(setresuid(low2highuid(arg1),
7876 low2highuid(arg2),
7877 low2highuid(arg3)));
7878 break;
7879 #endif
7880 #ifdef TARGET_NR_getresuid
7881 case TARGET_NR_getresuid:
7883 uid_t ruid, euid, suid;
7884 ret = get_errno(getresuid(&ruid, &euid, &suid));
7885 if (!is_error(ret)) {
7886 if (put_user_id(high2lowuid(ruid), arg1)
7887 || put_user_id(high2lowuid(euid), arg2)
7888 || put_user_id(high2lowuid(suid), arg3))
7889 goto efault;
7892 break;
7893 #endif
7894 #ifdef TARGET_NR_getresgid
7895 case TARGET_NR_setresgid:
7896 ret = get_errno(setresgid(low2highgid(arg1),
7897 low2highgid(arg2),
7898 low2highgid(arg3)));
7899 break;
7900 #endif
7901 #ifdef TARGET_NR_getresgid
7902 case TARGET_NR_getresgid:
7904 gid_t rgid, egid, sgid;
7905 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7906 if (!is_error(ret)) {
7907 if (put_user_id(high2lowgid(rgid), arg1)
7908 || put_user_id(high2lowgid(egid), arg2)
7909 || put_user_id(high2lowgid(sgid), arg3))
7910 goto efault;
7913 break;
7914 #endif
7915 case TARGET_NR_chown:
7916 if (!(p = lock_user_string(arg1)))
7917 goto efault;
7918 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7919 unlock_user(p, arg1, 0);
7920 break;
7921 case TARGET_NR_setuid:
7922 ret = get_errno(setuid(low2highuid(arg1)));
7923 break;
7924 case TARGET_NR_setgid:
7925 ret = get_errno(setgid(low2highgid(arg1)));
7926 break;
7927 case TARGET_NR_setfsuid:
7928 ret = get_errno(setfsuid(arg1));
7929 break;
7930 case TARGET_NR_setfsgid:
7931 ret = get_errno(setfsgid(arg1));
7932 break;
7934 #ifdef TARGET_NR_lchown32
7935 case TARGET_NR_lchown32:
7936 if (!(p = lock_user_string(arg1)))
7937 goto efault;
7938 ret = get_errno(lchown(p, arg2, arg3));
7939 unlock_user(p, arg1, 0);
7940 break;
7941 #endif
7942 #ifdef TARGET_NR_getuid32
7943 case TARGET_NR_getuid32:
7944 ret = get_errno(getuid());
7945 break;
7946 #endif
7948 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7949 /* Alpha specific */
7950 case TARGET_NR_getxuid:
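        /* OSF/Alpha getxuid returns two values: the effective uid is placed
         * in register a4 here, while the real uid goes out through the
         * normal return value (v0).
         */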
7952 uid_t euid;
7953 euid=geteuid();
7954 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7956 ret = get_errno(getuid());
7957 break;
7958 #endif
7959 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7960 /* Alpha specific */
7961 case TARGET_NR_getxgid:
7963 uid_t egid;
7964 egid=getegid();
7965 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7967 ret = get_errno(getgid());
7968 break;
7969 #endif
7970 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7971 /* Alpha specific */
7972 case TARGET_NR_osf_getsysinfo:
7973 ret = -TARGET_EOPNOTSUPP;
7974 switch (arg1) {
7975 case TARGET_GSI_IEEE_FP_CONTROL:
7977 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7979 /* Copied from linux ieee_fpcr_to_swcr. */
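            /* The hardware FPCR bits are rearranged into the software
             * completion control word (SWCR) layout that OSF/Linux user
             * space expects; the trap bits are inverted because the FPCR
             * carries trap-disable flags while the SWCR carries trap-enable
             * flags.
             */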
7980 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7981 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7982 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7983 | SWCR_TRAP_ENABLE_DZE
7984 | SWCR_TRAP_ENABLE_OVF);
7985 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7986 | SWCR_TRAP_ENABLE_INE);
7987 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7988 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7990 if (put_user_u64 (swcr, arg2))
7991 goto efault;
7992 ret = 0;
7994 break;
7996 /* case GSI_IEEE_STATE_AT_SIGNAL:
7997 -- Not implemented in linux kernel.
7998 case GSI_UACPROC:
7999 -- Retrieves current unaligned access state; not much used.
8000 case GSI_PROC_TYPE:
8001 -- Retrieves implver information; surely not used.
8002 case GSI_GET_HWRPB:
8003 -- Grabs a copy of the HWRPB; surely not used.
8006 break;
8007 #endif
8008 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8009 /* Alpha specific */
8010 case TARGET_NR_osf_setsysinfo:
8011 ret = -TARGET_EOPNOTSUPP;
8012 switch (arg1) {
8013 case TARGET_SSI_IEEE_FP_CONTROL:
8014 {
8015 uint64_t swcr, fpcr, orig_fpcr;
8017 if (get_user_u64 (swcr, arg2)) {
8018 goto efault;
8019 }
8020 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8021 fpcr = orig_fpcr & FPCR_DYN_MASK;
8023 /* Copied from linux ieee_swcr_to_fpcr. */
8024 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8025 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8026 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8027 | SWCR_TRAP_ENABLE_DZE
8028 | SWCR_TRAP_ENABLE_OVF)) << 48;
8029 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8030 | SWCR_TRAP_ENABLE_INE)) << 57;
8031 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8032 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8034 cpu_alpha_store_fpcr(cpu_env, fpcr);
8035 ret = 0;
8036 }
8037 break;
8039 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8040 {
8041 uint64_t exc, fpcr, orig_fpcr;
8042 int si_code;
8044 if (get_user_u64(exc, arg2)) {
8045 goto efault;
8046 }
8048 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8050 /* We only add to the exception status here. */
8051 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8053 cpu_alpha_store_fpcr(cpu_env, fpcr);
8054 ret = 0;
8056 /* Old exceptions are not signaled. */
8057 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8059 /* If any exceptions set by this call,
8060 and are unmasked, send a signal. */
8061 si_code = 0;
8062 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8063 si_code = TARGET_FPE_FLTRES;
8064 }
8065 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8066 si_code = TARGET_FPE_FLTUND;
8067 }
8068 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8069 si_code = TARGET_FPE_FLTOVF;
8070 }
8071 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8072 si_code = TARGET_FPE_FLTDIV;
8073 }
8074 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8075 si_code = TARGET_FPE_FLTINV;
8076 }
8077 if (si_code != 0) {
8078 target_siginfo_t info;
8079 info.si_signo = SIGFPE;
8080 info.si_errno = 0;
8081 info.si_code = si_code;
8082 info._sifields._sigfault._addr
8083 = ((CPUArchState *)cpu_env)->pc;
8084 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8085 }
8086 }
8087 break;
8089 /* case SSI_NVPAIRS:
8090 -- Used with SSIN_UACPROC to enable unaligned accesses.
8091 case SSI_IEEE_STATE_AT_SIGNAL:
8092 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8093 -- Not implemented in linux kernel
8094 */
8095 }
8096 break;
8097 #endif
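/* osf_sigprocmask below differs from the generic sigprocmask in that the
 * previous signal mask is returned in the syscall result itself rather than
 * through a user-space pointer. */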
8098 #ifdef TARGET_NR_osf_sigprocmask
8099 /* Alpha specific. */
8100 case TARGET_NR_osf_sigprocmask:
8101 {
8102 abi_ulong mask;
8103 int how;
8104 sigset_t set, oldset;
8106 switch(arg1) {
8107 case TARGET_SIG_BLOCK:
8108 how = SIG_BLOCK;
8109 break;
8110 case TARGET_SIG_UNBLOCK:
8111 how = SIG_UNBLOCK;
8112 break;
8113 case TARGET_SIG_SETMASK:
8114 how = SIG_SETMASK;
8115 break;
8116 default:
8117 ret = -TARGET_EINVAL;
8118 goto fail;
8119 }
8120 mask = arg2;
8121 target_to_host_old_sigset(&set, &mask);
8122 sigprocmask(how, &set, &oldset);
8123 host_to_target_old_sigset(&mask, &oldset);
8124 ret = mask;
8125 }
8126 break;
8127 #endif
8129 #ifdef TARGET_NR_getgid32
8130 case TARGET_NR_getgid32:
8131 ret = get_errno(getgid());
8132 break;
8133 #endif
8134 #ifdef TARGET_NR_geteuid32
8135 case TARGET_NR_geteuid32:
8136 ret = get_errno(geteuid());
8137 break;
8138 #endif
8139 #ifdef TARGET_NR_getegid32
8140 case TARGET_NR_getegid32:
8141 ret = get_errno(getegid());
8142 break;
8143 #endif
8144 #ifdef TARGET_NR_setreuid32
8145 case TARGET_NR_setreuid32:
8146 ret = get_errno(setreuid(arg1, arg2));
8147 break;
8148 #endif
8149 #ifdef TARGET_NR_setregid32
8150 case TARGET_NR_setregid32:
8151 ret = get_errno(setregid(arg1, arg2));
8152 break;
8153 #endif
8154 #ifdef TARGET_NR_getgroups32
8155 case TARGET_NR_getgroups32:
8156 {
8157 int gidsetsize = arg1;
8158 uint32_t *target_grouplist;
8159 gid_t *grouplist;
8160 int i;
8162 grouplist = alloca(gidsetsize * sizeof(gid_t));
8163 ret = get_errno(getgroups(gidsetsize, grouplist));
8164 if (gidsetsize == 0)
8165 break;
8166 if (!is_error(ret)) {
8167 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8168 if (!target_grouplist) {
8169 ret = -TARGET_EFAULT;
8170 goto fail;
8171 }
8172 for(i = 0;i < ret; i++)
8173 target_grouplist[i] = tswap32(grouplist[i]);
8174 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8175 }
8176 }
8177 break;
8178 #endif
8179 #ifdef TARGET_NR_setgroups32
8180 case TARGET_NR_setgroups32:
8181 {
8182 int gidsetsize = arg1;
8183 uint32_t *target_grouplist;
8184 gid_t *grouplist;
8185 int i;
8187 grouplist = alloca(gidsetsize * sizeof(gid_t));
8188 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8189 if (!target_grouplist) {
8190 ret = -TARGET_EFAULT;
8191 goto fail;
8192 }
8193 for(i = 0;i < gidsetsize; i++)
8194 grouplist[i] = tswap32(target_grouplist[i]);
8195 unlock_user(target_grouplist, arg2, 0);
8196 ret = get_errno(setgroups(gidsetsize, grouplist));
8197 }
8198 break;
8199 #endif
8200 #ifdef TARGET_NR_fchown32
8201 case TARGET_NR_fchown32:
8202 ret = get_errno(fchown(arg1, arg2, arg3));
8203 break;
8204 #endif
8205 #ifdef TARGET_NR_setresuid32
8206 case TARGET_NR_setresuid32:
8207 ret = get_errno(setresuid(arg1, arg2, arg3));
8208 break;
8209 #endif
8210 #ifdef TARGET_NR_getresuid32
8211 case TARGET_NR_getresuid32:
8212 {
8213 uid_t ruid, euid, suid;
8214 ret = get_errno(getresuid(&ruid, &euid, &suid));
8215 if (!is_error(ret)) {
8216 if (put_user_u32(ruid, arg1)
8217 || put_user_u32(euid, arg2)
8218 || put_user_u32(suid, arg3))
8219 goto efault;
8220 }
8221 }
8222 break;
8223 #endif
8224 #ifdef TARGET_NR_setresgid32
8225 case TARGET_NR_setresgid32:
8226 ret = get_errno(setresgid(arg1, arg2, arg3));
8227 break;
8228 #endif
8229 #ifdef TARGET_NR_getresgid32
8230 case TARGET_NR_getresgid32:
8231 {
8232 gid_t rgid, egid, sgid;
8233 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8234 if (!is_error(ret)) {
8235 if (put_user_u32(rgid, arg1)
8236 || put_user_u32(egid, arg2)
8237 || put_user_u32(sgid, arg3))
8238 goto efault;
8239 }
8240 }
8241 break;
8242 #endif
8243 #ifdef TARGET_NR_chown32
8244 case TARGET_NR_chown32:
8245 if (!(p = lock_user_string(arg1)))
8246 goto efault;
8247 ret = get_errno(chown(p, arg2, arg3));
8248 unlock_user(p, arg1, 0);
8249 break;
8250 #endif
8251 #ifdef TARGET_NR_setuid32
8252 case TARGET_NR_setuid32:
8253 ret = get_errno(setuid(arg1));
8254 break;
8255 #endif
8256 #ifdef TARGET_NR_setgid32
8257 case TARGET_NR_setgid32:
8258 ret = get_errno(setgid(arg1));
8259 break;
8260 #endif
8261 #ifdef TARGET_NR_setfsuid32
8262 case TARGET_NR_setfsuid32:
8263 ret = get_errno(setfsuid(arg1));
8264 break;
8265 #endif
8266 #ifdef TARGET_NR_setfsgid32
8267 case TARGET_NR_setfsgid32:
8268 ret = get_errno(setfsgid(arg1));
8269 break;
8270 #endif
8272 case TARGET_NR_pivot_root:
8273 goto unimplemented;
8274 #ifdef TARGET_NR_mincore
8275 case TARGET_NR_mincore:
8276 {
8277 void *a;
8278 ret = -TARGET_EFAULT;
8279 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8280 goto efault;
8281 if (!(p = lock_user_string(arg3)))
8282 goto mincore_fail;
8283 ret = get_errno(mincore(a, arg2, p));
8284 unlock_user(p, arg3, ret);
8285 mincore_fail:
8286 unlock_user(a, arg1, 0);
8287 }
8288 break;
8289 #endif
8290 #ifdef TARGET_NR_arm_fadvise64_64
8291 case TARGET_NR_arm_fadvise64_64:
8292 {
8293 /*
8294 * arm_fadvise64_64 looks like fadvise64_64 but
8295 * with different argument order
8296 */
8297 abi_long temp;
8298 temp = arg3;
8299 arg3 = arg4;
8300 arg4 = temp;
8301 }
8302 #endif
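/* TARGET_NR_arm_fadvise64_64 above deliberately has no break: after swapping
 * arg3/arg4 it falls through into the common fadvise64 handling below. */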
8303 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8304 #ifdef TARGET_NR_fadvise64_64
8305 case TARGET_NR_fadvise64_64:
8306 #endif
8307 #ifdef TARGET_NR_fadvise64
8308 case TARGET_NR_fadvise64:
8309 #endif
8310 #ifdef TARGET_S390X
8311 switch (arg4) {
8312 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8313 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8314 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8315 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8316 default: break;
8317 }
8318 #endif
8319 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8320 break;
8321 #endif
8322 #ifdef TARGET_NR_madvise
8323 case TARGET_NR_madvise:
8324 /* A straight passthrough may not be safe because qemu sometimes
8325 turns private file-backed mappings into anonymous mappings.
8326 This will break MADV_DONTNEED.
8327 This is a hint, so ignoring and returning success is ok. */
8328 ret = get_errno(0);
8329 break;
8330 #endif
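/* fcntl64 on 32-bit ABIs: F_GETLK64/F_SETLK64/F_SETLKW64 convert between the
 * guest's struct target_flock64 (or the packed ARM EABI layout) and the host
 * struct flock64, byte-swapping each field with tswap16/32/64; every other
 * command is forwarded to do_fcntl(). */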
8331 #if TARGET_ABI_BITS == 32
8332 case TARGET_NR_fcntl64:
8333 {
8334 int cmd;
8335 struct flock64 fl;
8336 struct target_flock64 *target_fl;
8337 #ifdef TARGET_ARM
8338 struct target_eabi_flock64 *target_efl;
8339 #endif
8341 cmd = target_to_host_fcntl_cmd(arg2);
8342 if (cmd == -TARGET_EINVAL) {
8343 ret = cmd;
8344 break;
8345 }
8347 switch(arg2) {
8348 case TARGET_F_GETLK64:
8349 #ifdef TARGET_ARM
8350 if (((CPUARMState *)cpu_env)->eabi) {
8351 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8352 goto efault;
8353 fl.l_type = tswap16(target_efl->l_type);
8354 fl.l_whence = tswap16(target_efl->l_whence);
8355 fl.l_start = tswap64(target_efl->l_start);
8356 fl.l_len = tswap64(target_efl->l_len);
8357 fl.l_pid = tswap32(target_efl->l_pid);
8358 unlock_user_struct(target_efl, arg3, 0);
8359 } else
8360 #endif
8361 {
8362 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8363 goto efault;
8364 fl.l_type = tswap16(target_fl->l_type);
8365 fl.l_whence = tswap16(target_fl->l_whence);
8366 fl.l_start = tswap64(target_fl->l_start);
8367 fl.l_len = tswap64(target_fl->l_len);
8368 fl.l_pid = tswap32(target_fl->l_pid);
8369 unlock_user_struct(target_fl, arg3, 0);
8370 }
8371 ret = get_errno(fcntl(arg1, cmd, &fl));
8372 if (ret == 0) {
8373 #ifdef TARGET_ARM
8374 if (((CPUARMState *)cpu_env)->eabi) {
8375 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8376 goto efault;
8377 target_efl->l_type = tswap16(fl.l_type);
8378 target_efl->l_whence = tswap16(fl.l_whence);
8379 target_efl->l_start = tswap64(fl.l_start);
8380 target_efl->l_len = tswap64(fl.l_len);
8381 target_efl->l_pid = tswap32(fl.l_pid);
8382 unlock_user_struct(target_efl, arg3, 1);
8383 } else
8384 #endif
8385 {
8386 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8387 goto efault;
8388 target_fl->l_type = tswap16(fl.l_type);
8389 target_fl->l_whence = tswap16(fl.l_whence);
8390 target_fl->l_start = tswap64(fl.l_start);
8391 target_fl->l_len = tswap64(fl.l_len);
8392 target_fl->l_pid = tswap32(fl.l_pid);
8393 unlock_user_struct(target_fl, arg3, 1);
8394 }
8395 }
8396 break;
8398 case TARGET_F_SETLK64:
8399 case TARGET_F_SETLKW64:
8400 #ifdef TARGET_ARM
8401 if (((CPUARMState *)cpu_env)->eabi) {
8402 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8403 goto efault;
8404 fl.l_type = tswap16(target_efl->l_type);
8405 fl.l_whence = tswap16(target_efl->l_whence);
8406 fl.l_start = tswap64(target_efl->l_start);
8407 fl.l_len = tswap64(target_efl->l_len);
8408 fl.l_pid = tswap32(target_efl->l_pid);
8409 unlock_user_struct(target_efl, arg3, 0);
8410 } else
8411 #endif
8412 {
8413 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8414 goto efault;
8415 fl.l_type = tswap16(target_fl->l_type);
8416 fl.l_whence = tswap16(target_fl->l_whence);
8417 fl.l_start = tswap64(target_fl->l_start);
8418 fl.l_len = tswap64(target_fl->l_len);
8419 fl.l_pid = tswap32(target_fl->l_pid);
8420 unlock_user_struct(target_fl, arg3, 0);
8421 }
8422 ret = get_errno(fcntl(arg1, cmd, &fl));
8423 break;
8424 default:
8425 ret = do_fcntl(arg1, arg2, arg3);
8426 break;
8427 }
8428 break;
8429 }
8430 #endif
8431 #ifdef TARGET_NR_cacheflush
8432 case TARGET_NR_cacheflush:
8433 /* self-modifying code is handled automatically, so nothing needed */
8434 ret = 0;
8435 break;
8436 #endif
8437 #ifdef TARGET_NR_security
8438 case TARGET_NR_security:
8439 goto unimplemented;
8440 #endif
8441 #ifdef TARGET_NR_getpagesize
8442 case TARGET_NR_getpagesize:
8443 ret = TARGET_PAGE_SIZE;
8444 break;
8445 #endif
8446 case TARGET_NR_gettid:
8447 ret = get_errno(gettid());
8448 break;
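/* readahead: on 32-bit ABIs the 64-bit file offset arrives split across two
 * registers and is reassembled below; regpairs_aligned() shifts the arguments
 * first on ABIs that pass 64-bit values in aligned register pairs. */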
8449 #ifdef TARGET_NR_readahead
8450 case TARGET_NR_readahead:
8451 #if TARGET_ABI_BITS == 32
8452 if (regpairs_aligned(cpu_env)) {
8453 arg2 = arg3;
8454 arg3 = arg4;
8455 arg4 = arg5;
8456 }
8457 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8458 #else
8459 ret = get_errno(readahead(arg1, arg2, arg3));
8460 #endif
8461 break;
8462 #endif
8463 #ifdef CONFIG_ATTR
8464 #ifdef TARGET_NR_setxattr
8465 case TARGET_NR_listxattr:
8466 case TARGET_NR_llistxattr:
8467 {
8468 void *p, *b = 0;
8469 if (arg2) {
8470 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8471 if (!b) {
8472 ret = -TARGET_EFAULT;
8473 break;
8474 }
8475 }
8476 p = lock_user_string(arg1);
8477 if (p) {
8478 if (num == TARGET_NR_listxattr) {
8479 ret = get_errno(listxattr(p, b, arg3));
8480 } else {
8481 ret = get_errno(llistxattr(p, b, arg3));
8482 }
8483 } else {
8484 ret = -TARGET_EFAULT;
8485 }
8486 unlock_user(p, arg1, 0);
8487 unlock_user(b, arg2, arg3);
8488 break;
8489 }
8490 case TARGET_NR_flistxattr:
8491 {
8492 void *b = 0;
8493 if (arg2) {
8494 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8495 if (!b) {
8496 ret = -TARGET_EFAULT;
8497 break;
8498 }
8499 }
8500 ret = get_errno(flistxattr(arg1, b, arg3));
8501 unlock_user(b, arg2, arg3);
8502 break;
8503 }
8504 case TARGET_NR_setxattr:
8505 case TARGET_NR_lsetxattr:
8506 {
8507 void *p, *n, *v = 0;
8508 if (arg3) {
8509 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8510 if (!v) {
8511 ret = -TARGET_EFAULT;
8512 break;
8513 }
8514 }
8515 p = lock_user_string(arg1);
8516 n = lock_user_string(arg2);
8517 if (p && n) {
8518 if (num == TARGET_NR_setxattr) {
8519 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8520 } else {
8521 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8522 }
8523 } else {
8524 ret = -TARGET_EFAULT;
8525 }
8526 unlock_user(p, arg1, 0);
8527 unlock_user(n, arg2, 0);
8528 unlock_user(v, arg3, 0);
8529 }
8530 break;
8531 case TARGET_NR_fsetxattr:
8532 {
8533 void *n, *v = 0;
8534 if (arg3) {
8535 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8536 if (!v) {
8537 ret = -TARGET_EFAULT;
8538 break;
8539 }
8540 }
8541 n = lock_user_string(arg2);
8542 if (n) {
8543 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8544 } else {
8545 ret = -TARGET_EFAULT;
8546 }
8547 unlock_user(n, arg2, 0);
8548 unlock_user(v, arg3, 0);
8549 }
8550 break;
8551 case TARGET_NR_getxattr:
8552 case TARGET_NR_lgetxattr:
8553 {
8554 void *p, *n, *v = 0;
8555 if (arg3) {
8556 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8557 if (!v) {
8558 ret = -TARGET_EFAULT;
8559 break;
8560 }
8561 }
8562 p = lock_user_string(arg1);
8563 n = lock_user_string(arg2);
8564 if (p && n) {
8565 if (num == TARGET_NR_getxattr) {
8566 ret = get_errno(getxattr(p, n, v, arg4));
8567 } else {
8568 ret = get_errno(lgetxattr(p, n, v, arg4));
8569 }
8570 } else {
8571 ret = -TARGET_EFAULT;
8572 }
8573 unlock_user(p, arg1, 0);
8574 unlock_user(n, arg2, 0);
8575 unlock_user(v, arg3, arg4);
8576 }
8577 break;
8578 case TARGET_NR_fgetxattr:
8579 {
8580 void *n, *v = 0;
8581 if (arg3) {
8582 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8583 if (!v) {
8584 ret = -TARGET_EFAULT;
8585 break;
8586 }
8587 }
8588 n = lock_user_string(arg2);
8589 if (n) {
8590 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8591 } else {
8592 ret = -TARGET_EFAULT;
8593 }
8594 unlock_user(n, arg2, 0);
8595 unlock_user(v, arg3, arg4);
8596 }
8597 break;
8598 case TARGET_NR_removexattr:
8599 case TARGET_NR_lremovexattr:
8600 {
8601 void *p, *n;
8602 p = lock_user_string(arg1);
8603 n = lock_user_string(arg2);
8604 if (p && n) {
8605 if (num == TARGET_NR_removexattr) {
8606 ret = get_errno(removexattr(p, n));
8607 } else {
8608 ret = get_errno(lremovexattr(p, n));
8609 }
8610 } else {
8611 ret = -TARGET_EFAULT;
8612 }
8613 unlock_user(p, arg1, 0);
8614 unlock_user(n, arg2, 0);
8615 }
8616 break;
8617 case TARGET_NR_fremovexattr:
8618 {
8619 void *n;
8620 n = lock_user_string(arg2);
8621 if (n) {
8622 ret = get_errno(fremovexattr(arg1, n));
8623 } else {
8624 ret = -TARGET_EFAULT;
8625 }
8626 unlock_user(n, arg2, 0);
8627 }
8628 break;
8629 #endif
8630 #endif /* CONFIG_ATTR */
8631 #ifdef TARGET_NR_set_thread_area
8632 case TARGET_NR_set_thread_area:
8633 #if defined(TARGET_MIPS)
8634 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8635 ret = 0;
8636 break;
8637 #elif defined(TARGET_CRIS)
8638 if (arg1 & 0xff)
8639 ret = -TARGET_EINVAL;
8640 else {
8641 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8642 ret = 0;
8643 }
8644 break;
8645 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8646 ret = do_set_thread_area(cpu_env, arg1);
8647 break;
8648 #elif defined(TARGET_M68K)
8649 {
8650 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8651 ts->tp_value = arg1;
8652 ret = 0;
8653 break;
8654 }
8655 #else
8656 goto unimplemented_nowarn;
8657 #endif
8658 #endif
8659 #ifdef TARGET_NR_get_thread_area
8660 case TARGET_NR_get_thread_area:
8661 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8662 ret = do_get_thread_area(cpu_env, arg1);
8663 break;
8664 #elif defined(TARGET_M68K)
8665 {
8666 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8667 ret = ts->tp_value;
8668 break;
8669 }
8670 #else
8671 goto unimplemented_nowarn;
8672 #endif
8673 #endif
8674 #ifdef TARGET_NR_getdomainname
8675 case TARGET_NR_getdomainname:
8676 goto unimplemented_nowarn;
8677 #endif
8679 #ifdef TARGET_NR_clock_gettime
8680 case TARGET_NR_clock_gettime:
8681 {
8682 struct timespec ts;
8683 ret = get_errno(clock_gettime(arg1, &ts));
8684 if (!is_error(ret)) {
8685 host_to_target_timespec(arg2, &ts);
8686 }
8687 break;
8688 }
8689 #endif
8690 #ifdef TARGET_NR_clock_getres
8691 case TARGET_NR_clock_getres:
8692 {
8693 struct timespec ts;
8694 ret = get_errno(clock_getres(arg1, &ts));
8695 if (!is_error(ret)) {
8696 host_to_target_timespec(arg2, &ts);
8697 }
8698 break;
8699 }
8700 #endif
8701 #ifdef TARGET_NR_clock_nanosleep
8702 case TARGET_NR_clock_nanosleep:
8703 {
8704 struct timespec ts;
8705 target_to_host_timespec(&ts, arg3);
8706 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8707 if (arg4)
8708 host_to_target_timespec(arg4, &ts);
8709 break;
8710 }
8711 #endif
8713 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8714 case TARGET_NR_set_tid_address:
8715 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8716 break;
8717 #endif
8719 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8720 case TARGET_NR_tkill:
8721 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8722 break;
8723 #endif
8725 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8726 case TARGET_NR_tgkill:
8727 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8728 target_to_host_signal(arg3)));
8729 break;
8730 #endif
8732 #ifdef TARGET_NR_set_robust_list
8733 case TARGET_NR_set_robust_list:
8734 case TARGET_NR_get_robust_list:
8735 /* The ABI for supporting robust futexes has userspace pass
8736 * the kernel a pointer to a linked list which is updated by
8737 * userspace after the syscall; the list is walked by the kernel
8738 * when the thread exits. Since the linked list in QEMU guest
8739 * memory isn't a valid linked list for the host and we have
8740 * no way to reliably intercept the thread-death event, we can't
8741 * support these. Silently return ENOSYS so that guest userspace
8742 * falls back to a non-robust futex implementation (which should
8743 * be OK except in the corner case of the guest crashing while
8744 * holding a mutex that is shared with another process via
8745 * shared memory).
8746 */
8747 goto unimplemented_nowarn;
8748 #endif
8750 #if defined(TARGET_NR_utimensat)
8751 case TARGET_NR_utimensat:
8752 {
8753 struct timespec *tsp, ts[2];
8754 if (!arg3) {
8755 tsp = NULL;
8756 } else {
8757 target_to_host_timespec(ts, arg3);
8758 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8759 tsp = ts;
8760 }
8761 if (!arg2)
8762 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8763 else {
8764 if (!(p = lock_user_string(arg2))) {
8765 ret = -TARGET_EFAULT;
8766 goto fail;
8767 }
8768 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8769 unlock_user(p, arg2, 0);
8770 }
8771 }
8772 break;
8773 #endif
8774 case TARGET_NR_futex:
8775 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8776 break;
8777 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8778 case TARGET_NR_inotify_init:
8779 ret = get_errno(sys_inotify_init());
8780 break;
8781 #endif
8782 #ifdef CONFIG_INOTIFY1
8783 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8784 case TARGET_NR_inotify_init1:
8785 ret = get_errno(sys_inotify_init1(arg1));
8786 break;
8787 #endif
8788 #endif
8789 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8790 case TARGET_NR_inotify_add_watch:
8791 p = lock_user_string(arg2);
8792 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8793 unlock_user(p, arg2, 0);
8794 break;
8795 #endif
8796 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8797 case TARGET_NR_inotify_rm_watch:
8798 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8799 break;
8800 #endif
8802 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8803 case TARGET_NR_mq_open:
8804 {
8805 struct mq_attr posix_mq_attr;
8807 p = lock_user_string(arg1 - 1);
8808 if (arg4 != 0)
8809 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8810 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8811 unlock_user (p, arg1, 0);
8812 }
8813 break;
8815 case TARGET_NR_mq_unlink:
8816 p = lock_user_string(arg1 - 1);
8817 ret = get_errno(mq_unlink(p));
8818 unlock_user (p, arg1, 0);
8819 break;
8821 case TARGET_NR_mq_timedsend:
8822 {
8823 struct timespec ts;
8825 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8826 if (arg5 != 0) {
8827 target_to_host_timespec(&ts, arg5);
8828 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8829 host_to_target_timespec(arg5, &ts);
8830 }
8831 else
8832 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8833 unlock_user (p, arg2, arg3);
8834 }
8835 break;
8837 case TARGET_NR_mq_timedreceive:
8838 {
8839 struct timespec ts;
8840 unsigned int prio;
8842 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8843 if (arg5 != 0) {
8844 target_to_host_timespec(&ts, arg5);
8845 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8846 host_to_target_timespec(arg5, &ts);
8847 }
8848 else
8849 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8850 unlock_user (p, arg2, arg3);
8851 if (arg4 != 0)
8852 put_user_u32(prio, arg4);
8853 }
8854 break;
8856 /* Not implemented for now... */
8857 /* case TARGET_NR_mq_notify: */
8858 /* break; */
8860 case TARGET_NR_mq_getsetattr:
8861 {
8862 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8863 ret = 0;
8864 if (arg3 != 0) {
8865 ret = mq_getattr(arg1, &posix_mq_attr_out);
8866 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8867 }
8868 if (arg2 != 0) {
8869 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8870 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8871 }
8873 }
8874 break;
8875 #endif
8877 #ifdef CONFIG_SPLICE
8878 #ifdef TARGET_NR_tee
8879 case TARGET_NR_tee:
8880 {
8881 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8882 }
8883 break;
8884 #endif
8885 #ifdef TARGET_NR_splice
8886 case TARGET_NR_splice:
8887 {
8888 loff_t loff_in, loff_out;
8889 loff_t *ploff_in = NULL, *ploff_out = NULL;
8890 if(arg2) {
8891 get_user_u64(loff_in, arg2);
8892 ploff_in = &loff_in;
8893 }
8894 if(arg4) {
8895 get_user_u64(loff_out, arg4);
8896 ploff_out = &loff_out;
8897 }
8898 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8899 }
8900 break;
8901 #endif
8902 #ifdef TARGET_NR_vmsplice
8903 case TARGET_NR_vmsplice:
8904 {
8905 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8906 if (vec != NULL) {
8907 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8908 unlock_iovec(vec, arg2, arg3, 0);
8909 } else {
8910 ret = -host_to_target_errno(errno);
8911 }
8912 }
8913 break;
8914 #endif
8915 #endif /* CONFIG_SPLICE */
8916 #ifdef CONFIG_EVENTFD
8917 #if defined(TARGET_NR_eventfd)
8918 case TARGET_NR_eventfd:
8919 ret = get_errno(eventfd(arg1, 0));
8920 break;
8921 #endif
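/* eventfd2: the guest's TARGET_O_NONBLOCK/TARGET_O_CLOEXEC bits are remapped
 * onto the host's O_NONBLOCK/O_CLOEXEC below, since the numeric flag values
 * can differ between target and host. */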
8922 #if defined(TARGET_NR_eventfd2)
8923 case TARGET_NR_eventfd2:
8924 {
8925 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
8926 if (arg2 & TARGET_O_NONBLOCK) {
8927 host_flags |= O_NONBLOCK;
8928 }
8929 if (arg2 & TARGET_O_CLOEXEC) {
8930 host_flags |= O_CLOEXEC;
8931 }
8932 ret = get_errno(eventfd(arg1, host_flags));
8933 break;
8934 }
8935 #endif
8936 #endif /* CONFIG_EVENTFD */
8937 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8938 case TARGET_NR_fallocate:
8939 #if TARGET_ABI_BITS == 32
8940 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8941 target_offset64(arg5, arg6)));
8942 #else
8943 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8944 #endif
8945 break;
8946 #endif
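/* sync_file_range: on 32-bit ABIs the two 64-bit offsets are passed as
 * register pairs and rebuilt with target_offset64(); MIPS shifts the
 * arguments by one slot, and sync_file_range2 moves the flags argument ahead
 * of the offsets. */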
8947 #if defined(CONFIG_SYNC_FILE_RANGE)
8948 #if defined(TARGET_NR_sync_file_range)
8949 case TARGET_NR_sync_file_range:
8950 #if TARGET_ABI_BITS == 32
8951 #if defined(TARGET_MIPS)
8952 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8953 target_offset64(arg5, arg6), arg7));
8954 #else
8955 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8956 target_offset64(arg4, arg5), arg6));
8957 #endif /* !TARGET_MIPS */
8958 #else
8959 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8960 #endif
8961 break;
8962 #endif
8963 #if defined(TARGET_NR_sync_file_range2)
8964 case TARGET_NR_sync_file_range2:
8965 /* This is like sync_file_range but the arguments are reordered */
8966 #if TARGET_ABI_BITS == 32
8967 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8968 target_offset64(arg5, arg6), arg2));
8969 #else
8970 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8971 #endif
8972 break;
8973 #endif
8974 #endif
8975 #if defined(CONFIG_EPOLL)
8976 #if defined(TARGET_NR_epoll_create)
8977 case TARGET_NR_epoll_create:
8978 ret = get_errno(epoll_create(arg1));
8979 break;
8980 #endif
8981 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8982 case TARGET_NR_epoll_create1:
8983 ret = get_errno(epoll_create1(arg1));
8984 break;
8985 #endif
8986 #if defined(TARGET_NR_epoll_ctl)
8987 case TARGET_NR_epoll_ctl:
8988 {
8989 struct epoll_event ep;
8990 struct epoll_event *epp = 0;
8991 if (arg4) {
8992 struct target_epoll_event *target_ep;
8993 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8994 goto efault;
8995 }
8996 ep.events = tswap32(target_ep->events);
8997 /* The epoll_data_t union is just opaque data to the kernel,
8998 * so we transfer all 64 bits across and need not worry what
8999 * actual data type it is.
9000 */
9001 ep.data.u64 = tswap64(target_ep->data.u64);
9002 unlock_user_struct(target_ep, arg4, 0);
9003 epp = &ep;
9004 }
9005 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9006 break;
9007 }
9008 #endif
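/* epoll_wait and epoll_pwait share the handler below: events are collected
 * into a host-side array and converted back to the guest layout with
 * tswap32/tswap64 before being copied out. */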
9010 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9011 #define IMPLEMENT_EPOLL_PWAIT
9012 #endif
9013 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9014 #if defined(TARGET_NR_epoll_wait)
9015 case TARGET_NR_epoll_wait:
9016 #endif
9017 #if defined(IMPLEMENT_EPOLL_PWAIT)
9018 case TARGET_NR_epoll_pwait:
9019 #endif
9020 {
9021 struct target_epoll_event *target_ep;
9022 struct epoll_event *ep;
9023 int epfd = arg1;
9024 int maxevents = arg3;
9025 int timeout = arg4;
9027 target_ep = lock_user(VERIFY_WRITE, arg2,
9028 maxevents * sizeof(struct target_epoll_event), 1);
9029 if (!target_ep) {
9030 goto efault;
9031 }
9033 ep = alloca(maxevents * sizeof(struct epoll_event));
9035 switch (num) {
9036 #if defined(IMPLEMENT_EPOLL_PWAIT)
9037 case TARGET_NR_epoll_pwait:
9038 {
9039 target_sigset_t *target_set;
9040 sigset_t _set, *set = &_set;
9042 if (arg5) {
9043 target_set = lock_user(VERIFY_READ, arg5,
9044 sizeof(target_sigset_t), 1);
9045 if (!target_set) {
9046 unlock_user(target_ep, arg2, 0);
9047 goto efault;
9048 }
9049 target_to_host_sigset(set, target_set);
9050 unlock_user(target_set, arg5, 0);
9051 } else {
9052 set = NULL;
9053 }
9055 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9056 break;
9057 }
9058 #endif
9059 #if defined(TARGET_NR_epoll_wait)
9060 case TARGET_NR_epoll_wait:
9061 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9062 break;
9063 #endif
9064 default:
9065 ret = -TARGET_ENOSYS;
9066 }
9067 if (!is_error(ret)) {
9068 int i;
9069 for (i = 0; i < ret; i++) {
9070 target_ep[i].events = tswap32(ep[i].events);
9071 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9072 }
9073 }
9074 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9075 break;
9076 }
9077 #endif
9078 #endif
9079 #ifdef TARGET_NR_prlimit64
9080 case TARGET_NR_prlimit64:
9081 {
9082 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9083 struct target_rlimit64 *target_rnew, *target_rold;
9084 struct host_rlimit64 rnew, rold, *rnewp = 0;
9085 if (arg3) {
9086 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9087 goto efault;
9088 }
9089 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9090 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9091 unlock_user_struct(target_rnew, arg3, 0);
9092 rnewp = &rnew;
9093 }
9095 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9096 if (!is_error(ret) && arg4) {
9097 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9098 goto efault;
9099 }
9100 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9101 target_rold->rlim_max = tswap64(rold.rlim_max);
9102 unlock_user_struct(target_rold, arg4, 1);
9103 }
9104 break;
9105 }
9106 #endif
9107 #ifdef TARGET_NR_gethostname
9108 case TARGET_NR_gethostname:
9109 {
9110 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9111 if (name) {
9112 ret = get_errno(gethostname(name, arg2));
9113 unlock_user(name, arg1, arg2);
9114 } else {
9115 ret = -TARGET_EFAULT;
9116 }
9117 break;
9118 }
9119 #endif
9120 #ifdef TARGET_NR_atomic_cmpxchg_32
9121 case TARGET_NR_atomic_cmpxchg_32:
9122 {
9123 /* should use start_exclusive from main.c */
9124 abi_ulong mem_value;
9125 if (get_user_u32(mem_value, arg6)) {
9126 target_siginfo_t info;
9127 info.si_signo = SIGSEGV;
9128 info.si_errno = 0;
9129 info.si_code = TARGET_SEGV_MAPERR;
9130 info._sifields._sigfault._addr = arg6;
9131 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9132 ret = 0xdeadbeef;
9134 }
9135 if (mem_value == arg2)
9136 put_user_u32(arg1, arg6);
9137 ret = mem_value;
9138 break;
9139 }
9140 #endif
9141 #ifdef TARGET_NR_atomic_barrier
9142 case TARGET_NR_atomic_barrier:
9143 {
9144 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9145 break;
9146 }
9147 #endif
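/* The POSIX timer cases below keep host timer_t handles in the g_posix_timers
 * array; the value handed back to the guest encodes the array index
 * (0xcafe0000 | index), which later calls recover by masking with 0xffff. */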
9149 #ifdef TARGET_NR_timer_create
9150 case TARGET_NR_timer_create:
9151 {
9152 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9154 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9155 struct target_sigevent *ptarget_sevp;
9156 struct target_timer_t *ptarget_timer;
9158 int clkid = arg1;
9159 int timer_index = next_free_host_timer();
9161 if (timer_index < 0) {
9162 ret = -TARGET_EAGAIN;
9163 } else {
9164 timer_t *phtimer = g_posix_timers + timer_index;
9166 if (arg2) {
9167 if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
9168 goto efault;
9169 }
9171 host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
9172 host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);
9174 phost_sevp = &host_sevp;
9175 }
9177 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9178 if (ret) {
9179 phtimer = NULL;
9180 } else {
9181 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
9182 goto efault;
9183 }
9184 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
9185 unlock_user_struct(ptarget_timer, arg3, 1);
9186 }
9187 }
9188 break;
9189 }
9190 #endif
9192 #ifdef TARGET_NR_timer_settime
9193 case TARGET_NR_timer_settime:
9194 {
9195 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9196 * struct itimerspec * old_value */
9197 arg1 &= 0xffff;
9198 if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9199 ret = -TARGET_EINVAL;
9200 } else {
9201 timer_t htimer = g_posix_timers[arg1];
9202 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9204 target_to_host_itimerspec(&hspec_new, arg3);
9205 ret = get_errno(
9206 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9207 host_to_target_itimerspec(arg4, &hspec_old);
9208 }
9209 break;
9210 }
9211 #endif
9213 #ifdef TARGET_NR_timer_gettime
9214 case TARGET_NR_timer_gettime:
9215 {
9216 /* args: timer_t timerid, struct itimerspec *curr_value */
9217 arg1 &= 0xffff;
9218 if (!arg2) {
9219 return -TARGET_EFAULT;
9220 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9221 ret = -TARGET_EINVAL;
9222 } else {
9223 timer_t htimer = g_posix_timers[arg1];
9224 struct itimerspec hspec;
9225 ret = get_errno(timer_gettime(htimer, &hspec));
9227 if (host_to_target_itimerspec(arg2, &hspec)) {
9228 ret = -TARGET_EFAULT;
9229 }
9230 }
9231 break;
9232 }
9233 #endif
9235 #ifdef TARGET_NR_timer_getoverrun
9236 case TARGET_NR_timer_getoverrun:
9237 {
9238 /* args: timer_t timerid */
9239 arg1 &= 0xffff;
9240 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9241 ret = -TARGET_EINVAL;
9242 } else {
9243 timer_t htimer = g_posix_timers[arg1];
9244 ret = get_errno(timer_getoverrun(htimer));
9245 }
9246 break;
9247 }
9248 #endif
9250 #ifdef TARGET_NR_timer_delete
9251 case TARGET_NR_timer_delete:
9252 {
9253 /* args: timer_t timerid */
9254 arg1 &= 0xffff;
9255 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9256 ret = -TARGET_EINVAL;
9257 } else {
9258 timer_t htimer = g_posix_timers[arg1];
9259 ret = get_errno(timer_delete(htimer));
9260 g_posix_timers[arg1] = 0;
9261 }
9262 break;
9263 }
9264 #endif
9266 default:
9267 unimplemented:
9268 gemu_log("qemu: Unsupported syscall: %d\n", num);
9269 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9270 unimplemented_nowarn:
9271 #endif
9272 ret = -TARGET_ENOSYS;
9273 break;
9274 }
9275 fail:
9276 #ifdef DEBUG
9277 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9278 #endif
9279 if(do_strace)
9280 print_syscall_ret(num, ret);
9281 return ret;
9282 efault:
9283 ret = -TARGET_EFAULT;
9284 goto fail;